Diffstat (limited to 'absl/container/internal')
-rw-r--r--  absl/container/internal/btree.h                        790
-rw-r--r--  absl/container/internal/btree_container.h              229
-rw-r--r--  absl/container/internal/common.h                         8
-rw-r--r--  absl/container/internal/compressed_tuple.h              41
-rw-r--r--  absl/container/internal/compressed_tuple_test.cc         4
-rw-r--r--  absl/container/internal/container_memory.h              78
-rw-r--r--  absl/container/internal/container_memory_test.cc        66
-rw-r--r--  absl/container/internal/counting_allocator.h            69
-rw-r--r--  absl/container/internal/hash_function_defaults.h        15
-rw-r--r--  absl/container/internal/hash_function_defaults_test.cc  90
-rw-r--r--  absl/container/internal/hash_generator_testing.cc        6
-rw-r--r--  absl/container/internal/hash_policy_traits.h            31
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc            3
-rw-r--r--  absl/container/internal/hashtablez_sampler.h            46
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc      18
-rw-r--r--  absl/container/internal/have_sse.h                      19
-rw-r--r--  absl/container/internal/layout.h                        12
-rw-r--r--  absl/container/internal/layout_test.cc                   4
-rw-r--r--  absl/container/internal/raw_hash_set.h                  89
-rw-r--r--  absl/container/internal/raw_hash_set_allocator_test.cc  71
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc            17
-rw-r--r--  absl/container/internal/unordered_map_modifiers_test.h   2
22 files changed, 1057 insertions, 651 deletions
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index fd5c0e7a..002ccc1e 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -65,6 +65,7 @@
#include "absl/container/internal/layout.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
+#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/compare.h"
#include "absl/utility/utility.h"
@@ -93,6 +94,19 @@ struct StringBtreeDefaultLess {
absl::string_view rhs) const {
return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
}
+ StringBtreeDefaultLess(std::less<absl::Cord>) {} // NOLINT
+ absl::weak_ordering operator()(const absl::Cord &lhs,
+ const absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+ }
+ absl::weak_ordering operator()(const absl::Cord &lhs,
+ absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+ }
+ absl::weak_ordering operator()(absl::string_view lhs,
+ const absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs));
+ }
};
struct StringBtreeDefaultGreater {
@@ -107,17 +121,30 @@ struct StringBtreeDefaultGreater {
absl::string_view rhs) const {
return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
}
+ StringBtreeDefaultGreater(std::greater<absl::Cord>) {} // NOLINT
+ absl::weak_ordering operator()(const absl::Cord &lhs,
+ const absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+ }
+ absl::weak_ordering operator()(const absl::Cord &lhs,
+ absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs));
+ }
+ absl::weak_ordering operator()(absl::string_view lhs,
+ const absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+ }
};
// A helper class to convert a boolean comparison into a three-way "compare-to"
-// comparison that returns a negative value to indicate less-than, zero to
-// indicate equality and a positive value to indicate greater-than. This helper
+// comparison that returns an `absl::weak_ordering`. This helper
// class is specialized for less<std::string>, greater<std::string>,
-// less<string_view>, and greater<string_view>.
+// less<string_view>, greater<string_view>, less<absl::Cord>, and
+// greater<absl::Cord>.
//
// key_compare_to_adapter is provided so that btree users
// automatically get the more efficient compare-to code when using common
-// google string types with common comparison functors.
+// Abseil string types with common comparison functors.
// These string-like specializations also turn on heterogeneous lookup by
// default.
template <typename Compare>
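[Editor's note: the Cord overloads above give btrees keyed on absl::Cord the same compare-to fast path and heterogeneous lookup that the std::string and string_view specializations already had. A minimal usage sketch, assuming absl/container/btree_set.h is available:

    #include "absl/container/btree_set.h"
    #include "absl/strings/cord.h"
    #include "absl/strings/string_view.h"

    int main() {
      // std::less<absl::Cord> is adapted to StringBtreeDefaultLess, so the
      // set compares via Cord::Compare and accepts string_view probe keys
      // without materializing a temporary Cord.
      absl::btree_set<absl::Cord> s;
      s.insert(absl::Cord("hello"));
      return s.find(absl::string_view("hello")) != s.end() ? 0 : 1;
    }
]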
@@ -145,12 +172,25 @@ struct key_compare_to_adapter<std::greater<absl::string_view>> {
using type = StringBtreeDefaultGreater;
};
+template <>
+struct key_compare_to_adapter<std::less<absl::Cord>> {
+ using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<absl::Cord>> {
+ using type = StringBtreeDefaultGreater;
+};
+
template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
bool Multi, typename SlotPolicy>
struct common_params {
- // If Compare is a common comparator for a std::string-like type, then we adapt it
+ // If Compare is a common comparator for a string-like type, then we adapt it
// to use heterogeneous lookup and to be a key-compare-to comparator.
using key_compare = typename key_compare_to_adapter<Compare>::type;
+ // True when key_compare has been adapted to StringBtreeDefault{Less,Greater}.
+ using is_key_compare_adapted =
+ absl::negation<std::is_same<key_compare, Compare>>;
// A type which indicates if we have a key-compare-to functor or a plain old
// key-compare functor.
using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
@@ -217,10 +257,6 @@ struct common_params {
static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
slot_policy::move(alloc, src, dest);
}
- static void move(Alloc *alloc, slot_type *first, slot_type *last,
- slot_type *result) {
- slot_policy::move(alloc, first, last, result);
- }
};
// A parameters structure for holding the type parameters for a btree_map.
@@ -252,9 +288,17 @@ struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
};
using is_map_container = std::true_type;
- static const Key &key(const value_type &x) { return x.first; }
- static const Key &key(const init_type &x) { return x.first; }
- static const Key &key(const slot_type *x) { return slot_policy::key(x); }
+ template <typename V>
+ static auto key(const V &value) -> decltype(value.first) {
+ return value.first;
+ }
+ static const Key &key(const slot_type *s) { return slot_policy::key(s); }
+ static const Key &key(slot_type *s) { return slot_policy::key(s); }
+ // For use in node handle.
+ static auto mutable_key(slot_type *s)
+ -> decltype(slot_policy::mutable_key(s)) {
+ return slot_policy::mutable_key(s);
+ }
static mapped_type &value(value_type *value) { return value->second; }
};
@@ -295,13 +339,6 @@ struct set_slot_policy {
static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) {
*dest = std::move(*src);
}
-
- template <typename Alloc>
- static void move(Alloc *alloc, slot_type *first, slot_type *last,
- slot_type *result) {
- for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
- move(alloc, src, dest);
- }
};
// A parameters structure for holding the type parameters for a btree_set.
@@ -315,8 +352,10 @@ struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
using value_compare = typename set_params::common_params::key_compare;
using is_map_container = std::false_type;
- static const Key &key(const value_type &x) { return x; }
- static const Key &key(const slot_type *x) { return *x; }
+ template <typename V>
+ static const V &key(const V &value) { return value; }
+ static const Key &key(const slot_type *slot) { return *slot; }
+ static const Key &key(slot_type *slot) { return *slot; }
};
// An adapter class that converts a lower-bound compare into an upper-bound
@@ -326,8 +365,8 @@ struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
template <typename Compare>
struct upper_bound_adapter {
explicit upper_bound_adapter(const Compare &c) : comp(c) {}
- template <typename K, typename LK>
- bool operator()(const K &a, const LK &b) const {
+ template <typename K1, typename K2>
+ bool operator()(const K1 &a, const K2 &b) const {
// Returns true when a is not greater than b.
return !compare_internal::compare_result_as_less_than(comp(b, a));
}
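[Editor's note: because the adapter only negates a flipped three-way comparison, an upper-bound search can reuse the lower-bound machinery unchanged. A standalone sketch of the same identity, not part of the patch:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> v = {1, 2, 2, 3};
      // upper_bound(2) expressed as a lower-bound-style partition point over
      // "element not greater than key", mirroring upper_bound_adapter.
      auto it = std::partition_point(v.begin(), v.end(),
                                     [](int e) { return !(2 < e); });
      assert(*it == 3);
      assert(it == std::upper_bound(v.begin(), v.end(), 2));
      return 0;
    }
]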
@@ -716,14 +755,10 @@ class btree_node {
template <typename... Args>
void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
- // Removes the value at position i, shifting all existing values and children
- // at positions > i to the left by 1.
- void remove_value(int i, allocator_type *alloc);
-
- // Removes the values at positions [i, i + to_erase), shifting all values
- // after that range to the left by to_erase. Does not change children at all.
- void remove_values_ignore_children(int i, int to_erase,
- allocator_type *alloc);
+ // Removes the values at positions [i, i + to_erase), shifting all existing
+ // values and children after that range to the left by to_erase. Clears all
+ // children between [i, i + to_erase).
+ void remove_values(field_type i, field_type to_erase, allocator_type *alloc);
// Rebalances a node with its right sibling.
void rebalance_right_to_left(int to_move, btree_node *right,
@@ -735,40 +770,36 @@ class btree_node {
void split(int insert_position, btree_node *dest, allocator_type *alloc);
// Merges a node with its right sibling, moving all of the values and the
- // delimiting key in the parent node onto itself.
- void merge(btree_node *sibling, allocator_type *alloc);
-
- // Swap the contents of "this" and "src".
- void swap(btree_node *src, allocator_type *alloc);
+ // delimiting key in the parent node onto itself, and deleting the src node.
+ void merge(btree_node *src, allocator_type *alloc);
// Node allocation/deletion routines.
- static btree_node *init_leaf(btree_node *n, btree_node *parent,
- int max_count) {
- n->set_parent(parent);
- n->set_position(0);
- n->set_start(0);
- n->set_finish(0);
- n->set_max_count(max_count);
+ void init_leaf(btree_node *parent, int max_count) {
+ set_parent(parent);
+ set_position(0);
+ set_start(0);
+ set_finish(0);
+ set_max_count(max_count);
absl::container_internal::SanitizerPoisonMemoryRegion(
- n->start_slot(), max_count * sizeof(slot_type));
- return n;
+ start_slot(), max_count * sizeof(slot_type));
}
- static btree_node *init_internal(btree_node *n, btree_node *parent) {
- init_leaf(n, parent, kNodeValues);
+ void init_internal(btree_node *parent) {
+ init_leaf(parent, kNodeValues);
// Set `max_count` to a sentinel value to indicate that this node is
// internal.
- n->set_max_count(kInternalNodeMaxCount);
+ set_max_count(kInternalNodeMaxCount);
absl::container_internal::SanitizerPoisonMemoryRegion(
- &n->mutable_child(n->start()),
- (kNodeValues + 1) * sizeof(btree_node *));
- return n;
+ &mutable_child(start()), (kNodeValues + 1) * sizeof(btree_node *));
}
- void destroy(allocator_type *alloc) {
- for (int i = start(); i < finish(); ++i) {
- value_destroy(i, alloc);
- }
+
+ static void deallocate(const size_type size, btree_node *node,
+ allocator_type *alloc) {
+ absl::container_internal::Deallocate<Alignment()>(alloc, node, size);
}
+ // Deletes a node and all of its children.
+ static void clear_and_delete(btree_node *node, allocator_type *alloc);
+
public:
// Exposed only for tests.
static bool testonly_uses_linear_node_search() {
@@ -777,33 +808,55 @@ class btree_node {
private:
template <typename... Args>
- void value_init(const size_type i, allocator_type *alloc, Args &&... args) {
+ void value_init(const field_type i, allocator_type *alloc, Args &&... args) {
absl::container_internal::SanitizerUnpoisonObject(slot(i));
params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
}
- void value_destroy(const size_type i, allocator_type *alloc) {
+ void value_destroy(const field_type i, allocator_type *alloc) {
params_type::destroy(alloc, slot(i));
absl::container_internal::SanitizerPoisonObject(slot(i));
}
+ void value_destroy_n(const field_type i, const field_type n,
+ allocator_type *alloc) {
+ for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
+ params_type::destroy(alloc, s);
+ absl::container_internal::SanitizerPoisonObject(s);
+ }
+ }
+
+ static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) {
+ absl::container_internal::SanitizerUnpoisonObject(dest);
+ params_type::transfer(alloc, dest, src);
+ absl::container_internal::SanitizerPoisonObject(src);
+ }
+
+ // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`.
+ void transfer(const size_type dest_i, const size_type src_i,
+ btree_node *src_node, allocator_type *alloc) {
+ transfer(slot(dest_i), src_node->slot(src_i), alloc);
+ }
- // Move n values starting at value i in this node into the values starting at
- // value j in node x.
- void uninitialized_move_n(const size_type n, const size_type i,
- const size_type j, btree_node *x,
- allocator_type *alloc) {
- absl::container_internal::SanitizerUnpoisonMemoryRegion(
- x->slot(j), n * sizeof(slot_type));
- for (slot_type *src = slot(i), *end = src + n, *dest = x->slot(j);
+ // Transfers `n` values starting at value `src_i` in `src_node` into the
+ // values starting at value `dest_i` in `this`.
+ void transfer_n(const size_type n, const size_type dest_i,
+ const size_type src_i, btree_node *src_node,
+ allocator_type *alloc) {
+ for (slot_type *src = src_node->slot(src_i), *end = src + n,
+ *dest = slot(dest_i);
src != end; ++src, ++dest) {
- params_type::construct(alloc, dest, src);
+ transfer(dest, src, alloc);
}
}
- // Destroys a range of n values, starting at index i.
- void value_destroy_n(const size_type i, const size_type n,
- allocator_type *alloc) {
- for (int j = 0; j < n; ++j) {
- value_destroy(i + j, alloc);
+ // Same as above, except that we start at the end and work our way to the
+ // beginning.
+ void transfer_n_backward(const size_type n, const size_type dest_i,
+ const size_type src_i, btree_node *src_node,
+ allocator_type *alloc) {
+ for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n,
+ *dest = slot(dest_i + n - 1);
+ src != end; --src, --dest) {
+ transfer(dest, src, alloc);
}
}
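[Editor's note: transfer_n_backward exists because callers such as emplace_value shift slots within the same node; when the destination range overlaps the source on its right, a forward copy would read slots that were already overwritten. A sketch of the overlap rule, with memmove used only as the illustration, not the btree mechanism:

    #include <cassert>
    #include <cstring>

    int main() {
      char buf[] = "abcd__";
      // Shift "abcd" right by two within the same buffer. An element-wise
      // forward copy would clobber 'c' and 'd' before reading them, so the
      // copy must run back-to-front, as transfer_n_backward does for slots.
      std::memmove(buf + 2, buf, 4);
      assert(std::memcmp(buf, "ababcd", 6) == 0);
      return 0;
    }
]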
@@ -856,8 +909,8 @@ struct btree_iterator {
std::is_same<btree_iterator<N, R, P>, iterator>::value &&
std::is_same<btree_iterator, const_iterator>::value,
int> = 0>
- btree_iterator(const btree_iterator<N, R, P> &x) // NOLINT
- : node(x.node), position(x.position) {}
+ btree_iterator(const btree_iterator<N, R, P> &other) // NOLINT
+ : node(other.node), position(other.position) {}
private:
// This SFINAE allows explicit conversions from const_iterator to
@@ -869,8 +922,8 @@ struct btree_iterator {
std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
std::is_same<btree_iterator, iterator>::value,
int> = 0>
- explicit btree_iterator(const btree_iterator<N, R, P> &x)
- : node(const_cast<node_type *>(x.node)), position(x.position) {}
+ explicit btree_iterator(const btree_iterator<N, R, P> &other)
+ : node(const_cast<node_type *>(other.node)), position(other.position) {}
// Increment/decrement the iterator.
void increment() {
@@ -890,16 +943,27 @@ struct btree_iterator {
void decrement_slow();
public:
- bool operator==(const const_iterator &x) const {
- return node == x.node && position == x.position;
+ bool operator==(const iterator &other) const {
+ return node == other.node && position == other.position;
+ }
+ bool operator==(const const_iterator &other) const {
+ return node == other.node && position == other.position;
}
- bool operator!=(const const_iterator &x) const {
- return node != x.node || position != x.position;
+ bool operator!=(const iterator &other) const {
+ return node != other.node || position != other.position;
+ }
+ bool operator!=(const const_iterator &other) const {
+ return node != other.node || position != other.position;
}
// Accessors for the key/value the iterator is pointing at.
- reference operator*() const { return node->value(position); }
- pointer operator->() const { return &node->value(position); }
+ reference operator*() const {
+ ABSL_HARDENING_ASSERT(node != nullptr);
+ ABSL_HARDENING_ASSERT(node->start() <= position);
+ ABSL_HARDENING_ASSERT(node->finish() > position);
+ return node->value(position);
+ }
+ pointer operator->() const { return &operator*(); }
btree_iterator &operator++() {
increment();
@@ -942,7 +1006,8 @@ struct btree_iterator {
// The node in the tree the iterator is pointing at.
Node *node;
// The position within the node of the tree the iterator is pointing at.
- // TODO(ezb): make this a field_type
+ // NOTE: this is an int rather than a field_type because iterators can point
+ // to invalid positions (such as -1) in certain circumstances.
int position;
};
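[Editor's note: with the assertions added to operator* above, dereferencing an invalid iterator now fails fast in hardened builds instead of reading past the node's slots. A hypothetical repro, assuming a build with ABSL_OPTION_HARDENED=1:

    absl::btree_set<int> s = {1};
    auto it = s.end();
    // *it;  // Previously undefined behavior; now trips
    //       // ABSL_HARDENING_ASSERT(node->finish() > position).
]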
@@ -950,6 +1015,10 @@ template <typename Params>
class btree {
using node_type = btree_node<Params>;
using is_key_compare_to = typename Params::is_key_compare_to;
+ using init_type = typename Params::init_type;
+ using field_type = typename node_type::field_type;
+ using is_multi_container = typename Params::is_multi_container;
+ using is_key_compare_adapted = typename Params::is_key_compare_adapted;
// We use a static empty node for the root/leftmost/rightmost of empty btrees
// in order to avoid branching in begin()/end().
@@ -984,7 +1053,7 @@ class btree {
#endif
}
- enum {
+ enum : uint32_t {
kNodeValues = node_type::kNodeValues,
kMinNodeValues = kNodeValues / 2,
};
@@ -994,9 +1063,9 @@ class btree {
node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {}
- node_stats &operator+=(const node_stats &x) {
- leaf_nodes += x.leaf_nodes;
- internal_nodes += x.internal_nodes;
+ node_stats &operator+=(const node_stats &other) {
+ leaf_nodes += other.leaf_nodes;
+ internal_nodes += other.internal_nodes;
return *this;
}
@@ -1028,15 +1097,15 @@ class btree {
private:
// For use in copy_or_move_values_in_order.
- const value_type &maybe_move_from_iterator(const_iterator x) { return *x; }
- value_type &&maybe_move_from_iterator(iterator x) { return std::move(*x); }
+ const value_type &maybe_move_from_iterator(const_iterator it) { return *it; }
+ value_type &&maybe_move_from_iterator(iterator it) { return std::move(*it); }
// Copies or moves (depending on the template parameter) the values in
- // x into this btree in their order in x. This btree must be empty before this
- // method is called. This method is used in copy construction, copy
- // assignment, and move assignment.
+ // other into this btree in their order in other. This btree must be empty
+ // before this method is called. This method is used in copy construction,
+ // copy assignment, and move assignment.
template <typename Btree>
- void copy_or_move_values_in_order(Btree *x);
+ void copy_or_move_values_in_order(Btree *other);
// Validates that various assumptions/requirements are true at compile time.
constexpr static bool static_assert_validation();
@@ -1044,12 +1113,12 @@ class btree {
public:
btree(const key_compare &comp, const allocator_type &alloc);
- btree(const btree &x);
- btree(btree &&x) noexcept
- : root_(std::move(x.root_)),
- rightmost_(absl::exchange(x.rightmost_, EmptyNode())),
- size_(absl::exchange(x.size_, 0)) {
- x.mutable_root() = EmptyNode();
+ btree(const btree &other);
+ btree(btree &&other) noexcept
+ : root_(std::move(other.root_)),
+ rightmost_(absl::exchange(other.rightmost_, EmptyNode())),
+ size_(absl::exchange(other.size_, 0)) {
+ other.mutable_root() = EmptyNode();
}
~btree() {
@@ -1059,9 +1128,9 @@ class btree {
clear();
}
- // Assign the contents of x to *this.
- btree &operator=(const btree &x);
- btree &operator=(btree &&x) noexcept;
+ // Assign the contents of other to *this.
+ btree &operator=(const btree &other);
+ btree &operator=(btree &&other) noexcept;
iterator begin() { return iterator(leftmost()); }
const_iterator begin() const { return const_iterator(leftmost()); }
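[Editor's note: as the move constructor above shows, a moved-from btree is reset to the empty state (root, rightmost, and size restored to their defaults), so it remains safe to reuse. A sketch; the empty() guarantee here is specific to this implementation:

    absl::btree_set<int> a = {1, 2, 3};
    absl::btree_set<int> b = std::move(a);
    assert(b.size() == 3);
    assert(a.empty());  // moved-from btree is reset to the empty state
]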
@@ -1099,23 +1168,21 @@ class btree {
}
// Finds the range of values which compare equal to key. The first member of
- // the returned pair is equal to lower_bound(key). The second member pair of
- // the pair is equal to upper_bound(key).
+ // the returned pair is equal to lower_bound(key). The second member of the
+ // pair is equal to upper_bound(key).
template <typename K>
- std::pair<iterator, iterator> equal_range(const K &key) {
- return {lower_bound(key), upper_bound(key)};
- }
+ std::pair<iterator, iterator> equal_range(const K &key);
template <typename K>
std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
- return {lower_bound(key), upper_bound(key)};
+ return const_cast<btree *>(this)->equal_range(key);
}
// Inserts a value into the btree only if it does not already exist. The
// boolean return value indicates whether insertion succeeded or failed.
// Requirement: if `key` already exists in the btree, does not consume `args`.
// Requirement: `key` is never referenced after consuming `args`.
- template <typename... Args>
- std::pair<iterator, bool> insert_unique(const key_type &key, Args &&... args);
+ template <typename K, typename... Args>
+ std::pair<iterator, bool> insert_unique(const K &key, Args &&... args);
// Inserts with hint. Checks to see if the value should be placed immediately
// before `position` in the tree. If so, then the insertion will take
@@ -1123,14 +1190,23 @@ class btree {
// logarithmic time as if a call to insert_unique() were made.
// Requirement: if `key` already exists in the btree, does not consume `args`.
// Requirement: `key` is never referenced after consuming `args`.
- template <typename... Args>
+ template <typename K, typename... Args>
std::pair<iterator, bool> insert_hint_unique(iterator position,
- const key_type &key,
+ const K &key,
Args &&... args);
// Insert a range of values into the btree.
+ // Note: the first overload avoids constructing a value_type if the key
+ // already exists in the btree.
+ template <typename InputIterator,
+ typename = decltype(std::declval<const key_compare &>()(
+ params_type::key(*std::declval<InputIterator>()),
+ std::declval<const key_type &>()))>
+ void insert_iterator_unique(InputIterator b, InputIterator e, int);
+ // We need the second overload for cases in which we need to construct a
+ // value_type in order to compare it with the keys already in the btree.
template <typename InputIterator>
- void insert_iterator_unique(InputIterator b, InputIterator e);
+ void insert_iterator_unique(InputIterator b, InputIterator e, char);
// Inserts a value into the btree.
template <typename ValueType>
@@ -1204,15 +1280,15 @@ class btree {
// Clear the btree, deleting all of the values it contains.
void clear();
- // Swap the contents of *this and x.
- void swap(btree &x);
+ // Swaps the contents of `this` and `other`.
+ void swap(btree &other);
const key_compare &key_comp() const noexcept {
return root_.template get<0>();
}
- template <typename K, typename LK>
- bool compare_keys(const K &x, const LK &y) const {
- return compare_internal::compare_result_as_less_than(key_comp()(x, y));
+ template <typename K1, typename K2>
+ bool compare_keys(const K1 &a, const K2 &b) const {
+ return compare_internal::compare_result_as_less_than(key_comp()(a, b));
}
value_compare value_comp() const { return value_compare(key_comp()); }
@@ -1322,38 +1398,24 @@ class btree {
// Node creation/deletion routines.
node_type *new_internal_node(node_type *parent) {
- node_type *p = allocate(node_type::InternalSize());
- return node_type::init_internal(p, parent);
+ node_type *n = allocate(node_type::InternalSize());
+ n->init_internal(parent);
+ return n;
}
node_type *new_leaf_node(node_type *parent) {
- node_type *p = allocate(node_type::LeafSize());
- return node_type::init_leaf(p, parent, kNodeValues);
+ node_type *n = allocate(node_type::LeafSize());
+ n->init_leaf(parent, kNodeValues);
+ return n;
}
node_type *new_leaf_root_node(const int max_count) {
- node_type *p = allocate(node_type::LeafSize(max_count));
- return node_type::init_leaf(p, p, max_count);
+ node_type *n = allocate(node_type::LeafSize(max_count));
+ n->init_leaf(/*parent=*/n, max_count);
+ return n;
}
// Deletion helper routines.
- void erase_same_node(iterator begin, iterator end);
- iterator erase_from_leaf_node(iterator begin, size_type to_erase);
iterator rebalance_after_delete(iterator iter);
- // Deallocates a node of a certain size in bytes using the allocator.
- void deallocate(const size_type size, node_type *node) {
- absl::container_internal::Deallocate<node_type::Alignment()>(
- mutable_allocator(), node, size);
- }
-
- void delete_internal_node(node_type *node) {
- node->destroy(mutable_allocator());
- deallocate(node_type::InternalSize(), node);
- }
- void delete_leaf_node(node_type *node) {
- node->destroy(mutable_allocator());
- deallocate(node_type::LeafSize(node->max_count()), node);
- }
-
// Rebalances or splits the node iter points to.
void rebalance_or_split(iterator *iter);
@@ -1422,9 +1484,6 @@ class btree {
template <typename K>
iterator internal_find(const K &key) const;
- // Deletes a node and all of its children.
- void internal_clear(node_type *node);
-
// Verifies the tree structure of node.
int internal_verify(const node_type *node, const key_type *lo,
const key_type *hi) const;
@@ -1477,10 +1536,8 @@ inline void btree_node<P>::emplace_value(const size_type i,
// Shift old values to create space for new value and then construct it in
// place.
if (i < finish()) {
- value_init(finish(), alloc, slot(finish() - 1));
- for (size_type j = finish() - 1; j > i; --j)
- params_type::move(alloc, slot(j - 1), slot(j));
- value_destroy(i, alloc);
+ transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this,
+ alloc);
}
value_init(i, alloc, std::forward<Args>(args)...);
set_finish(finish() + 1);
@@ -1494,24 +1551,27 @@ inline void btree_node<P>::emplace_value(const size_type i,
}
template <typename P>
-inline void btree_node<P>::remove_value(const int i, allocator_type *alloc) {
- if (!leaf() && finish() > i + 1) {
- assert(child(i + 1)->count() == 0);
- for (size_type j = i + 1; j < finish(); ++j) {
- set_child(j, child(j + 1));
+inline void btree_node<P>::remove_values(const field_type i,
+ const field_type to_erase,
+ allocator_type *alloc) {
+ // Transfer values after the removed range into their new places.
+ value_destroy_n(i, to_erase, alloc);
+ const field_type orig_finish = finish();
+ const field_type src_i = i + to_erase;
+ transfer_n(orig_finish - src_i, i, src_i, this, alloc);
+
+ if (!leaf()) {
+ // Delete all children between begin and end.
+ for (int j = 0; j < to_erase; ++j) {
+ clear_and_delete(child(i + j + 1), alloc);
+ }
+ // Rotate children after end into new positions.
+ for (int j = i + to_erase + 1; j <= orig_finish; ++j) {
+ set_child(j - to_erase, child(j));
+ clear_child(j);
}
- clear_child(finish());
}
-
- remove_values_ignore_children(i, /*to_erase=*/1, alloc);
-}
-
-template <typename P>
-inline void btree_node<P>::remove_values_ignore_children(
- const int i, const int to_erase, allocator_type *alloc) {
- params_type::move(alloc, slot(i + to_erase), finish_slot(), slot(i));
- value_destroy_n(finish() - to_erase, to_erase, alloc);
- set_finish(finish() - to_erase);
+ set_finish(orig_finish - to_erase);
}
template <typename P>
@@ -1525,22 +1585,17 @@ void btree_node<P>::rebalance_right_to_left(const int to_move,
assert(to_move <= right->count());
// 1) Move the delimiting value in the parent to the left node.
- value_init(finish(), alloc, parent()->slot(position()));
+ transfer(finish(), position(), parent(), alloc);
// 2) Move the (to_move - 1) values from the right node to the left node.
- right->uninitialized_move_n(to_move - 1, right->start(), finish() + 1, this,
- alloc);
+ transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc);
// 3) Move the new delimiting value to the parent from the right node.
- params_type::move(alloc, right->slot(to_move - 1),
- parent()->slot(position()));
+ parent()->transfer(position(), right->start() + to_move - 1, right, alloc);
- // 4) Shift the values in the right node to their correct position.
- params_type::move(alloc, right->slot(to_move), right->finish_slot(),
- right->start_slot());
-
- // 5) Destroy the now-empty to_move entries in the right node.
- right->value_destroy_n(right->finish() - to_move, to_move, alloc);
+ // 4) Shift the values in the right node to their correct positions.
+ right->transfer_n(right->count() - to_move, right->start(),
+ right->start() + to_move, right, alloc);
if (!leaf()) {
// Move the child pointers from the right to the left node.
@@ -1575,54 +1630,19 @@ void btree_node<P>::rebalance_left_to_right(const int to_move,
// Lastly, a new delimiting value is moved from the left node into the
// parent, and the remaining empty left node entries are destroyed.
- if (right->count() >= to_move) {
- // The original location of the right->count() values are sufficient to hold
- // the new to_move entries from the parent and left node.
-
- // 1) Shift existing values in the right node to their correct positions.
- right->uninitialized_move_n(to_move, right->finish() - to_move,
- right->finish(), right, alloc);
- for (slot_type *src = right->slot(right->finish() - to_move - 1),
- *dest = right->slot(right->finish() - 1),
- *end = right->start_slot();
- src >= end; --src, --dest) {
- params_type::move(alloc, src, dest);
- }
+ // 1) Shift existing values in the right node to their correct positions.
+ right->transfer_n_backward(right->count(), right->start() + to_move,
+ right->start(), right, alloc);
- // 2) Move the delimiting value in the parent to the right node.
- params_type::move(alloc, parent()->slot(position()),
- right->slot(to_move - 1));
+ // 2) Move the delimiting value in the parent to the right node.
+ right->transfer(right->start() + to_move - 1, position(), parent(), alloc);
- // 3) Move the (to_move - 1) values from the left node to the right node.
- params_type::move(alloc, slot(finish() - (to_move - 1)), finish_slot(),
- right->start_slot());
- } else {
- // The right node does not have enough initialized space to hold the new
- // to_move entries, so part of them will move to uninitialized space.
-
- // 1) Shift existing values in the right node to their correct positions.
- right->uninitialized_move_n(right->count(), right->start(),
- right->start() + to_move, right, alloc);
-
- // 2) Move the delimiting value in the parent to the right node.
- right->value_init(to_move - 1, alloc, parent()->slot(position()));
-
- // 3) Move the (to_move - 1) values from the left node to the right node.
- const size_type uninitialized_remaining = to_move - right->count() - 1;
- uninitialized_move_n(uninitialized_remaining,
- finish() - uninitialized_remaining, right->finish(),
- right, alloc);
- params_type::move(alloc, slot(finish() - (to_move - 1)),
- slot(finish() - uninitialized_remaining),
- right->start_slot());
- }
+ // 3) Move the (to_move - 1) values from the left node to the right node.
+ right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this,
+ alloc);
// 4) Move the new delimiting value to the parent from the left node.
- params_type::move(alloc, slot(finish() - to_move),
- parent()->slot(position()));
-
- // 5) Destroy the now-empty to_move entries in the left node.
- value_destroy_n(finish() - to_move, to_move, alloc);
+ parent()->transfer(position(), finish() - to_move, this, alloc);
if (!leaf()) {
// Move the child pointers from the left to the right node.
@@ -1662,10 +1682,7 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
assert(count() >= 1);
// Move values from the left sibling to the right sibling.
- uninitialized_move_n(dest->count(), finish(), dest->start(), dest, alloc);
-
- // Destroy the now-empty entries in the left node.
- value_destroy_n(finish(), dest->count(), alloc);
+ dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc);
// The split key is the largest value in the left sibling.
--mutable_finish();
@@ -1692,11 +1709,7 @@ void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
value_init(finish(), alloc, parent()->slot(position()));
// Move the values from the right to the left node.
- src->uninitialized_move_n(src->count(), src->start(), finish() + 1, this,
- alloc);
-
- // Destroy the now-empty entries in the right node.
- src->value_destroy_n(src->start(), src->count(), alloc);
+ transfer_n(src->count(), finish() + 1, src->start(), src, alloc);
if (!leaf()) {
// Move the child pointers from the right to the left node.
@@ -1710,56 +1723,59 @@ void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
set_finish(start() + 1 + count() + src->count());
src->set_finish(src->start());
- // Remove the value on the parent node.
- parent()->remove_value(position(), alloc);
+ // Remove the value on the parent node and delete the src node.
+ parent()->remove_values(position(), /*to_erase=*/1, alloc);
}
template <typename P>
-void btree_node<P>::swap(btree_node *x, allocator_type *alloc) {
- using std::swap;
- assert(leaf() == x->leaf());
-
- // Determine which is the smaller/larger node.
- btree_node *smaller = this, *larger = x;
- if (smaller->count() > larger->count()) {
- swap(smaller, larger);
+void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
+ if (node->leaf()) {
+ node->value_destroy_n(node->start(), node->count(), alloc);
+ deallocate(LeafSize(node->max_count()), node, alloc);
+ return;
}
-
- // Swap the values.
- for (slot_type *a = smaller->start_slot(), *b = larger->start_slot(),
- *end = smaller->finish_slot();
- a != end; ++a, ++b) {
- params_type::swap(alloc, a, b);
+ if (node->count() == 0) {
+ deallocate(InternalSize(), node, alloc);
+ return;
}
- // Move values that can't be swapped.
- const size_type to_move = larger->count() - smaller->count();
- larger->uninitialized_move_n(to_move, smaller->finish(), smaller->finish(),
- smaller, alloc);
- larger->value_destroy_n(smaller->finish(), to_move, alloc);
+ // The parent of the root of the subtree we are deleting.
+ btree_node *delete_root_parent = node->parent();
- if (!leaf()) {
- // Swap the child pointers.
- std::swap_ranges(&smaller->mutable_child(smaller->start()),
- &smaller->mutable_child(smaller->finish() + 1),
- &larger->mutable_child(larger->start()));
- // Update swapped children's parent pointers.
- int i = smaller->start();
- int j = larger->start();
- for (; i <= smaller->finish(); ++i, ++j) {
- smaller->child(i)->set_parent(smaller);
- larger->child(j)->set_parent(larger);
- }
- // Move the child pointers that couldn't be swapped.
- for (; j <= larger->finish(); ++i, ++j) {
- smaller->init_child(i, larger->child(j));
- larger->clear_child(j);
- }
+ // Navigate to the leftmost leaf under node, and then delete upwards.
+ while (!node->leaf()) node = node->start_child();
+ // Use `int` because `pos` needs to be able to hold `kNodeValues+1`, which
+ // isn't guaranteed to be a valid `field_type`.
+ int pos = node->position();
+ btree_node *parent = node->parent();
+ for (;;) {
+ // In each iteration of the next loop, we delete one leaf node and go right.
+ assert(pos <= parent->finish());
+ do {
+ node = parent->child(pos);
+ if (!node->leaf()) {
+ // Navigate to the leftmost leaf under node.
+ while (!node->leaf()) node = node->start_child();
+ pos = node->position();
+ parent = node->parent();
+ }
+ node->value_destroy_n(node->start(), node->count(), alloc);
+ deallocate(LeafSize(node->max_count()), node, alloc);
+ ++pos;
+ } while (pos <= parent->finish());
+
+ // Once we've deleted all children of parent, delete parent and go up/right.
+ assert(pos > parent->finish());
+ do {
+ node = parent;
+ pos = node->position();
+ parent = node->parent();
+ node->value_destroy_n(node->start(), node->count(), alloc);
+ deallocate(InternalSize(), node, alloc);
+ if (parent == delete_root_parent) return;
+ ++pos;
+ } while (pos > parent->finish());
}
-
- // Swap the `finish`s.
- // TODO(ezb): with floating storage, will also need to swap starts.
- swap(mutable_finish(), x->mutable_finish());
}
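[Editor's note: the new clear_and_delete walks the subtree iteratively, descending to the leftmost leaf and then deleting upward and to the right, so destroying a deep tree no longer recurses once per level the way internal_clear did. The same non-recursive idea on a toy binary tree, using a hypothetical Node type rather than the btree code:

    struct Node { Node *left = nullptr, *right = nullptr; };

    void DeleteTree(Node *n) {
      while (n != nullptr) {
        if (n->left != nullptr) {
          Node *l = n->left;   // Rotate the left child above n,
          n->left = l->right;  // deferring n itself; no call stack needed.
          l->right = n;
          n = l;
        } else {
          Node *r = n->right;  // No left subtree: n can be deleted now.
          delete n;
          n = r;
        }
      }
    }
]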
////
@@ -1774,6 +1790,7 @@ void btree_iterator<N, R, P>::increment_slow() {
position = node->position();
node = node->parent();
}
+ // TODO(ezb): assert we aren't incrementing end() instead of handling.
if (position == node->finish()) {
*this = save;
}
@@ -1797,6 +1814,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
position = node->position() - 1;
node = node->parent();
}
+ // TODO(ezb): assert we aren't decrementing begin() instead of handling.
if (position < node->start()) {
*this = save;
}
@@ -1814,7 +1832,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
// btree methods
template <typename P>
template <typename Btree>
-void btree<P>::copy_or_move_values_in_order(Btree *x) {
+void btree<P>::copy_or_move_values_in_order(Btree *other) {
static_assert(std::is_same<btree, Btree>::value ||
std::is_same<const btree, Btree>::value,
"Btree type must be same or const.");
@@ -1822,11 +1840,11 @@ void btree<P>::copy_or_move_values_in_order(Btree *x) {
// We can avoid key comparisons because we know the order of the
// values is the same order we'll store them in.
- auto iter = x->begin();
- if (iter == x->end()) return;
+ auto iter = other->begin();
+ if (iter == other->end()) return;
insert_multi(maybe_move_from_iterator(iter));
++iter;
- for (; iter != x->end(); ++iter) {
+ for (; iter != other->end(); ++iter) {
// If the btree is not empty, we can just insert the new value at the end
// of the tree.
internal_emplace(end(), maybe_move_from_iterator(iter));
@@ -1869,13 +1887,48 @@ btree<P>::btree(const key_compare &comp, const allocator_type &alloc)
: root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
template <typename P>
-btree<P>::btree(const btree &x) : btree(x.key_comp(), x.allocator()) {
- copy_or_move_values_in_order(&x);
+btree<P>::btree(const btree &other)
+ : btree(other.key_comp(), other.allocator()) {
+ copy_or_move_values_in_order(&other);
}
template <typename P>
-template <typename... Args>
-auto btree<P>::insert_unique(const key_type &key, Args &&... args)
+template <typename K>
+auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
+ const iterator lower = lower_bound(key);
+ // TODO(ezb): we should be able to avoid this comparison when there's a
+ // three-way comparator.
+ if (lower == end() || compare_keys(key, lower.key())) return {lower, lower};
+
+ const iterator next = std::next(lower);
+ // When the comparator is heterogeneous, we can't assume that comparison with
+ // non-`key_type` will be equivalent to `key_type` comparisons so there
+ // could be multiple equivalent keys even in a unique-container. But for
+ // heterogeneous comparisons from the default string adapted comparators, we
+ // don't need to worry about this.
+ if (!is_multi_container::value &&
+ (std::is_same<K, key_type>::value || is_key_compare_adapted::value)) {
+ // The next iterator after lower must point to a key greater than `key`.
+ // Note: if this assert fails, then it may indicate that the comparator does
+ // not meet the equivalence requirements for Compare
+ // (see https://en.cppreference.com/w/cpp/named_req/Compare).
+ assert(next == end() || compare_keys(key, next.key()));
+ return {lower, next};
+ }
+ // Try once more to avoid the call to upper_bound() if there's only one
+ // equivalent key. This should prevent all calls to upper_bound() in cases of
+ // unique-containers with heterogeneous comparators in which all comparison
+ // operators are equivalent.
+ if (next == end() || compare_keys(key, next.key())) return {lower, next};
+
+ // In this case, we need to call upper_bound() to avoid worst case O(N)
+ // behavior if we were to iterate over equal keys.
+ return {lower, upper_bound(key)};
+}
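[Editor's note: for unique containers with the default or string-adapted comparators, the fast path above answers equal_range with a single descent plus one extra comparison. A usage sketch:

    #include "absl/container/btree_map.h"
    #include "absl/strings/string_view.h"
    #include <cassert>
    #include <iterator>
    #include <string>

    int main() {
      absl::btree_map<std::string, int> m = {{"a", 1}, {"b", 2}};
      // Heterogeneous probe via the adapted comparator; no upper_bound()
      // call on this path.
      auto range = m.equal_range(absl::string_view("a"));
      assert(range.first != range.second);             // "a" is present
      assert(std::next(range.first) == range.second);  // unique: width 1
      return 0;
    }
]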
+
+template <typename P>
+template <typename K, typename... Args>
+auto btree<P>::insert_unique(const K &key, Args &&... args)
-> std::pair<iterator, bool> {
if (empty()) {
mutable_root() = rightmost_ = new_leaf_root_node(1);
@@ -1900,8 +1953,8 @@ auto btree<P>::insert_unique(const key_type &key, Args &&... args)
}
template <typename P>
-template <typename... Args>
-inline auto btree<P>::insert_hint_unique(iterator position, const key_type &key,
+template <typename K, typename... Args>
+inline auto btree<P>::insert_hint_unique(iterator position, const K &key,
Args &&... args)
-> std::pair<iterator, bool> {
if (!empty()) {
@@ -1925,14 +1978,23 @@ inline auto btree<P>::insert_hint_unique(iterator position, const key_type &key,
}
template <typename P>
-template <typename InputIterator>
-void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e) {
+template <typename InputIterator, typename>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, int) {
for (; b != e; ++b) {
insert_hint_unique(end(), params_type::key(*b), *b);
}
}
template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
+ for (; b != e; ++b) {
+ init_type value(*b);
+ insert_hint_unique(end(), params_type::key(value), std::move(value));
+ }
+}
+
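[Editor's note: the trailing int/char parameters are an overload-ranking trick. Call sites pass a literal 0, which binds to the int overload exactly when its SFINAE condition holds (the key can be compared without constructing a value_type) and falls back to the char overload, where 0 needs a conversion, otherwise. A standalone sketch of the idiom with hypothetical names:

    #include <iostream>
    #include <utility>

    template <typename T, typename = decltype(std::declval<T>().key())>
    void Dispatch(const T &, int) { std::cout << "has key()\n"; }

    template <typename T>
    void Dispatch(const T &, char) { std::cout << "fallback\n"; }

    struct WithKey { int key() const { return 0; } };

    int main() {
      Dispatch(WithKey{}, 0);  // int overload: exact match, SFINAE passes
      Dispatch(42, 0);         // char overload: int has no .key()
      return 0;
    }
]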
+template <typename P>
template <typename ValueType>
auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
if (empty()) {
@@ -1977,46 +2039,47 @@ void btree<P>::insert_iterator_multi(InputIterator b, InputIterator e) {
}
template <typename P>
-auto btree<P>::operator=(const btree &x) -> btree & {
- if (this != &x) {
+auto btree<P>::operator=(const btree &other) -> btree & {
+ if (this != &other) {
clear();
- *mutable_key_comp() = x.key_comp();
+ *mutable_key_comp() = other.key_comp();
if (absl::allocator_traits<
allocator_type>::propagate_on_container_copy_assignment::value) {
- *mutable_allocator() = x.allocator();
+ *mutable_allocator() = other.allocator();
}
- copy_or_move_values_in_order(&x);
+ copy_or_move_values_in_order(&other);
}
return *this;
}
template <typename P>
-auto btree<P>::operator=(btree &&x) noexcept -> btree & {
- if (this != &x) {
+auto btree<P>::operator=(btree &&other) noexcept -> btree & {
+ if (this != &other) {
clear();
using std::swap;
if (absl::allocator_traits<
allocator_type>::propagate_on_container_move_assignment::value) {
// Note: `root_` also contains the allocator and the key comparator.
- swap(root_, x.root_);
- swap(rightmost_, x.rightmost_);
- swap(size_, x.size_);
+ swap(root_, other.root_);
+ swap(rightmost_, other.rightmost_);
+ swap(size_, other.size_);
} else {
- if (allocator() == x.allocator()) {
- swap(mutable_root(), x.mutable_root());
- swap(*mutable_key_comp(), *x.mutable_key_comp());
- swap(rightmost_, x.rightmost_);
- swap(size_, x.size_);
+ if (allocator() == other.allocator()) {
+ swap(mutable_root(), other.mutable_root());
+ swap(*mutable_key_comp(), *other.mutable_key_comp());
+ swap(rightmost_, other.rightmost_);
+ swap(size_, other.size_);
} else {
// We aren't allowed to propagate the allocator and the allocator is
// different so we can't take over its memory. We must move each element
- // individually. We need both `x` and `this` to have `x`s key comparator
- // while moving the values so we can't swap the key comparators.
- *mutable_key_comp() = x.key_comp();
- copy_or_move_values_in_order(&x);
+ // individually. We need both `other` and `this` to have `other`s key
+ // comparator while moving the values so we can't swap the key
+ // comparators.
+ *mutable_key_comp() = other.key_comp();
+ copy_or_move_values_in_order(&other);
}
}
}
@@ -2028,7 +2091,7 @@ auto btree<P>::erase(iterator iter) -> iterator {
bool internal_delete = false;
if (!iter.node->leaf()) {
// Deletion of a value on an internal node. First, move the largest value
- // from our left child here, then delete that position (in remove_value()
+ // from our left child here, then delete that position (in remove_values()
// below). We can get to the largest value from our left child by
// decrementing iter.
iterator internal_iter(iter);
@@ -2040,7 +2103,7 @@ auto btree<P>::erase(iterator iter) -> iterator {
}
// Delete the key from the leaf.
- iter.node->remove_value(iter.position, mutable_allocator());
+ iter.node->remove_values(iter.position, /*to_erase=*/1, mutable_allocator());
--size_;
// We want to return the next value after the one we just erased. If we
@@ -2115,7 +2178,9 @@ auto btree<P>::erase_range(iterator begin, iterator end)
}
if (begin.node == end.node) {
- erase_same_node(begin, end);
+ assert(end.position > begin.position);
+ begin.node->remove_values(begin.position, end.position - begin.position,
+ mutable_allocator());
size_ -= count;
return {count, rebalance_after_delete(begin)};
}
@@ -2125,8 +2190,11 @@ auto btree<P>::erase_range(iterator begin, iterator end)
if (begin.node->leaf()) {
const size_type remaining_to_erase = size_ - target_size;
const size_type remaining_in_node = begin.node->finish() - begin.position;
- begin = erase_from_leaf_node(
- begin, (std::min)(remaining_to_erase, remaining_in_node));
+ const size_type to_erase =
+ (std::min)(remaining_to_erase, remaining_in_node);
+ begin.node->remove_values(begin.position, to_erase, mutable_allocator());
+ size_ -= to_erase;
+ begin = rebalance_after_delete(begin);
} else {
begin = erase(begin);
}
@@ -2135,51 +2203,6 @@ auto btree<P>::erase_range(iterator begin, iterator end)
}
template <typename P>
-void btree<P>::erase_same_node(iterator begin, iterator end) {
- assert(begin.node == end.node);
- assert(end.position > begin.position);
-
- node_type *node = begin.node;
- size_type to_erase = end.position - begin.position;
- if (!node->leaf()) {
- // Delete all children between begin and end.
- for (size_type i = 0; i < to_erase; ++i) {
- internal_clear(node->child(begin.position + i + 1));
- }
- // Rotate children after end into new positions.
- for (size_type i = begin.position + to_erase + 1; i <= node->finish();
- ++i) {
- node->set_child(i - to_erase, node->child(i));
- node->clear_child(i);
- }
- }
- node->remove_values_ignore_children(begin.position, to_erase,
- mutable_allocator());
-
- // Do not need to update rightmost_, because
- // * either end == this->end(), and therefore node == rightmost_, and still
- // exists
- // * or end != this->end(), and therefore rightmost_ hasn't been erased, since
- // it wasn't covered in [begin, end)
-}
-
-template <typename P>
-auto btree<P>::erase_from_leaf_node(iterator begin, size_type to_erase)
- -> iterator {
- node_type *node = begin.node;
- assert(node->leaf());
- assert(node->finish() > begin.position);
- assert(begin.position + to_erase <= node->finish());
-
- node->remove_values_ignore_children(begin.position, to_erase,
- mutable_allocator());
-
- size_ -= to_erase;
-
- return rebalance_after_delete(begin);
-}
-
-template <typename P>
template <typename K>
auto btree<P>::erase_unique(const K &key) -> size_type {
const iterator iter = internal_find(key);
@@ -2207,7 +2230,7 @@ auto btree<P>::erase_multi(const K &key) -> size_type {
template <typename P>
void btree<P>::clear() {
if (!empty()) {
- internal_clear(root());
+ node_type::clear_and_delete(root(), mutable_allocator());
}
mutable_root() = EmptyNode();
rightmost_ = EmptyNode();
@@ -2215,20 +2238,20 @@ void btree<P>::clear() {
}
template <typename P>
-void btree<P>::swap(btree &x) {
+void btree<P>::swap(btree &other) {
using std::swap;
if (absl::allocator_traits<
allocator_type>::propagate_on_container_swap::value) {
// Note: `root_` also contains the allocator and the key comparator.
- swap(root_, x.root_);
+ swap(root_, other.root_);
} else {
// It's undefined behavior if the allocators are unequal here.
- assert(allocator() == x.allocator());
- swap(mutable_root(), x.mutable_root());
- swap(*mutable_key_comp(), *x.mutable_key_comp());
+ assert(allocator() == other.allocator());
+ swap(mutable_root(), other.mutable_root());
+ swap(*mutable_key_comp(), *other.mutable_key_comp());
}
- swap(rightmost_, x.rightmost_);
- swap(size_, x.size_);
+ swap(rightmost_, other.rightmost_);
+ swap(size_, other.size_);
}
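[Editor's note: when the allocator does not propagate on swap, unequal allocators make member swap undefined behavior, hence the assert above; with equal allocators the swap is O(1) pointer exchanges. A sketch:

    absl::btree_set<int> a = {1}, b = {2};
    a.swap(b);  // O(1): exchanges root/rightmost/size, never elements
    assert(a.count(2) == 1 && b.count(1) == 1);
]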
template <typename P>
@@ -2348,12 +2371,7 @@ void btree<P>::rebalance_or_split(iterator *iter) {
template <typename P>
void btree<P>::merge_nodes(node_type *left, node_type *right) {
left->merge(right, mutable_allocator());
- if (right->leaf()) {
- if (rightmost_ == right) rightmost_ = left;
- delete_leaf_node(right);
- } else {
- delete_internal_node(right);
- }
+ if (rightmost_ == right) rightmost_ = left;
}
template <typename P>
@@ -2410,21 +2428,20 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
template <typename P>
void btree<P>::try_shrink() {
- if (root()->count() > 0) {
+ node_type *orig_root = root();
+ if (orig_root->count() > 0) {
return;
}
// Deleted the last item on the root node, shrink the height of the tree.
- if (root()->leaf()) {
+ if (orig_root->leaf()) {
assert(size() == 0);
- delete_leaf_node(root());
- mutable_root() = EmptyNode();
- rightmost_ = EmptyNode();
+ mutable_root() = rightmost_ = EmptyNode();
} else {
- node_type *child = root()->start_child();
+ node_type *child = orig_root->start_child();
child->make_root();
- delete_internal_node(root());
mutable_root() = child;
}
+ node_type::clear_and_delete(orig_root, mutable_allocator());
}
template <typename P>
@@ -2452,7 +2469,8 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
--iter;
++iter.position;
}
- const int max_count = iter.node->max_count();
+ const field_type max_count = iter.node->max_count();
+ allocator_type *alloc = mutable_allocator();
if (iter.node->count() == max_count) {
// Make room in the leaf for the new item.
if (max_count < kNodeValues) {
@@ -2461,16 +2479,20 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
assert(iter.node == root());
iter.node =
new_leaf_root_node((std::min<int>)(kNodeValues, 2 * max_count));
- iter.node->swap(root(), mutable_allocator());
- delete_leaf_node(root());
- mutable_root() = iter.node;
- rightmost_ = iter.node;
+ // Transfer the values from the old root to the new root.
+ node_type *old_root = root();
+ node_type *new_root = iter.node;
+ new_root->transfer_n(old_root->count(), new_root->start(),
+ old_root->start(), old_root, alloc);
+ new_root->set_finish(old_root->finish());
+ old_root->set_finish(old_root->start());
+ node_type::clear_and_delete(old_root, alloc);
+ mutable_root() = rightmost_ = new_root;
} else {
rebalance_or_split(&iter);
}
}
- iter.node->emplace_value(iter.position, mutable_allocator(),
- std::forward<Args>(args)...);
+ iter.node->emplace_value(iter.position, alloc, std::forward<Args>(args)...);
++size_;
return iter;
}
@@ -2568,18 +2590,6 @@ auto btree<P>::internal_find(const K &key) const -> iterator {
}
template <typename P>
-void btree<P>::internal_clear(node_type *node) {
- if (!node->leaf()) {
- for (int i = node->start(); i <= node->finish(); ++i) {
- internal_clear(node->child(i));
- }
- delete_internal_node(node);
- } else {
- delete_leaf_node(node);
- }
-}
-
-template <typename P>
int btree<P>::internal_verify(const node_type *node, const key_type *lo,
const key_type *hi) const {
assert(node->count() > 0);
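[Editor's note: taken together, the K-templated insert_unique/insert_hint_unique paths let the map-container overloads in the next file accept heterogeneous keys. A usage sketch against the public btree_map surface, relying on the adapted std::less&lt;std::string&gt; comparator:

    #include "absl/container/btree_map.h"
    #include <cassert>
    #include <string>

    int main() {
      absl::btree_map<std::string, int> m;
      m.insert_or_assign("k", 1);  // inserts
      m.insert_or_assign("k", 2);  // assigns; key compared, not re-created
      auto res = m.try_emplace("k", 3);
      assert(!res.second && res.first->second == 2);  // no overwrite
      return 0;
    }
]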
diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index f2e4c3a5..137614f8 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -68,10 +68,10 @@ class btree_container {
explicit btree_container(const key_compare &comp,
const allocator_type &alloc = allocator_type())
: tree_(comp, alloc) {}
- btree_container(const btree_container &x) = default;
- btree_container(btree_container &&x) noexcept = default;
- btree_container &operator=(const btree_container &x) = default;
- btree_container &operator=(btree_container &&x) noexcept(
+ btree_container(const btree_container &other) = default;
+ btree_container(btree_container &&other) noexcept = default;
+ btree_container &operator=(const btree_container &other) = default;
+ btree_container &operator=(btree_container &&other) noexcept(
std::is_nothrow_move_assignable<Tree>::value) = default;
// Iterator routines.
@@ -154,7 +154,7 @@ class btree_container {
public:
// Utility routines.
void clear() { tree_.clear(); }
- void swap(btree_container &x) { tree_.swap(x.tree_); }
+ void swap(btree_container &other) { tree_.swap(other.tree_); }
void verify() const { tree_.verify(); }
// Size routines.
@@ -257,42 +257,40 @@ class btree_set_container : public btree_container<Tree> {
}
// Insertion routines.
- std::pair<iterator, bool> insert(const value_type &x) {
- return this->tree_.insert_unique(params_type::key(x), x);
+ std::pair<iterator, bool> insert(const value_type &v) {
+ return this->tree_.insert_unique(params_type::key(v), v);
}
- std::pair<iterator, bool> insert(value_type &&x) {
- return this->tree_.insert_unique(params_type::key(x), std::move(x));
+ std::pair<iterator, bool> insert(value_type &&v) {
+ return this->tree_.insert_unique(params_type::key(v), std::move(v));
}
template <typename... Args>
std::pair<iterator, bool> emplace(Args &&... args) {
init_type v(std::forward<Args>(args)...);
return this->tree_.insert_unique(params_type::key(v), std::move(v));
}
- iterator insert(const_iterator position, const value_type &x) {
+ iterator insert(const_iterator hint, const value_type &v) {
return this->tree_
- .insert_hint_unique(iterator(position), params_type::key(x), x)
+ .insert_hint_unique(iterator(hint), params_type::key(v), v)
.first;
}
- iterator insert(const_iterator position, value_type &&x) {
+ iterator insert(const_iterator hint, value_type &&v) {
return this->tree_
- .insert_hint_unique(iterator(position), params_type::key(x),
- std::move(x))
+ .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
.first;
}
template <typename... Args>
- iterator emplace_hint(const_iterator position, Args &&... args) {
+ iterator emplace_hint(const_iterator hint, Args &&... args) {
init_type v(std::forward<Args>(args)...);
return this->tree_
- .insert_hint_unique(iterator(position), params_type::key(v),
- std::move(v))
+ .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
.first;
}
template <typename InputIterator>
void insert(InputIterator b, InputIterator e) {
- this->tree_.insert_iterator_unique(b, e);
+ this->tree_.insert_iterator_unique(b, e, 0);
}
void insert(std::initializer_list<init_type> init) {
- this->tree_.insert_iterator_unique(init.begin(), init.end());
+ this->tree_.insert_iterator_unique(init.begin(), init.end(), 0);
}
insert_return_type insert(node_type &&node) {
if (!node) return {this->end(), false, node_type()};
@@ -316,6 +314,8 @@ class btree_set_container : public btree_container<Tree> {
}
// Deletion routines.
+ // TODO(ezb): we should support heterogeneous comparators that have different
+ // behavior for K!=key_type.
template <typename K = key_type>
size_type erase(const key_arg<K> &key) {
return this->tree_.erase_unique(key);
@@ -392,111 +392,72 @@ class btree_map_container : public btree_set_container<Tree> {
// Insertion routines.
// Note: the nullptr template arguments and extra `const M&` overloads allow
// for supporting bitfield arguments.
- // Note: when we call `std::forward<M>(obj)` twice, it's safe because
- // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
- // `ret.second` is false.
- template <class M>
- std::pair<iterator, bool> insert_or_assign(const key_type &k, const M &obj) {
- const std::pair<iterator, bool> ret = this->tree_.insert_unique(k, k, obj);
- if (!ret.second) ret.first->second = obj;
- return ret;
+ template <typename K = key_type, class M>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k,
+ const M &obj) {
+ return insert_or_assign_impl(k, obj);
}
- template <class M, key_type * = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_type &&k, const M &obj) {
- const std::pair<iterator, bool> ret =
- this->tree_.insert_unique(k, std::move(k), obj);
- if (!ret.second) ret.first->second = obj;
- return ret;
+ template <typename K = key_type, class M, K * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj) {
+ return insert_or_assign_impl(std::forward<K>(k), obj);
}
- template <class M, M * = nullptr>
- std::pair<iterator, bool> insert_or_assign(const key_type &k, M &&obj) {
- const std::pair<iterator, bool> ret =
- this->tree_.insert_unique(k, k, std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
- return ret;
+ template <typename K = key_type, class M, M * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj) {
+ return insert_or_assign_impl(k, std::forward<M>(obj));
}
- template <class M, key_type * = nullptr, M * = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_type &&k, M &&obj) {
- const std::pair<iterator, bool> ret =
- this->tree_.insert_unique(k, std::move(k), std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
- return ret;
+ template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj) {
+ return insert_or_assign_impl(std::forward<K>(k), std::forward<M>(obj));
}
- template <class M>
- iterator insert_or_assign(const_iterator position, const key_type &k,
+ template <typename K = key_type, class M>
+ iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
const M &obj) {
- const std::pair<iterator, bool> ret =
- this->tree_.insert_hint_unique(iterator(position), k, k, obj);
- if (!ret.second) ret.first->second = obj;
- return ret.first;
+ return insert_or_assign_hint_impl(hint, k, obj);
}
- template <class M, key_type * = nullptr>
- iterator insert_or_assign(const_iterator position, key_type &&k,
- const M &obj) {
- const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
- iterator(position), k, std::move(k), obj);
- if (!ret.second) ret.first->second = obj;
- return ret.first;
+ template <typename K = key_type, class M, K * = nullptr>
+ iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, const M &obj) {
+ return insert_or_assign_hint_impl(hint, std::forward<K>(k), obj);
}
- template <class M, M * = nullptr>
- iterator insert_or_assign(const_iterator position, const key_type &k,
- M &&obj) {
- const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
- iterator(position), k, k, std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
- return ret.first;
+ template <typename K = key_type, class M, M * = nullptr>
+ iterator insert_or_assign(const_iterator hint, const key_arg<K> &k, M &&obj) {
+ return insert_or_assign_hint_impl(hint, k, std::forward<M>(obj));
}
- template <class M, key_type * = nullptr, M * = nullptr>
- iterator insert_or_assign(const_iterator position, key_type &&k, M &&obj) {
- const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
- iterator(position), k, std::move(k), std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
- return ret.first;
+ template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
+ iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, M &&obj) {
+ return insert_or_assign_hint_impl(hint, std::forward<K>(k),
+ std::forward<M>(obj));
}
- template <typename... Args>
- std::pair<iterator, bool> try_emplace(const key_type &k, Args &&... args) {
- return this->tree_.insert_unique(
- k, std::piecewise_construct, std::forward_as_tuple(k),
- std::forward_as_tuple(std::forward<Args>(args)...));
+
+ template <typename K = key_type, typename... Args,
+ typename absl::enable_if_t<
+ !std::is_convertible<K, const_iterator>::value, int> = 0>
+ std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&... args) {
+ return try_emplace_impl(k, std::forward<Args>(args)...);
}
- template <typename... Args>
- std::pair<iterator, bool> try_emplace(key_type &&k, Args &&... args) {
- // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k`
- // and then using `k` unsequenced. This is safe because the move is into a
- // forwarding reference and insert_unique guarantees that `key` is never
- // referenced after consuming `args`.
- const key_type &key_ref = k;
- return this->tree_.insert_unique(
- key_ref, std::piecewise_construct, std::forward_as_tuple(std::move(k)),
- std::forward_as_tuple(std::forward<Args>(args)...));
+ template <typename K = key_type, typename... Args,
+ typename absl::enable_if_t<
+ !std::is_convertible<K, const_iterator>::value, int> = 0>
+ std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&... args) {
+ return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
}
- template <typename... Args>
- iterator try_emplace(const_iterator hint, const key_type &k,
+ template <typename K = key_type, typename... Args>
+ iterator try_emplace(const_iterator hint, const key_arg<K> &k,
Args &&... args) {
- return this->tree_
- .insert_hint_unique(iterator(hint), k, std::piecewise_construct,
- std::forward_as_tuple(k),
- std::forward_as_tuple(std::forward<Args>(args)...))
- .first;
+ return try_emplace_hint_impl(hint, k, std::forward<Args>(args)...);
}
- template <typename... Args>
- iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) {
- // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k`
- // and then using `k` unsequenced. This is safe because the move is into a
- // forwarding reference and insert_hint_unique guarantees that `key` is
- // never referenced after consuming `args`.
- const key_type &key_ref = k;
- return this->tree_
- .insert_hint_unique(iterator(hint), key_ref, std::piecewise_construct,
- std::forward_as_tuple(std::move(k)),
- std::forward_as_tuple(std::forward<Args>(args)...))
- .first;
+ template <typename K = key_type, typename... Args>
+ iterator try_emplace(const_iterator hint, key_arg<K> &&k, Args &&... args) {
+ return try_emplace_hint_impl(hint, std::forward<K>(k),
+ std::forward<Args>(args)...);
}
- mapped_type &operator[](const key_type &k) {
+
+ template <typename K = key_type>
+ mapped_type &operator[](const key_arg<K> &k) {
return try_emplace(k).first->second;
}
- mapped_type &operator[](key_type &&k) {
- return try_emplace(std::move(k)).first->second;
+ template <typename K = key_type>
+ mapped_type &operator[](key_arg<K> &&k) {
+ return try_emplace(std::forward<K>(k)).first->second;
}
template <typename K = key_type>
@@ -513,6 +474,40 @@ class btree_map_container : public btree_set_container<Tree> {
base_internal::ThrowStdOutOfRange("absl::btree_map::at");
return it->second;
}
+
+ private:
+ // Note: when we call `std::forward<M>(obj)` twice, it's safe because
+ // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
+ // `ret.second` is false.
+ template <class K, class M>
+ std::pair<iterator, bool> insert_or_assign_impl(K &&k, M &&obj) {
+ const std::pair<iterator, bool> ret =
+ this->tree_.insert_unique(k, std::forward<K>(k), std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret;
+ }
+ template <class K, class M>
+ iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) {
+ const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
+ iterator(hint), k, std::forward<K>(k), std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret.first;
+ }
+
+ template <class K, class... Args>
+ std::pair<iterator, bool> try_emplace_impl(K &&k, Args &&... args) {
+ return this->tree_.insert_unique(
+ k, std::piecewise_construct, std::forward_as_tuple(std::forward<K>(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ }
+ template <class K, class... Args>
+ iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) {
+ return this->tree_
+ .insert_hint_unique(iterator(hint), k, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<K>(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...))
+ .first;
+ }
};
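Taken together, these overloads let btree_map mutate entries through a heterogeneous key whenever the comparator is transparent (as the adapted StringBtreeDefaultLess is). A minimal sketch, not code from this commit, with illustrative names:

    #include <string>

    #include "absl/container/btree_map.h"
    #include "absl/strings/string_view.h"

    void HeterogeneousMutation() {
      absl::btree_map<std::string, int> m;
      absl::string_view k = "example";
      // The std::string key is constructed in place, directly from the view;
      // no temporary std::string is materialized just to search the tree.
      m.try_emplace(k, 1);
      m.insert_or_assign(k, 2);  // assigns through the existing entry
      m[k] = 3;                  // operator[] accepts the view as well
    }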
// A common base class for btree_multiset and btree_multimap.
@@ -562,15 +557,15 @@ class btree_multiset_container : public btree_container<Tree> {
}
// Insertion routines.
- iterator insert(const value_type &x) { return this->tree_.insert_multi(x); }
- iterator insert(value_type &&x) {
- return this->tree_.insert_multi(std::move(x));
+ iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
+ iterator insert(value_type &&v) {
+ return this->tree_.insert_multi(std::move(v));
}
- iterator insert(const_iterator position, const value_type &x) {
- return this->tree_.insert_hint_multi(iterator(position), x);
+ iterator insert(const_iterator hint, const value_type &v) {
+ return this->tree_.insert_hint_multi(iterator(hint), v);
}
- iterator insert(const_iterator position, value_type &&x) {
- return this->tree_.insert_hint_multi(iterator(position), std::move(x));
+ iterator insert(const_iterator hint, value_type &&v) {
+ return this->tree_.insert_hint_multi(iterator(hint), std::move(v));
}
template <typename InputIterator>
void insert(InputIterator b, InputIterator e) {
@@ -584,9 +579,9 @@ class btree_multiset_container : public btree_container<Tree> {
return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
}
template <typename... Args>
- iterator emplace_hint(const_iterator position, Args &&... args) {
+ iterator emplace_hint(const_iterator hint, Args &&... args) {
return this->tree_.insert_hint_multi(
- iterator(position), init_type(std::forward<Args>(args)...));
+ iterator(hint), init_type(std::forward<Args>(args)...));
}
iterator insert(node_type &&node) {
if (!node) return this->end();
diff --git a/absl/container/internal/common.h b/absl/container/internal/common.h
index 5037d803..030e9d4a 100644
--- a/absl/container/internal/common.h
+++ b/absl/container/internal/common.h
@@ -138,6 +138,7 @@ class node_handle<Policy, PolicyTraits, Alloc,
absl::void_t<typename Policy::mapped_type>>
: public node_handle_base<PolicyTraits, Alloc> {
using Base = node_handle_base<PolicyTraits, Alloc>;
+ using slot_type = typename PolicyTraits::slot_type;
public:
using key_type = typename Policy::key_type;
@@ -145,8 +146,11 @@ class node_handle<Policy, PolicyTraits, Alloc,
constexpr node_handle() {}
- auto key() const -> decltype(PolicyTraits::key(this->slot())) {
- return PolicyTraits::key(this->slot());
+ // When C++17 is available, we can use std::launder to provide mutable
+ // access to the key. Otherwise, we provide const access.
+ auto key() const
+ -> decltype(PolicyTraits::mutable_key(std::declval<slot_type*>())) {
+ return PolicyTraits::mutable_key(this->slot());
}
mapped_type& mapped() const {
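Returning a mutable key from node handles is what enables the extract-rename-reinsert pattern. A sketch assuming a C++17 toolchain (pre-C++17 the key() result stays const and the assignment below would not compile); the helper name is illustrative:

    #include <string>
    #include <utility>

    #include "absl/container/flat_hash_map.h"

    void RenameKey(absl::flat_hash_map<std::string, int>& m) {
      auto node = m.extract("old");  // detach the element if present
      if (!node.empty()) {
        node.key() = "new";          // mutable via std::launder under C++17
        m.insert(std::move(node));   // re-insert under the new key
      }
    }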
diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
index 4bfe92fd..02bfd03f 100644
--- a/absl/container/internal/compressed_tuple.h
+++ b/absl/container/internal/compressed_tuple.h
@@ -169,9 +169,33 @@ constexpr bool ShouldAnyUseBase() {
}
template <typename T, typename V>
-using TupleMoveConstructible = typename std::conditional<
- std::is_reference<T>::value, std::is_convertible<V, T>,
- std::is_constructible<T, V&&>>::type;
+using TupleElementMoveConstructible =
+ typename std::conditional<std::is_reference<T>::value,
+ std::is_convertible<V, T>,
+ std::is_constructible<T, V&&>>::type;
+
+template <bool SizeMatches, class T, class... Vs>
+struct TupleMoveConstructible : std::false_type {};
+
+template <class... Ts, class... Vs>
+struct TupleMoveConstructible<true, CompressedTuple<Ts...>, Vs...>
+ : std::integral_constant<
+ bool, absl::conjunction<
+ TupleElementMoveConstructible<Ts, Vs&&>...>::value> {};
+
+template <typename T>
+struct compressed_tuple_size;
+
+template <typename... Es>
+struct compressed_tuple_size<CompressedTuple<Es...>>
+ : public std::integral_constant<std::size_t, sizeof...(Es)> {};
+
+template <class T, class... Vs>
+struct TupleItemsMoveConstructible
+ : std::integral_constant<
+ bool, TupleMoveConstructible<compressed_tuple_size<T>::value ==
+ sizeof...(Vs),
+ T, Vs...>::value> {};
} // namespace internal_compressed_tuple
@@ -217,17 +241,18 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
explicit constexpr CompressedTuple(const Ts&... base)
: CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {}
- template <typename... Vs,
+ template <typename First, typename... Vs,
absl::enable_if_t<
absl::conjunction<
// Ensure we are not hiding default copy/move constructors.
absl::negation<std::is_same<void(CompressedTuple),
- void(absl::decay_t<Vs>...)>>,
- internal_compressed_tuple::TupleMoveConstructible<
- Ts, Vs&&>...>::value,
+ void(absl::decay_t<First>)>>,
+ internal_compressed_tuple::TupleItemsMoveConstructible<
+ CompressedTuple<Ts...>, First, Vs...>>::value,
bool> = true>
- explicit constexpr CompressedTuple(Vs&&... base)
+ explicit constexpr CompressedTuple(First&& first, Vs&&... base)
: CompressedTuple::CompressedTupleImpl(absl::in_place,
+ absl::forward<First>(first),
absl::forward<Vs>(base)...) {}
template <int I>
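The reworked constraint verifies that the argument count matches sizeof...(Ts) before expanding the element-wise checks, so a wrong arity becomes a clean substitution failure instead of an error inside the pack expansion. A usage sketch:

    #include <string>

    #include "absl/container/internal/compressed_tuple.h"

    using absl::container_internal::CompressedTuple;

    void Construct() {
      // Perfect-forwarding constructor: exactly one argument per element.
      CompressedTuple<int, std::string> t(1, std::string("abc"));
      // CompressedTuple<int, std::string> bad(1);  // no viable constructor
      (void)t.get<1>();
    }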
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
index 1dae12db..62a7483e 100644
--- a/absl/container/internal/compressed_tuple_test.cc
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -277,11 +277,11 @@ TEST(CompressedTupleTest, Nested) {
TEST(CompressedTupleTest, Reference) {
int i = 7;
- std::string s = "Very long std::string that goes in the heap";
+ std::string s = "Very long string that goes in the heap";
CompressedTuple<int, int&, std::string, std::string&> x(i, i, s, s);
  // Sanity check. We should not have moved from `s`.
- EXPECT_EQ(s, "Very long std::string that goes in the heap");
+ EXPECT_EQ(s, "Very long string that goes in the heap");
EXPECT_EQ(x.get<0>(), x.get<1>());
EXPECT_NE(&x.get<0>(), &x.get<1>());
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index d24b0f84..e67529ec 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -15,28 +15,34 @@
#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
-#ifdef ADDRESS_SANITIZER
-#include <sanitizer/asan_interface.h>
-#endif
-
-#ifdef MEMORY_SANITIZER
-#include <sanitizer/msan_interface.h>
-#endif
-
#include <cassert>
#include <cstddef>
#include <memory>
+#include <new>
#include <tuple>
#include <type_traits>
#include <utility>
+#include "absl/base/config.h"
#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+#include <sanitizer/msan_interface.h>
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+template <size_t Alignment>
+struct alignas(Alignment) AlignedType {};
+
// Allocates at least n bytes aligned to the specified alignment.
// Alignment must be a power of 2. It must be positive.
//
@@ -48,11 +54,14 @@ template <size_t Alignment, class Alloc>
void* Allocate(Alloc* alloc, size_t n) {
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
- struct alignas(Alignment) M {};
+ using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
- A mem_alloc(*alloc);
- void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
+ // On macOS, "mem_alloc" is a #define with one argument defined in
+ // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
+ // with the "foo(bar)" syntax.
+ A my_mem_alloc(*alloc);
+ void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
"allocator does not respect alignment");
return p;
@@ -64,11 +73,14 @@ template <size_t Alignment, class Alloc>
void Deallocate(Alloc* alloc, void* p, size_t n) {
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
- struct alignas(Alignment) M {};
+ using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
- A mem_alloc(*alloc);
- AT::deallocate(mem_alloc, static_cast<M*>(p),
+ // On macOS, "mem_alloc" is a #define with one argument defined in
+ // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
+ // with the "foo(bar)" syntax.
+ A my_mem_alloc(*alloc);
+ AT::deallocate(my_mem_alloc, static_cast<M*>(p),
(n + sizeof(M) - 1) / sizeof(M));
}
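Hoisting the aligned dummy struct into the shared AlignedType<Alignment> makes Allocate and Deallocate rebind the allocator to the same type; previously each function declared its own local struct, which allocators keying state on the rebound type could observe as a mismatch (the new AllocateDeallocateMatchType test below checks exactly this). A usage sketch of these internal helpers:

    #include <memory>

    #include "absl/container/internal/container_memory.h"

    void RawAllocation() {
      std::allocator<char> alloc;
      // 40 bytes with 16-byte alignment; both calls rebind to AlignedType<16>.
      void* p = absl::container_internal::Allocate<16>(&alloc, 40);
      absl::container_internal::Deallocate<16>(&alloc, p, 40);
    }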
@@ -205,10 +217,10 @@ DecomposeValue(F&& f, Arg&& arg) {
// Helper functions for asan and msan.
inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(m, s);
#endif
-#ifdef MEMORY_SANITIZER
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_poison(m, s);
#endif
(void)m;
@@ -216,10 +228,10 @@ inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
}
inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_UNPOISON_MEMORY_REGION(m, s);
#endif
-#ifdef MEMORY_SANITIZER
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_unpoison(m, s);
#endif
(void)m;
@@ -246,8 +258,8 @@ namespace memory_internal {
// type, which is non-portable.
template <class Pair, class = std::true_type>
struct OffsetOf {
- static constexpr size_t kFirst = -1;
- static constexpr size_t kSecond = -1;
+ static constexpr size_t kFirst = static_cast<size_t>(-1);
+ static constexpr size_t kSecond = static_cast<size_t>(-1);
};
template <class Pair>
@@ -316,11 +328,12 @@ union map_slot_type {
map_slot_type() {}
~map_slot_type() = delete;
using value_type = std::pair<const K, V>;
- using mutable_value_type = std::pair<K, V>;
+ using mutable_value_type =
+ std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>;
value_type value;
mutable_value_type mutable_value;
- K key;
+ absl::remove_const_t<K> key;
};
template <class K, class V>
@@ -346,6 +359,20 @@ struct map_slot_policy {
return slot->value;
}
+ // When C++17 is available, we can use std::launder to provide mutable
+ // access to the key for use in node handle.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+ static K& mutable_key(slot_type* slot) {
+ // Still check for kMutableKeys so that we can avoid calling std::launder
+ // unless necessary because it can interfere with optimizations.
+ return kMutableKeys::value ? slot->key
+ : *std::launder(const_cast<K*>(
+ std::addressof(slot->value.first)));
+ }
+#else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606)
+ static const K& mutable_key(slot_type* slot) { return key(slot); }
+#endif
+
static const K& key(const slot_type* slot) {
return kMutableKeys::value ? slot->key : slot->value.first;
}
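A condensed restatement of the branch above, as a free function: std::launder (C++17) blesses the const_cast result as a pointer through which the object may legitimately be accessed. Writing through it is only sound when the underlying storage was not itself declared const, which the slot union guarantees here; MutableFirst is an illustrative name:

    #include <memory>
    #include <new>
    #include <utility>

    template <class K, class V>
    K& MutableFirst(std::pair<const K, V>& p) {
      // Requires C++17 std::launder, mirroring mutable_key() above.
      return *std::launder(const_cast<K*>(std::addressof(p.first)));
    }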
@@ -424,13 +451,6 @@ struct map_slot_policy {
std::move(src->value));
}
}
-
- template <class Allocator>
- static void move(Allocator* alloc, slot_type* first, slot_type* last,
- slot_type* result) {
- for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
- move(alloc, src, dest);
- }
};
} // namespace container_internal
diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc
index 7942c7be..6a7fcd29 100644
--- a/absl/container/internal/container_memory_test.cc
+++ b/absl/container/internal/container_memory_test.cc
@@ -16,10 +16,13 @@
#include <cstdint>
#include <tuple>
+#include <typeindex>
+#include <typeinfo>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/container/internal/test_instance_tracker.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -27,6 +30,11 @@ ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
+using ::absl::test_internal::CopyableMovableInstance;
+using ::absl::test_internal::InstanceTracker;
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::Gt;
using ::testing::Pair;
TEST(Memory, AlignmentLargerThanBase) {
@@ -45,6 +53,39 @@ TEST(Memory, AlignmentSmallerThanBase) {
Deallocate<2>(&alloc, mem, 3);
}
+std::map<std::type_index, int>& AllocationMap() {
+ static auto* map = new std::map<std::type_index, int>;
+ return *map;
+}
+
+template <typename T>
+struct TypeCountingAllocator {
+ TypeCountingAllocator() = default;
+ template <typename U>
+ TypeCountingAllocator(const TypeCountingAllocator<U>&) {} // NOLINT
+
+ using value_type = T;
+
+ T* allocate(size_t n, const void* = nullptr) {
+ AllocationMap()[typeid(T)] += n;
+ return std::allocator<T>().allocate(n);
+ }
+ void deallocate(T* p, std::size_t n) {
+ AllocationMap()[typeid(T)] -= n;
+ return std::allocator<T>().deallocate(p, n);
+ }
+};
+
+TEST(Memory, AllocateDeallocateMatchType) {
+ TypeCountingAllocator<int> alloc;
+ void* mem = Allocate<1>(&alloc, 1);
+  // Verify that the allocation was counted.
+ EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, Gt(0))));
+ Deallocate<1>(&alloc, mem, 1);
+ // Verify that the deallocation matched.
+ EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, 0)));
+}
+
class Fixture : public ::testing::Test {
using Alloc = std::allocator<std::string>;
@@ -184,6 +225,31 @@ TEST(DecomposePair, NotDecomposable) {
std::make_tuple(0.5)));
}
+TEST(MapSlotPolicy, ConstKeyAndValue) {
+ using slot_policy = map_slot_policy<const CopyableMovableInstance,
+ const CopyableMovableInstance>;
+ using slot_type = typename slot_policy::slot_type;
+
+ union Slots {
+ Slots() {}
+ ~Slots() {}
+ slot_type slots[100];
+ } slots;
+
+ std::allocator<
+ std::pair<const CopyableMovableInstance, const CopyableMovableInstance>>
+ alloc;
+ InstanceTracker tracker;
+ slot_policy::construct(&alloc, &slots.slots[0], CopyableMovableInstance(1),
+ CopyableMovableInstance(1));
+ for (int i = 0; i < 99; ++i) {
+ slot_policy::transfer(&alloc, &slots.slots[i + 1], &slots.slots[i]);
+ }
+ slot_policy::destroy(&alloc, &slots.slots[99]);
+
+ EXPECT_EQ(tracker.copies(), 0);
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/counting_allocator.h b/absl/container/internal/counting_allocator.h
index 9efdc662..927cf082 100644
--- a/absl/container/internal/counting_allocator.h
+++ b/absl/container/internal/counting_allocator.h
@@ -15,7 +15,6 @@
#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
-#include <cassert>
#include <cstdint>
#include <memory>
@@ -31,33 +30,63 @@ namespace container_internal {
// containers - that chain of allocators uses the same state and is
// thus easier to query for aggregate allocation information.
template <typename T>
-class CountingAllocator : public std::allocator<T> {
+class CountingAllocator {
public:
- using Alloc = std::allocator<T>;
- using pointer = typename Alloc::pointer;
- using size_type = typename Alloc::size_type;
+ using Allocator = std::allocator<T>;
+ using AllocatorTraits = std::allocator_traits<Allocator>;
+ using value_type = typename AllocatorTraits::value_type;
+ using pointer = typename AllocatorTraits::pointer;
+ using const_pointer = typename AllocatorTraits::const_pointer;
+ using size_type = typename AllocatorTraits::size_type;
+ using difference_type = typename AllocatorTraits::difference_type;
- CountingAllocator() : bytes_used_(nullptr) {}
- explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
+ CountingAllocator() = default;
+ explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {}
+ CountingAllocator(int64_t* bytes_used, int64_t* instance_count)
+ : bytes_used_(bytes_used), instance_count_(instance_count) {}
template <typename U>
CountingAllocator(const CountingAllocator<U>& x)
- : Alloc(x), bytes_used_(x.bytes_used_) {}
+ : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {}
- pointer allocate(size_type n,
- std::allocator<void>::const_pointer hint = nullptr) {
- assert(bytes_used_ != nullptr);
- *bytes_used_ += n * sizeof(T);
- return Alloc::allocate(n, hint);
+ pointer allocate(
+ size_type n,
+ typename AllocatorTraits::const_void_pointer hint = nullptr) {
+ Allocator allocator;
+ pointer ptr = AllocatorTraits::allocate(allocator, n, hint);
+ if (bytes_used_ != nullptr) {
+ *bytes_used_ += n * sizeof(T);
+ }
+ return ptr;
}
void deallocate(pointer p, size_type n) {
- Alloc::deallocate(p, n);
- assert(bytes_used_ != nullptr);
- *bytes_used_ -= n * sizeof(T);
+ Allocator allocator;
+ AllocatorTraits::deallocate(allocator, p, n);
+ if (bytes_used_ != nullptr) {
+ *bytes_used_ -= n * sizeof(T);
+ }
}
- template<typename U>
+ template <typename U, typename... Args>
+ void construct(U* p, Args&&... args) {
+ Allocator allocator;
+ AllocatorTraits::construct(allocator, p, std::forward<Args>(args)...);
+ if (instance_count_ != nullptr) {
+ *instance_count_ += 1;
+ }
+ }
+
+ template <typename U>
+ void destroy(U* p) {
+ Allocator allocator;
+ AllocatorTraits::destroy(allocator, p);
+ if (instance_count_ != nullptr) {
+ *instance_count_ -= 1;
+ }
+ }
+
+ template <typename U>
class rebind {
public:
using other = CountingAllocator<U>;
@@ -65,7 +94,8 @@ class CountingAllocator : public std::allocator<T> {
friend bool operator==(const CountingAllocator& a,
const CountingAllocator& b) {
- return a.bytes_used_ == b.bytes_used_;
+ return a.bytes_used_ == b.bytes_used_ &&
+ a.instance_count_ == b.instance_count_;
}
friend bool operator!=(const CountingAllocator& a,
@@ -73,7 +103,8 @@ class CountingAllocator : public std::allocator<T> {
return !(a == b);
}
- int64_t* bytes_used_;
+ int64_t* bytes_used_ = nullptr;
+ int64_t* instance_count_ = nullptr;
};
} // namespace container_internal
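The rewrite drops the inheritance from std::allocator (whose member typedefs such as pointer and size_type are deprecated in C++17) and makes both counters optional and null-safe. A minimal usage sketch:

    #include <cstdint>
    #include <vector>

    #include "absl/container/internal/counting_allocator.h"

    void TrackUsage() {
      int64_t bytes = 0;
      int64_t instances = 0;
      absl::container_internal::CountingAllocator<int> alloc(&bytes,
                                                             &instances);
      {
        std::vector<int, absl::container_internal::CountingAllocator<int>> v(
            alloc);
        v.push_back(1);  // both counters go up
      }
      // The vector has released everything; both counters are back to zero.
    }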
diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h
index 401ddf4d..0683422a 100644
--- a/absl/container/internal/hash_function_defaults.h
+++ b/absl/container/internal/hash_function_defaults.h
@@ -53,6 +53,7 @@
#include "absl/base/config.h"
#include "absl/hash/hash.h"
+#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -72,6 +73,9 @@ struct StringHash {
size_t operator()(absl::string_view v) const {
return absl::Hash<absl::string_view>{}(v);
}
+ size_t operator()(const absl::Cord& v) const {
+ return absl::Hash<absl::Cord>{}(v);
+ }
};
// Supports heterogeneous lookup for string-like elements.
@@ -82,6 +86,15 @@ struct StringHashEq {
bool operator()(absl::string_view lhs, absl::string_view rhs) const {
return lhs == rhs;
}
+ bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
+ return lhs == rhs;
+ }
};
};
@@ -89,6 +102,8 @@ template <>
struct HashEq<std::string> : StringHashEq {};
template <>
struct HashEq<absl::string_view> : StringHashEq {};
+template <>
+struct HashEq<absl::Cord> : StringHashEq {};
// Supports heterogeneous lookup for pointers and smart pointers.
template <class T>
diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc
index 2eefc7e0..59576b8e 100644
--- a/absl/container/internal/hash_function_defaults_test.cc
+++ b/absl/container/internal/hash_function_defaults_test.cc
@@ -19,6 +19,9 @@
#include <utility>
#include "gtest/gtest.h"
+#include "absl/random/random.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/cord_test_helpers.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -203,10 +206,91 @@ TYPED_TEST(HashPointer, Works) {
EXPECT_NE(hash(&dummy), hash(cuptr));
}
+TEST(EqCord, Works) {
+ hash_default_eq<absl::Cord> eq;
+ const absl::string_view a_string_view = "a";
+ const absl::Cord a_cord(a_string_view);
+ const absl::string_view b_string_view = "b";
+ const absl::Cord b_cord(b_string_view);
+
+ EXPECT_TRUE(eq(a_cord, a_cord));
+ EXPECT_TRUE(eq(a_cord, a_string_view));
+ EXPECT_TRUE(eq(a_string_view, a_cord));
+ EXPECT_FALSE(eq(a_cord, b_cord));
+ EXPECT_FALSE(eq(a_cord, b_string_view));
+ EXPECT_FALSE(eq(b_string_view, a_cord));
+}
+
+TEST(HashCord, Works) {
+ hash_default_hash<absl::Cord> hash;
+ const absl::string_view a_string_view = "a";
+ const absl::Cord a_cord(a_string_view);
+ const absl::string_view b_string_view = "b";
+ const absl::Cord b_cord(b_string_view);
+
+ EXPECT_EQ(hash(a_cord), hash(a_cord));
+ EXPECT_EQ(hash(b_cord), hash(b_cord));
+ EXPECT_EQ(hash(a_string_view), hash(a_cord));
+ EXPECT_EQ(hash(b_string_view), hash(b_cord));
+ EXPECT_EQ(hash(absl::Cord("")), hash(""));
+ EXPECT_EQ(hash(absl::Cord()), hash(absl::string_view()));
+
+ EXPECT_NE(hash(a_cord), hash(b_cord));
+ EXPECT_NE(hash(a_cord), hash(b_string_view));
+ EXPECT_NE(hash(a_string_view), hash(b_cord));
+ EXPECT_NE(hash(a_string_view), hash(b_string_view));
+}
+
+void NoOpReleaser(absl::string_view data, void* arg) {}
+
+TEST(HashCord, FragmentedCordWorks) {
+ hash_default_hash<absl::Cord> hash;
+ absl::Cord c = absl::MakeFragmentedCord({"a", "b", "c"});
+ EXPECT_FALSE(c.TryFlat().has_value());
+ EXPECT_EQ(hash(c), hash("abc"));
+}
+
+TEST(HashCord, FragmentedLongCordWorks) {
+ hash_default_hash<absl::Cord> hash;
+  // Create some large strings which do not fit on the stack.
+ std::string a(65536, 'a');
+ std::string b(65536, 'b');
+ absl::Cord c = absl::MakeFragmentedCord({a, b});
+ EXPECT_FALSE(c.TryFlat().has_value());
+ EXPECT_EQ(hash(c), hash(a + b));
+}
+
+TEST(HashCord, RandomCord) {
+ hash_default_hash<absl::Cord> hash;
+ auto bitgen = absl::BitGen();
+ for (int i = 0; i < 1000; ++i) {
+ const int number_of_segments = absl::Uniform(bitgen, 0, 10);
+ std::vector<std::string> pieces;
+    for (int s = 0; s < number_of_segments; ++s) {
+ std::string str;
+ str.resize(absl::Uniform(bitgen, 0, 4096));
+ // MSVC needed the explicit return type in the lambda.
+ std::generate(str.begin(), str.end(), [&]() -> char {
+ return static_cast<char>(absl::Uniform<unsigned char>(bitgen));
+ });
+ pieces.push_back(str);
+ }
+ absl::Cord c = absl::MakeFragmentedCord(pieces);
+ EXPECT_EQ(hash(c), hash(std::string(c)));
+ }
+}
+
// Cartesian product of (std::string, absl::string_view)
-// with (std::string, absl::string_view, const char*).
+// with (std::string, absl::string_view, const char*, absl::Cord).
using StringTypesCartesianProduct = Types<
// clang-format off
+ std::pair<absl::Cord, std::string>,
+ std::pair<absl::Cord, absl::string_view>,
+ std::pair<absl::Cord, absl::Cord>,
+ std::pair<absl::Cord, const char*>,
+
+ std::pair<std::string, absl::Cord>,
+ std::pair<absl::string_view, absl::Cord>,
std::pair<absl::string_view, std::string>,
std::pair<absl::string_view, absl::string_view>,
@@ -253,11 +337,11 @@ ABSL_NAMESPACE_END
} // namespace absl
enum Hash : size_t {
- kStd = 0x2, // std::hash
+ kStd = 0x1, // std::hash
#ifdef _MSC_VER
kExtension = kStd, // In MSVC, std::hash == ::hash
#else // _MSC_VER
- kExtension = 0x4, // ::hash (GCC extension)
+ kExtension = 0x2, // ::hash (GCC extension)
#endif // _MSC_VER
};
diff --git a/absl/container/internal/hash_generator_testing.cc b/absl/container/internal/hash_generator_testing.cc
index 75c4db6c..59cc5aac 100644
--- a/absl/container/internal/hash_generator_testing.cc
+++ b/absl/container/internal/hash_generator_testing.cc
@@ -41,8 +41,10 @@ class RandomDeviceSeedSeq {
} // namespace
std::mt19937_64* GetSharedRng() {
- RandomDeviceSeedSeq seed_seq;
- static auto* rng = new std::mt19937_64(seed_seq);
+ static auto* rng = [] {
+ RandomDeviceSeedSeq seed_seq;
+ return new std::mt19937_64(seed_seq);
+ }();
return rng;
}
diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h
index 3e1209c6..46c97b18 100644
--- a/absl/container/internal/hash_policy_traits.h
+++ b/absl/container/internal/hash_policy_traits.h
@@ -17,6 +17,7 @@
#include <cstddef>
#include <memory>
+#include <new>
#include <type_traits>
#include <utility>
@@ -29,15 +30,34 @@ namespace container_internal {
// Defines how slots are initialized/destroyed/moved.
template <class Policy, class = void>
struct hash_policy_traits {
+ // The type of the keys stored in the hashtable.
+ using key_type = typename Policy::key_type;
+
private:
struct ReturnKey {
- // We return `Key` here.
+ // When C++17 is available, we can use std::launder to provide mutable
+ // access to the key for use in node handle.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+ template <class Key,
+ absl::enable_if_t<std::is_lvalue_reference<Key>::value, int> = 0>
+ static key_type& Impl(Key&& k, int) {
+ return *std::launder(
+ const_cast<key_type*>(std::addressof(std::forward<Key>(k))));
+ }
+#endif
+
+ template <class Key>
+ static Key Impl(Key&& k, char) {
+ return std::forward<Key>(k);
+ }
+
// When Key=T&, we forward the lvalue reference.
// When Key=T, we return by value to avoid a dangling reference.
// eg, for string_hash_map.
template <class Key, class... Args>
- Key operator()(Key&& k, const Args&...) const {
- return std::forward<Key>(k);
+ auto operator()(Key&& k, const Args&...) const
+ -> decltype(Impl(std::forward<Key>(k), 0)) {
+ return Impl(std::forward<Key>(k), 0);
}
};
@@ -52,9 +72,6 @@ struct hash_policy_traits {
// The actual object stored in the hash table.
using slot_type = typename Policy::slot_type;
- // The type of the keys stored in the hashtable.
- using key_type = typename Policy::key_type;
-
// The argument type for insertions into the hashtable. This is different
// from value_type for increased performance. See initializer_list constructor
// and insert() member functions for more details.
@@ -156,7 +173,7 @@ struct hash_policy_traits {
// Returns the "key" portion of the slot.
// Used for node handle manipulation.
template <class P = Policy>
- static auto key(slot_type* slot)
+ static auto mutable_key(slot_type* slot)
-> decltype(P::apply(ReturnKey(), element(slot))) {
return P::apply(ReturnKey(), element(slot));
}
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 56447251..e4484fbb 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -67,6 +67,7 @@ void HashtablezInfo::PrepareForSampling() {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
+ num_rehashes.store(0, std::memory_order_relaxed);
max_probe_length.store(0, std::memory_order_relaxed);
total_probe_length.store(0, std::memory_order_relaxed);
hashes_bitwise_or.store(0, std::memory_order_relaxed);
@@ -226,7 +227,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
  // SwissTables probe in groups of 16, so scale this to count probes in
  // items rather than as an offset from the desired slot.
size_t probe_length = distance_from_desired;
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
probe_length /= 16;
#else
probe_length /= 8;
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 34d5e572..394348da 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -73,6 +73,7 @@ struct HashtablezInfo {
std::atomic<size_t> capacity;
std::atomic<size_t> size;
std::atomic<size_t> num_erases;
+ std::atomic<size_t> num_rehashes;
std::atomic<size_t> max_probe_length;
std::atomic<size_t> total_probe_length;
std::atomic<size_t> hashes_bitwise_or;
@@ -98,13 +99,18 @@ struct HashtablezInfo {
};
inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
total_probe_length /= 16;
#else
total_probe_length /= 8;
#endif
info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
+ // There is only one concurrent writer, so `load` then `store` is sufficient
+ // instead of using `fetch_add`.
+ info->num_rehashes.store(
+ 1 + info->num_rehashes.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
}
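The load-then-store pattern is safe because each HashtablezInfo has exactly one writer, the owning table. In sketch form, with an illustrative name:

    #include <atomic>
    #include <cstddef>

    // Single-writer counter: a relaxed load followed by a relaxed store
    // cannot lose increments when only one thread ever writes, and it avoids
    // the more expensive atomic fetch_add on the hot path.
    void IncrementFromOwner(std::atomic<size_t>& counter) {
      counter.store(counter.load(std::memory_order_relaxed) + 1,
                    std::memory_order_relaxed);
    }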
inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
@@ -113,7 +119,8 @@ inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
info->capacity.store(capacity, std::memory_order_relaxed);
if (size == 0) {
// This is a clear, reset the total/num_erases too.
- RecordRehashSlow(info, 0);
+ info->total_probe_length.store(0, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
}
}
@@ -122,12 +129,21 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
inline void RecordEraseSlow(HashtablezInfo* info) {
info->size.fetch_sub(1, std::memory_order_relaxed);
- info->num_erases.fetch_add(1, std::memory_order_relaxed);
+ // There is only one concurrent writer, so `load` then `store` is sufficient
+ // instead of using `fetch_add`.
+ info->num_erases.store(
+ 1 + info->num_erases.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
}
HashtablezInfo* SampleSlow(int64_t* next_sample);
void UnsampleSlow(HashtablezInfo* info);
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+#if (ABSL_PER_THREAD_TLS == 1) && !defined(ABSL_BUILD_DLL) && \
+    !defined(ABSL_CONSUME_DLL)
+#define ABSL_INTERNAL_HASHTABLEZ_SAMPLE
+#endif
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandle {
public:
explicit HashtablezInfoHandle() : info_(nullptr) {}
@@ -179,19 +195,27 @@ class HashtablezInfoHandle {
friend class HashtablezInfoHandlePeer;
HashtablezInfo* info_;
};
+#else
+// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can
+// be removed by the linker, in order to reduce the binary size.
+class HashtablezInfoHandle {
+ public:
+ explicit HashtablezInfoHandle() = default;
+ explicit HashtablezInfoHandle(std::nullptr_t) {}
-#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
-#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
+ inline void RecordRehash(size_t /*total_probe_length*/) {}
+ inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
+ inline void RecordErase() {}
-#if (ABSL_PER_THREAD_TLS == 1) && !defined(ABSL_BUILD_DLL) && \
- !defined(ABSL_CONSUME_DLL)
-#define ABSL_INTERNAL_HASHTABLEZ_SAMPLE
-#endif
+ friend inline void swap(HashtablezInfoHandle& /*lhs*/,
+ HashtablezInfoHandle& /*rhs*/) {}
+};
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
-#endif // ABSL_PER_THREAD_TLS
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index 36f5ccdd..8d10a1e9 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -29,7 +29,7 @@
#include "absl/time/clock.h"
#include "absl/time/time.h"
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
constexpr int kProbeLength = 16;
#else
constexpr int kProbeLength = 8;
@@ -38,6 +38,7 @@ constexpr int kProbeLength = 8;
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandlePeer {
public:
static bool IsSampled(const HashtablezInfoHandle& h) {
@@ -46,6 +47,13 @@ class HashtablezInfoHandlePeer {
static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
};
+#else
+class HashtablezInfoHandlePeer {
+ public:
+ static bool IsSampled(const HashtablezInfoHandle&) { return false; }
+ static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; }
+};
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
namespace {
using ::absl::synchronization_internal::ThreadPool;
@@ -76,6 +84,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.num_rehashes.load(), 0);
EXPECT_EQ(info.max_probe_length.load(), 0);
EXPECT_EQ(info.total_probe_length.load(), 0);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
@@ -95,6 +104,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.num_rehashes.load(), 0);
EXPECT_EQ(info.max_probe_length.load(), 0);
EXPECT_EQ(info.total_probe_length.load(), 0);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
@@ -167,9 +177,10 @@ TEST(HashtablezInfoTest, RecordRehash) {
EXPECT_EQ(info.size.load(), 2);
EXPECT_EQ(info.total_probe_length.load(), 3);
EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.num_rehashes.load(), 1);
}
-#if defined(ABSL_HASHTABLEZ_SAMPLE)
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(HashtablezSamplerTest, SmallSampleParameter) {
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
@@ -213,7 +224,6 @@ TEST(HashtablezSamplerTest, Sample) {
}
EXPECT_NEAR(sample_rate, 0.01, 0.005);
}
-#endif
TEST(HashtablezSamplerTest, Handle) {
auto& sampler = HashtablezSampler::Global();
@@ -243,6 +253,8 @@ TEST(HashtablezSamplerTest, Handle) {
});
EXPECT_FALSE(found);
}
+#endif
+
TEST(HashtablezSamplerTest, Registration) {
HashtablezSampler sampler;
diff --git a/absl/container/internal/have_sse.h b/absl/container/internal/have_sse.h
index 43414418..e75e1a16 100644
--- a/absl/container/internal/have_sse.h
+++ b/absl/container/internal/have_sse.h
@@ -16,33 +16,34 @@
#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-#ifndef SWISSTABLE_HAVE_SSE2
+#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
#if defined(__SSE2__) || \
(defined(_MSC_VER) && \
(defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
-#define SWISSTABLE_HAVE_SSE2 1
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1
#else
-#define SWISSTABLE_HAVE_SSE2 0
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0
#endif
#endif
-#ifndef SWISSTABLE_HAVE_SSSE3
+#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
#ifdef __SSSE3__
-#define SWISSTABLE_HAVE_SSSE3 1
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1
#else
-#define SWISSTABLE_HAVE_SSSE3 0
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0
#endif
#endif
-#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \
+ !ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
#error "Bad configuration!"
#endif
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
#include <emmintrin.h>
#endif
-#if SWISSTABLE_HAVE_SSSE3
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
#include <tmmintrin.h>
#endif
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
index 69cc85dd..23367833 100644
--- a/absl/container/internal/layout.h
+++ b/absl/container/internal/layout.h
@@ -163,6 +163,7 @@
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
+
#include <ostream>
#include <string>
#include <tuple>
@@ -170,15 +171,16 @@
#include <typeinfo>
#include <utility>
-#ifdef ADDRESS_SANITIZER
-#include <sanitizer/asan_interface.h>
-#endif
-
+#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "absl/utility/utility.h"
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
#if defined(__GXX_RTTI)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
#endif
@@ -614,7 +616,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
void PoisonPadding(const Char* p) const {
static_assert(N < NumOffsets, "Index out of bounds");
(void)p;
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
PoisonPadding<Char, N - 1>(p);
// The `if` is an optimization. It doesn't affect the observable behaviour.
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc
index 8f3628a1..757272f1 100644
--- a/absl/container/internal/layout_test.cc
+++ b/absl/container/internal/layout_test.cc
@@ -17,6 +17,7 @@
// We need ::max_align_t because some libstdc++ versions don't provide
// std::max_align_t
#include <stddef.h>
+
#include <cstdint>
#include <memory>
#include <sstream>
@@ -24,6 +25,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/types/span.h"
@@ -1314,7 +1316,7 @@ struct Region {
};
void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
for (size_t i = 0; i != n; ++i) {
EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i));
}
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index ca7be8d8..ec13a2f7 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -104,6 +104,7 @@
#include "absl/base/internal/bits.h"
#include "absl/base/internal/endian.h"
+#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
@@ -121,6 +122,16 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+template <typename AllocType>
+void SwapAlloc(AllocType& lhs, AllocType& rhs,
+ std::true_type /* propagate_on_container_swap */) {
+ using std::swap;
+ swap(lhs, rhs);
+}
+template <typename AllocType>
+void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
+ std::false_type /* propagate_on_container_swap */) {}
+
template <size_t Width>
class probe_seq {
public:
@@ -168,10 +179,14 @@ struct IsDecomposable<
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
-constexpr bool IsNoThrowSwappable() {
+constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
using std::swap;
return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
}
+template <class T>
+constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
+ return false;
+}
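SwapAlloc and the reworked IsNoThrowSwappable both select an overload at compile time by tag dispatch on a trait, mirroring how swap() below feeds in propagate_on_container_swap. A usage sketch:

    #include <memory>

    #include "absl/container/internal/raw_hash_set.h"

    template <class Alloc>
    void MaybeSwapAllocators(Alloc& lhs, Alloc& rhs) {
      // Resolves to the swapping overload only when the allocator opts in
      // via propagate_on_container_swap; otherwise the no-op overload runs.
      absl::container_internal::SwapAlloc(
          lhs, rhs,
          typename std::allocator_traits<Alloc>::propagate_on_container_swap{});
    }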
template <typename T>
int TrailingZeros(T x) {
@@ -312,7 +327,7 @@ inline bool IsFull(ctrl_t c) { return c >= 0; }
inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
@@ -346,7 +361,7 @@ struct GroupSse2Impl {
// Returns a bitmask representing the positions of empty slots.
BitMask<uint32_t, kWidth> MatchEmpty() const {
-#if SWISSTABLE_HAVE_SSSE3
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
// This only works because kEmpty is -128.
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
@@ -372,7 +387,7 @@ struct GroupSse2Impl {
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
auto msbs = _mm_set1_epi8(static_cast<char>(-128));
auto x126 = _mm_set1_epi8(126);
-#if SWISSTABLE_HAVE_SSSE3
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
auto zero = _mm_setzero_si128();
@@ -384,7 +399,7 @@ struct GroupSse2Impl {
__m128i ctrl;
};
-#endif // SWISSTABLE_HAVE_SSE2
+#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
struct GroupPortableImpl {
static constexpr size_t kWidth = 8;
@@ -438,7 +453,7 @@ struct GroupPortableImpl {
uint64_t ctrl;
};
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
using Group = GroupSse2Impl;
#else
using Group = GroupPortableImpl;
@@ -496,6 +511,18 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}
+inline void AssertIsFull(ctrl_t* ctrl) {
+ ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, or the table might have rehashed.");
+}
+
+inline void AssertIsValid(ctrl_t* ctrl) {
+ ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, or the table might have rehashed.");
+}
+
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
@@ -510,7 +537,8 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
// if they are equal, false if they are not. If two keys compare equal, then
// their hash values as defined by Hash MUST be equal.
//
-// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which
+// Allocator: an Allocator
+// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
// the storage of the hashtable will be allocated and the elements will be
// constructed and destroyed.
template <class Policy, class Hash, class Eq, class Alloc>
@@ -616,7 +644,7 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
- assert_is_full();
+ AssertIsFull(ctrl_);
return PolicyTraits::element(slot_);
}
@@ -625,7 +653,7 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
iterator& operator++() {
- assert_is_full();
+ AssertIsFull(ctrl_);
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -639,8 +667,8 @@ class raw_hash_set {
}
friend bool operator==(const iterator& a, const iterator& b) {
- a.assert_is_valid();
- b.assert_is_valid();
+ AssertIsValid(a.ctrl_);
+ AssertIsValid(b.ctrl_);
return a.ctrl_ == b.ctrl_;
}
friend bool operator!=(const iterator& a, const iterator& b) {
@@ -648,24 +676,19 @@ class raw_hash_set {
}
private:
- iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end()
- iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {}
-
- void assert_is_full() const { assert(IsFull(*ctrl_)); }
- void assert_is_valid() const {
- assert(!ctrl_ || IsFull(*ctrl_) || *ctrl_ == kSentinel);
+ iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
+ // This assumption helps the compiler know that any non-end iterator is
+ // not equal to any end iterator.
+ ABSL_INTERNAL_ASSUME(ctrl != nullptr);
}
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
- // ctrl is not necessarily aligned to Group::kWidth. It is also likely
- // to read past the space for ctrl bytes and into slots. This is ok
- // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there
- // is no way to read outside the combined slot array.
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
ctrl_ += shift;
slot_ += shift;
}
+ if (ABSL_PREDICT_FALSE(*ctrl_ == kSentinel)) ctrl_ = nullptr;
}
ctrl_t* ctrl_ = nullptr;
@@ -907,12 +930,12 @@ class raw_hash_set {
it.skip_empty_or_deleted();
return it;
}
- iterator end() { return {ctrl_ + capacity_}; }
+ iterator end() { return {}; }
const_iterator begin() const {
return const_cast<raw_hash_set*>(this)->begin();
}
- const_iterator end() const { return const_cast<raw_hash_set*>(this)->end(); }
+ const_iterator end() const { return {}; }
const_iterator cbegin() const { return begin(); }
const_iterator cend() const { return end(); }
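Making end() a default (null) iterator, and nulling ctrl_ when skip_empty_or_deleted reaches the sentinel, pairs with the ABSL_INTERNAL_ASSUME above so that a comparison against end() lowers to a null test instead of a pointer computed from capacity_. For example:

    #include <cstddef>

    #include "absl/container/flat_hash_set.h"

    size_t CountByIteration(const absl::flat_hash_set<int>& s) {
      size_t n = 0;
      // `it != s.end()` is now effectively a null check on the internal ctrl
      // pointer, so no end bound has to be recomputed on each iteration.
      for (auto it = s.begin(); it != s.end(); ++it) ++n;
      return n;
    }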
@@ -1171,7 +1194,7 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- it.assert_is_full();
+ AssertIsFull(it.ctrl_);
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
@@ -1205,7 +1228,7 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- position.inner_.assert_is_full();
+ AssertIsFull(position.inner_.ctrl_);
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
@@ -1222,8 +1245,8 @@ class raw_hash_set {
void swap(raw_hash_set& that) noexcept(
IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
- (!AllocTraits::propagate_on_container_swap::value ||
- IsNoThrowSwappable<allocator_type>())) {
+ IsNoThrowSwappable<allocator_type>(
+ typename AllocTraits::propagate_on_container_swap{})) {
using std::swap;
swap(ctrl_, that.ctrl_);
swap(slots_, that.slots_);
@@ -1233,12 +1256,8 @@ class raw_hash_set {
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
swap(infoz_, that.infoz_);
- if (AllocTraits::propagate_on_container_swap::value) {
- swap(alloc_ref(), that.alloc_ref());
- } else {
- // If the allocators do not compare equal it is officially undefined
- // behavior. We choose to do nothing.
- }
+ SwapAlloc(alloc_ref(), that.alloc_ref(),
+ typename AllocTraits::propagate_on_container_swap{});
}
void rehash(size_t n) {
@@ -1308,6 +1327,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
seq.next();
+ assert(seq.index() < capacity_ && "full table!");
}
}
template <class K = key_type>
@@ -1659,8 +1679,8 @@ class raw_hash_set {
#endif
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
- assert(seq.index() < capacity_ && "full table!");
seq.next();
+ assert(seq.index() < capacity_ && "full table!");
}
}
@@ -1691,6 +1711,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
seq.next();
+ assert(seq.index() < capacity_ && "full table!");
}
return {prepare_insert(hash), true};
}
diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc
index 7ac4b9f7..1a036085 100644
--- a/absl/container/internal/raw_hash_set_allocator_test.cc
+++ b/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -424,6 +424,77 @@ TEST_F(PropagateOnAll, Swap) {
EXPECT_EQ(0, it->num_copies());
}
+// This allocator is similar to std::pmr::polymorphic_allocator.
+// Note the disabled assignment.
+template <class T>
+class PAlloc {
+ template <class>
+ friend class PAlloc;
+
+ public:
+ // types
+ using value_type = T;
+
+ // traits
+ using propagate_on_container_swap = std::false_type;
+
+ PAlloc() noexcept = default;
+ explicit PAlloc(size_t id) noexcept : id_(id) {}
+ PAlloc(const PAlloc&) noexcept = default;
+ PAlloc& operator=(const PAlloc&) noexcept = delete;
+
+ template <class U>
+ PAlloc(const PAlloc<U>& that) noexcept : id_(that.id_) {} // NOLINT
+
+ template <class U>
+ struct rebind {
+ using other = PAlloc<U>;
+ };
+
+ constexpr PAlloc select_on_container_copy_construction() const { return {}; }
+
+ // public member functions
+ T* allocate(size_t) { return new T; }
+ void deallocate(T* p, size_t) { delete p; }
+
+ friend bool operator==(const PAlloc& a, const PAlloc& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const PAlloc& a, const PAlloc& b) { return !(a == b); }
+
+ private:
+ size_t id_ = std::numeric_limits<size_t>::max();
+};
+
+TEST(NoPropagateOn, Swap) {
+ using PA = PAlloc<char>;
+ using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+ Table t1(PA{1}), t2(PA{2});
+ swap(t1, t2);
+ EXPECT_EQ(t1.get_allocator(), PA(1));
+ EXPECT_EQ(t2.get_allocator(), PA(2));
+}
+
+TEST(NoPropagateOn, CopyConstruct) {
+ using PA = PAlloc<char>;
+ using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+ Table t1(PA{1}), t2(t1);
+ EXPECT_EQ(t1.get_allocator(), PA(1));
+ EXPECT_EQ(t2.get_allocator(), PA());
+}
+
+TEST(NoPropagateOn, Assignment) {
+ using PA = PAlloc<char>;
+ using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+ Table t1(PA{1}), t2(PA{2});
+ t1 = t2;
+ EXPECT_EQ(t1.get_allocator(), PA(1));
+ EXPECT_EQ(t2.get_allocator(), PA(2));
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index a96ae68a..f5ae83c4 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -26,6 +26,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
+#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/container_memory.h"
@@ -1666,9 +1667,9 @@ TEST(Nodes, EmptyNodeType) {
}
TEST(Nodes, ExtractInsert) {
- constexpr char k0[] = "Very long std::string zero.";
- constexpr char k1[] = "Very long std::string one.";
- constexpr char k2[] = "Very long std::string two.";
+ constexpr char k0[] = "Very long string zero.";
+ constexpr char k1[] = "Very long string one.";
+ constexpr char k2[] = "Very long string two.";
StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
EXPECT_THAT(t,
UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));
@@ -1791,11 +1792,11 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
- constexpr char kDeathMsg[] = "IsFull";
+ constexpr char kDeathMsg[] = "Invalid operation on iterator";
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}
-#if defined(ABSL_HASHTABLEZ_SAMPLE)
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(RawHashSamplerTest, Sample) {
// Enable the feature even if the prod default is off.
SetHashtablezEnabled(true);
@@ -1816,7 +1817,7 @@ TEST(RawHashSamplerTest, Sample) {
EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
0.01, 0.005);
}
-#endif // ABSL_HASHTABLEZ_SAMPLER
+#endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
// Enable the feature even if the prod default is off.
@@ -1839,7 +1840,7 @@ TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
0.00, 0.001);
}
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
TEST(Sanitizer, PoisoningUnused) {
IntTable t;
t.reserve(5);
@@ -1863,7 +1864,7 @@ TEST(Sanitizer, PoisoningOnErase) {
t.erase(0);
EXPECT_TRUE(__asan_address_is_poisoned(&v));
}
-#endif // ADDRESS_SANITIZER
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
} // namespace
} // namespace container_internal
diff --git a/absl/container/internal/unordered_map_modifiers_test.h b/absl/container/internal/unordered_map_modifiers_test.h
index b8c513f1..8c9ca779 100644
--- a/absl/container/internal/unordered_map_modifiers_test.h
+++ b/absl/container/internal/unordered_map_modifiers_test.h
@@ -286,6 +286,8 @@ class UniquePtrModifiersTest : public ::testing::Test {
}
};
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(UniquePtrModifiersTest);
+
TYPED_TEST_SUITE_P(UniquePtrModifiersTest);
// Test that we do not move from rvalue arguments if an insertion does not