Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/BUILD.bazel | 23
-rw-r--r--  absl/container/CMakeLists.txt | 18
-rw-r--r--  absl/container/btree_benchmark.cc | 52
-rw-r--r--  absl/container/btree_map.h | 16
-rw-r--r--  absl/container/btree_set.h | 6
-rw-r--r--  absl/container/btree_test.cc | 272
-rw-r--r--  absl/container/btree_test.h | 11
-rw-r--r--  absl/container/fixed_array.h | 67
-rw-r--r--  absl/container/fixed_array_exception_safety_test.cc | 3
-rw-r--r--  absl/container/fixed_array_test.cc | 119
-rw-r--r--  absl/container/flat_hash_map.h | 8
-rw-r--r--  absl/container/flat_hash_map_test.cc | 29
-rw-r--r--  absl/container/flat_hash_set.h | 3
-rw-r--r--  absl/container/flat_hash_set_test.cc | 12
-rw-r--r--  absl/container/inlined_vector.h | 69
-rw-r--r--  absl/container/inlined_vector_benchmark.cc | 2
-rw-r--r--  absl/container/inlined_vector_test.cc | 17
-rw-r--r--  absl/container/internal/btree.h | 790
-rw-r--r--  absl/container/internal/btree_container.h | 229
-rw-r--r--  absl/container/internal/common.h | 8
-rw-r--r--  absl/container/internal/compressed_tuple.h | 41
-rw-r--r--  absl/container/internal/compressed_tuple_test.cc | 4
-rw-r--r--  absl/container/internal/container_memory.h | 78
-rw-r--r--  absl/container/internal/container_memory_test.cc | 66
-rw-r--r--  absl/container/internal/counting_allocator.h | 69
-rw-r--r--  absl/container/internal/hash_function_defaults.h | 15
-rw-r--r--  absl/container/internal/hash_function_defaults_test.cc | 90
-rw-r--r--  absl/container/internal/hash_generator_testing.cc | 6
-rw-r--r--  absl/container/internal/hash_policy_traits.h | 31
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc | 3
-rw-r--r--  absl/container/internal/hashtablez_sampler.h | 46
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc | 18
-rw-r--r--  absl/container/internal/have_sse.h | 19
-rw-r--r--  absl/container/internal/layout.h | 12
-rw-r--r--  absl/container/internal/layout_test.cc | 4
-rw-r--r--  absl/container/internal/raw_hash_set.h | 89
-rw-r--r--  absl/container/internal/raw_hash_set_allocator_test.cc | 71
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc | 17
-rw-r--r--  absl/container/internal/unordered_map_modifiers_test.h | 2
-rw-r--r--  absl/container/node_hash_map.h | 14
-rw-r--r--  absl/container/node_hash_map_test.cc | 15
-rw-r--r--  absl/container/node_hash_set.h | 9
42 files changed, 1610 insertions(+), 863 deletions(-)
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index f2217140..8e72ad03 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -14,7 +14,7 @@
# limitations under the License.
#
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
+load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
load(
"//absl:copts/configure_copts.bzl",
"ABSL_DEFAULT_COPTS",
@@ -24,7 +24,7 @@ load(
package(default_visibility = ["//visibility:public"])
-licenses(["notice"]) # Apache 2.0
+licenses(["notice"])
cc_library(
name = "compressed_tuple",
@@ -60,6 +60,7 @@ cc_library(
deps = [
":compressed_tuple",
"//absl/algorithm",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/base:dynamic_annotations",
"//absl/base:throw_delegate",
@@ -73,7 +74,9 @@ cc_test(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
+ ":counting_allocator",
":fixed_array",
+ "//absl/base:config",
"//absl/base:exception_testing",
"//absl/hash:hash_testing",
"//absl/memory",
@@ -153,6 +156,7 @@ cc_test(
":counting_allocator",
":inlined_vector",
":test_instance_tracker",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/base:exception_testing",
"//absl/base:raw_logging_internal",
@@ -255,6 +259,7 @@ cc_test(
":unordered_map_lookup_test",
":unordered_map_members_test",
":unordered_map_modifiers_test",
+ "//absl/base:raw_logging_internal",
"//absl/types:any",
"@com_google_googletest//:gtest_main",
],
@@ -288,6 +293,7 @@ cc_test(
":unordered_set_lookup_test",
":unordered_set_members_test",
":unordered_set_modifiers_test",
+ "//absl/base:raw_logging_internal",
"//absl/memory",
"//absl/strings",
"@com_google_googletest//:gtest_main",
@@ -363,7 +369,9 @@ cc_library(
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
+ "//absl/base:config",
"//absl/memory",
+ "//absl/meta:type_traits",
"//absl/utility",
],
)
@@ -376,6 +384,7 @@ cc_test(
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":container_memory",
+ ":test_instance_tracker",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
@@ -390,6 +399,7 @@ cc_library(
"//absl/base:config",
"//absl/hash",
"//absl/strings",
+ "//absl/strings:cord",
],
)
@@ -402,7 +412,10 @@ cc_test(
deps = [
":hash_function_defaults",
"//absl/hash",
+ "//absl/random",
"//absl/strings",
+ "//absl/strings:cord",
+ "//absl/strings:cord_test_helpers",
"@com_google_googletest//:gtest_main",
],
)
@@ -609,6 +622,7 @@ cc_test(
":hashtable_debug",
":raw_hash_set",
"//absl/base",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/base:raw_logging_internal",
"//absl/strings",
@@ -636,6 +650,7 @@ cc_library(
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/meta:type_traits",
"//absl/strings",
@@ -654,6 +669,7 @@ cc_test(
visibility = ["//visibility:private"],
deps = [
":layout",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/base:raw_logging_internal",
"//absl/types:span",
@@ -828,6 +844,7 @@ cc_library(
"//absl/memory",
"//absl/meta:type_traits",
"//absl/strings",
+ "//absl/strings:cord",
"//absl/types:compare",
"//absl/utility",
],
@@ -844,6 +861,7 @@ cc_library(
":btree",
":flat_hash_set",
"//absl/strings",
+ "//absl/strings:cord",
"//absl/time",
],
)
@@ -895,6 +913,7 @@ cc_binary(
"//absl/flags:flag",
"//absl/hash",
"//absl/memory",
+ "//absl/strings:cord",
"//absl/strings:str_format",
"//absl/time",
"@com_github_google_benchmark//:benchmark_main",
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index e702ba85..eb202c45 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -40,6 +40,7 @@ absl_cc_library(
absl::compare
absl::compressed_tuple
absl::container_memory
+ absl::cord
absl::core_headers
absl::layout
absl::memory
@@ -60,6 +61,7 @@ absl_cc_library(
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::btree
+ absl::cord
absl::flat_hash_set
absl::strings
absl::time
@@ -129,6 +131,7 @@ absl_cc_library(
DEPS
absl::compressed_tuple
absl::algorithm
+ absl::config
absl::core_headers
absl::dynamic_annotations
absl::throw_delegate
@@ -145,6 +148,8 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::fixed_array
+ absl::counting_allocator
+ absl::config
absl::exception_testing
absl::hash_testing
absl::memory
@@ -219,6 +224,7 @@ absl_cc_test(
absl::counting_allocator
absl::inlined_vector
absl::test_instance_tracker
+ absl::config
absl::core_headers
absl::exception_testing
absl::hash_testing
@@ -299,6 +305,7 @@ absl_cc_test(
absl::unordered_map_members_test
absl::unordered_map_modifiers_test
absl::any
+ absl::raw_logging_internal
gmock_main
)
@@ -335,6 +342,7 @@ absl_cc_test(
absl::unordered_set_members_test
absl::unordered_set_modifiers_test
absl::memory
+ absl::raw_logging_internal
absl::strings
gmock_main
)
@@ -416,7 +424,9 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
+ absl::config
absl::memory
+ absl::type_traits
absl::utility
PUBLIC
)
@@ -431,6 +441,7 @@ absl_cc_test(
DEPS
absl::container_memory
absl::strings
+ absl::test_instance_tracker
gmock_main
)
@@ -443,6 +454,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
+ absl::cord
absl::hash
absl::strings
PUBLIC
@@ -456,8 +468,11 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
+ absl::cord
+ absl::cord_test_helpers
absl::hash_function_defaults
absl::hash
+ absl::random_random
absl::strings
gmock_main
)
@@ -683,6 +698,7 @@ absl_cc_test(
absl::hashtable_debug
absl::raw_hash_set
absl::base
+ absl::config
absl::core_headers
absl::raw_logging_internal
absl::strings
@@ -711,6 +727,7 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
+ absl::config
absl::core_headers
absl::meta
absl::strings
@@ -728,6 +745,7 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::layout
+ absl::config
absl::core_headers
absl::raw_logging_internal
absl::span
diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc
index 4af92f9f..46798676 100644
--- a/absl/container/btree_benchmark.cc
+++ b/absl/container/btree_benchmark.cc
@@ -36,6 +36,7 @@
#include "absl/flags/flag.h"
#include "absl/hash/hash.h"
#include "absl/memory/memory.h"
+#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "benchmark/benchmark.h"
@@ -133,6 +134,27 @@ void BM_InsertEnd(benchmark::State& state) {
}
}
+// Benchmark inserting the first few elements in a container. In b-tree, this is
+// when the root node grows.
+template <typename T>
+void BM_InsertSmall(benchmark::State& state) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+
+ const int kSize = 8;
+ std::vector<V> values = GenerateValues<V>(kSize);
+ T container;
+
+ while (state.KeepRunningBatch(kSize)) {
+ for (int i = 0; i < kSize; ++i) {
+ benchmark::DoNotOptimize(container.insert(values[i]));
+ }
+ state.PauseTiming();
+ // Do not measure the time it takes to clear the container.
+ container.clear();
+ state.ResumeTiming();
+ }
+}
+
template <typename T>
void BM_LookupImpl(benchmark::State& state, bool sorted) {
using V = typename remove_pair_const<typename T::value_type>::type;
@@ -438,6 +460,7 @@ using StdString = std::string;
STL_ORDERED_TYPES(int32_t);
STL_ORDERED_TYPES(int64_t);
STL_ORDERED_TYPES(StdString);
+STL_ORDERED_TYPES(Cord);
STL_ORDERED_TYPES(Time);
#define STL_UNORDERED_TYPES(value) \
@@ -458,6 +481,8 @@ STL_ORDERED_TYPES(Time);
using stl_unordered_multimap_##value = \
std::unordered_multimap<value, intptr_t, hash>
+STL_UNORDERED_TYPES_CUSTOM_HASH(Cord, absl::Hash<absl::Cord>);
+
STL_UNORDERED_TYPES(int32_t);
STL_UNORDERED_TYPES(int64_t);
STL_UNORDERED_TYPES(StdString);
@@ -478,6 +503,7 @@ STL_UNORDERED_TYPES_CUSTOM_HASH(Time, absl::Hash<absl::Time>);
BTREE_TYPES(int32_t);
BTREE_TYPES(int64_t);
BTREE_TYPES(StdString);
+BTREE_TYPES(Cord);
BTREE_TYPES(Time);
#define MY_BENCHMARK4(type, func) \
@@ -488,6 +514,7 @@ BTREE_TYPES(Time);
MY_BENCHMARK4(type, Insert); \
MY_BENCHMARK4(type, InsertSorted); \
MY_BENCHMARK4(type, InsertEnd); \
+ MY_BENCHMARK4(type, InsertSmall); \
MY_BENCHMARK4(type, Lookup); \
MY_BENCHMARK4(type, FullLookup); \
MY_BENCHMARK4(type, Delete); \
@@ -526,6 +553,7 @@ BTREE_TYPES(Time);
MY_BENCHMARK(int32_t);
MY_BENCHMARK(int64_t);
MY_BENCHMARK(StdString);
+MY_BENCHMARK(Cord);
MY_BENCHMARK(Time);
// Define a type whose size and cost of moving are independently customizable.
@@ -538,19 +566,19 @@ struct BigType {
BigType() : BigType(0) {}
explicit BigType(int x) { std::iota(values.begin(), values.end(), x); }
- void Copy(const BigType& x) {
- for (int i = 0; i < Size && i < Copies; ++i) values[i] = x.values[i];
+ void Copy(const BigType& other) {
+ for (int i = 0; i < Size && i < Copies; ++i) values[i] = other.values[i];
// If Copies > Size, do extra copies.
for (int i = Size, idx = 0; i < Copies; ++i) {
- int64_t tmp = x.values[idx];
+ int64_t tmp = other.values[idx];
benchmark::DoNotOptimize(tmp);
idx = idx + 1 == Size ? 0 : idx + 1;
}
}
- BigType(const BigType& x) { Copy(x); }
- BigType& operator=(const BigType& x) {
- Copy(x);
+ BigType(const BigType& other) { Copy(other); }
+ BigType& operator=(const BigType& other) {
+ Copy(other);
return *this;
}
@@ -641,14 +669,14 @@ struct BigTypePtr {
explicit BigTypePtr(int x) {
ptr = absl::make_unique<BigType<Size, Size>>(x);
}
- BigTypePtr(const BigTypePtr& x) {
- ptr = absl::make_unique<BigType<Size, Size>>(*x.ptr);
+ BigTypePtr(const BigTypePtr& other) {
+ ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
}
- BigTypePtr(BigTypePtr&& x) noexcept = default;
- BigTypePtr& operator=(const BigTypePtr& x) {
- ptr = absl::make_unique<BigType<Size, Size>>(*x.ptr);
+ BigTypePtr(BigTypePtr&& other) noexcept = default;
+ BigTypePtr& operator=(const BigTypePtr& other) {
+ ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
}
- BigTypePtr& operator=(BigTypePtr&& x) noexcept = default;
+ BigTypePtr& operator=(BigTypePtr&& other) noexcept = default;
bool operator<(const BigTypePtr& other) const { return *ptr < *other.ptr; }
bool operator==(const BigTypePtr& other) const { return *ptr == *other.ptr; }
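The new BM_InsertSmall above pairs `KeepRunningBatch` with `PauseTiming`/`ResumeTiming` so that only the batch of insertions is measured and the `clear()` that resets state between batches is not. A minimal self-contained sketch of the same pattern, using a plain `std::set<int>` as a stand-in for the templated container (the name `BM_InsertSmallSketch` is illustrative):

    #include <set>

    #include "benchmark/benchmark.h"

    // Times batches of 8 insertions into a fresh container; the reset
    // between batches is excluded from the measurement.
    static void BM_InsertSmallSketch(benchmark::State& state) {
      constexpr int kBatch = 8;
      std::set<int> container;
      while (state.KeepRunningBatch(kBatch)) {
        for (int i = 0; i < kBatch; ++i) {
          benchmark::DoNotOptimize(container.insert(i));
        }
        state.PauseTiming();  // do not measure the clear()
        container.clear();
        state.ResumeTiming();
      }
    }
    BENCHMARK(BM_InsertSmallSketch);
    BENCHMARK_MAIN();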
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index d23f4ee5..abc09b0a 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -185,7 +185,7 @@ class btree_map
// template <typename K> size_type erase(const K& key):
//
// Erases the element with the matching key, if it exists, returning the
- // number of elements erased.
+ // number of elements erased (0 or 1).
using Base::erase;
// btree_map::insert()
@@ -318,13 +318,18 @@ class btree_map
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
- // template <typename K> node_type extract(const K& x):
+ // template <typename K> node_type extract(const K& k):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `btree_map`
// does not contain an element with a matching key, this function returns an
// empty node handle.
//
+ // NOTE: when compiled with a C++ standard earlier than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
+ //
// NOTE: In this context, `node_type` refers to the C++17 concept of a
// move-only type that owns and provides access to the elements in associative
// containers (https://en.cppreference.com/w/cpp/container/node_handle).
@@ -645,13 +650,18 @@ class btree_multimap
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
- // template <typename K> node_type extract(const K& x):
+ // template <typename K> node_type extract(const K& k):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `btree_multimap`
// does not contain an element with a matching key, this function returns an
// empty node handle.
//
+ // NOTE: when compiled with a C++ standard earlier than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
+ //
// NOTE: In this context, `node_type` refers to the C++17 concept of a
// move-only type that owns and provides access to the elements in associative
// containers (https://en.cppreference.com/w/cpp/container/node_handle).
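For illustration, a compile-only sketch of the extract/insert round trip the NOTE above describes, assuming a C++17 build (where `node_type::key()` returns a mutable reference); the map contents are made up:

    #include <string>
    #include <utility>

    #include "absl/container/btree_map.h"

    int main() {
      absl::btree_map<std::string, int> m = {{"key1", 1}};
      auto nh = m.extract("key1");  // node handle now owns the element
      nh.key() = "key";             // mutable key access; requires C++17
      m.insert(std::move(nh));      // reinsert without copying the mapped value
    }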
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
index 127fb940..21ef0a03 100644
--- a/absl/container/btree_set.h
+++ b/absl/container/btree_set.h
@@ -183,7 +183,7 @@ class btree_set
// template <typename K> size_type erase(const K& key):
//
// Erases the element with the matching key, if it exists, returning the
- // number of elements erased.
+ // number of elements erased (0 or 1).
using Base::erase;
// btree_set::insert()
@@ -263,7 +263,7 @@ class btree_set
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
- // template <typename K> node_type extract(const K& x):
+ // template <typename K> node_type extract(const K& k):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `btree_set`
@@ -567,7 +567,7 @@ class btree_multiset
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
- // template <typename K> node_type extract(const K& x):
+ // template <typename K> node_type extract(const K& k):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `btree_multiset`
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index 9edf38f9..1bfa0c20 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -15,6 +15,7 @@
#include "absl/container/btree_test.h"
#include <cstdint>
+#include <limits>
#include <map>
#include <memory>
#include <stdexcept>
@@ -52,6 +53,7 @@ using ::absl::test_internal::MovableOnlyInstance;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
+using ::testing::IsNull;
using ::testing::Pair;
template <typename T, typename U>
@@ -89,8 +91,8 @@ class base_checker {
public:
base_checker() : const_tree_(tree_) {}
- base_checker(const base_checker &x)
- : tree_(x.tree_), const_tree_(tree_), checker_(x.checker_) {}
+ base_checker(const base_checker &other)
+ : tree_(other.tree_), const_tree_(tree_), checker_(other.checker_) {}
template <typename InputIterator>
base_checker(InputIterator b, InputIterator e)
: tree_(b, e), const_tree_(tree_), checker_(b, e) {}
@@ -124,11 +126,11 @@ class base_checker {
}
return tree_iter;
}
- void value_check(const value_type &x) {
+ void value_check(const value_type &v) {
typename KeyOfValue<typename TreeType::key_type,
typename TreeType::value_type>::type key_of_value;
- const key_type &key = key_of_value(x);
- CheckPairEquals(*find(key), x);
+ const key_type &key = key_of_value(v);
+ CheckPairEquals(*find(key), v);
lower_bound(key);
upper_bound(key);
equal_range(key);
@@ -187,9 +189,9 @@ class base_checker {
return res;
}
- base_checker &operator=(const base_checker &x) {
- tree_ = x.tree_;
- checker_ = x.checker_;
+ base_checker &operator=(const base_checker &other) {
+ tree_ = other.tree_;
+ checker_ = other.checker_;
return *this;
}
@@ -250,9 +252,9 @@ class base_checker {
tree_.clear();
checker_.clear();
}
- void swap(base_checker &x) {
- tree_.swap(x.tree_);
- checker_.swap(x.checker_);
+ void swap(base_checker &other) {
+ tree_.swap(other.tree_);
+ checker_.swap(other.checker_);
}
void verify() const {
@@ -323,28 +325,28 @@ class unique_checker : public base_checker<TreeType, CheckerType> {
public:
unique_checker() : super_type() {}
- unique_checker(const unique_checker &x) : super_type(x) {}
+ unique_checker(const unique_checker &other) : super_type(other) {}
template <class InputIterator>
unique_checker(InputIterator b, InputIterator e) : super_type(b, e) {}
unique_checker &operator=(const unique_checker &) = default;
// Insertion routines.
- std::pair<iterator, bool> insert(const value_type &x) {
+ std::pair<iterator, bool> insert(const value_type &v) {
int size = this->tree_.size();
std::pair<typename CheckerType::iterator, bool> checker_res =
- this->checker_.insert(x);
- std::pair<iterator, bool> tree_res = this->tree_.insert(x);
+ this->checker_.insert(v);
+ std::pair<iterator, bool> tree_res = this->tree_.insert(v);
CheckPairEquals(*tree_res.first, *checker_res.first);
EXPECT_EQ(tree_res.second, checker_res.second);
EXPECT_EQ(this->tree_.size(), this->checker_.size());
EXPECT_EQ(this->tree_.size(), size + tree_res.second);
return tree_res;
}
- iterator insert(iterator position, const value_type &x) {
+ iterator insert(iterator position, const value_type &v) {
int size = this->tree_.size();
std::pair<typename CheckerType::iterator, bool> checker_res =
- this->checker_.insert(x);
- iterator tree_res = this->tree_.insert(position, x);
+ this->checker_.insert(v);
+ iterator tree_res = this->tree_.insert(position, v);
CheckPairEquals(*tree_res, *checker_res.first);
EXPECT_EQ(this->tree_.size(), this->checker_.size());
EXPECT_EQ(this->tree_.size(), size + checker_res.second);
@@ -371,25 +373,25 @@ class multi_checker : public base_checker<TreeType, CheckerType> {
public:
multi_checker() : super_type() {}
- multi_checker(const multi_checker &x) : super_type(x) {}
+ multi_checker(const multi_checker &other) : super_type(other) {}
template <class InputIterator>
multi_checker(InputIterator b, InputIterator e) : super_type(b, e) {}
multi_checker &operator=(const multi_checker &) = default;
// Insertion routines.
- iterator insert(const value_type &x) {
+ iterator insert(const value_type &v) {
int size = this->tree_.size();
- auto checker_res = this->checker_.insert(x);
- iterator tree_res = this->tree_.insert(x);
+ auto checker_res = this->checker_.insert(v);
+ iterator tree_res = this->tree_.insert(v);
CheckPairEquals(*tree_res, *checker_res);
EXPECT_EQ(this->tree_.size(), this->checker_.size());
EXPECT_EQ(this->tree_.size(), size + 1);
return tree_res;
}
- iterator insert(iterator position, const value_type &x) {
+ iterator insert(iterator position, const value_type &v) {
int size = this->tree_.size();
- auto checker_res = this->checker_.insert(x);
- iterator tree_res = this->tree_.insert(position, x);
+ auto checker_res = this->checker_.insert(v);
+ iterator tree_res = this->tree_.insert(position, v);
CheckPairEquals(*tree_res, *checker_res);
EXPECT_EQ(this->tree_.size(), this->checker_.size());
EXPECT_EQ(this->tree_.size(), size + 1);
@@ -812,10 +814,12 @@ void MapTest() {
TEST(Btree, set_int32) { SetTest<int32_t>(); }
TEST(Btree, set_int64) { SetTest<int64_t>(); }
TEST(Btree, set_string) { SetTest<std::string>(); }
+TEST(Btree, set_cord) { SetTest<absl::Cord>(); }
TEST(Btree, set_pair) { SetTest<std::pair<int, int>>(); }
TEST(Btree, map_int32) { MapTest<int32_t>(); }
TEST(Btree, map_int64) { MapTest<int64_t>(); }
TEST(Btree, map_string) { MapTest<std::string>(); }
+TEST(Btree, map_cord) { MapTest<absl::Cord>(); }
TEST(Btree, map_pair) { MapTest<std::pair<int, int>>(); }
template <typename K, int N = 256>
@@ -847,10 +851,12 @@ void MultiMapTest() {
TEST(Btree, multiset_int32) { MultiSetTest<int32_t>(); }
TEST(Btree, multiset_int64) { MultiSetTest<int64_t>(); }
TEST(Btree, multiset_string) { MultiSetTest<std::string>(); }
+TEST(Btree, multiset_cord) { MultiSetTest<absl::Cord>(); }
TEST(Btree, multiset_pair) { MultiSetTest<std::pair<int, int>>(); }
TEST(Btree, multimap_int32) { MultiMapTest<int32_t>(); }
TEST(Btree, multimap_int64) { MultiMapTest<int64_t>(); }
TEST(Btree, multimap_string) { MultiMapTest<std::string>(); }
+TEST(Btree, multimap_cord) { MultiMapTest<absl::Cord>(); }
TEST(Btree, multimap_pair) { MultiMapTest<std::pair<int, int>>(); }
struct CompareIntToString {
@@ -1268,6 +1274,8 @@ TEST(Btree, KeyCompareToAdapter) {
AssertKeyCompareToAdapted<std::less<absl::string_view>, absl::string_view>();
AssertKeyCompareToAdapted<std::greater<absl::string_view>,
absl::string_view>();
+ AssertKeyCompareToAdapted<std::less<absl::Cord>, absl::Cord>();
+ AssertKeyCompareToAdapted<std::greater<absl::Cord>, absl::Cord>();
AssertKeyCompareToNotAdapted<std::less<int>, int>();
AssertKeyCompareToNotAdapted<std::greater<int>, int>();
}
@@ -1337,6 +1345,12 @@ class BtreeNodePeer {
constexpr static size_t GetNumValuesPerNode() {
return btree_node<typename Set::params_type>::kNodeValues;
}
+
+ template <typename Set>
+ constexpr static size_t GetMaxFieldType() {
+ return std::numeric_limits<
+ typename btree_node<typename Set::params_type>::field_type>::max();
+ }
};
namespace {
@@ -1537,7 +1551,7 @@ TEST(Btree, MapAt) {
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW(map.at(3), std::out_of_range);
#else
- EXPECT_DEATH(map.at(3), "absl::btree_map::at");
+ EXPECT_DEATH_IF_SUPPORTED(map.at(3), "absl::btree_map::at");
#endif
}
@@ -2126,11 +2140,11 @@ TEST(Btree, UserProvidedKeyCompareToComparators) {
TEST(Btree, TryEmplaceBasicTest) {
absl::btree_map<int, std::string> m;
- // Should construct a std::string from the literal.
+ // Should construct a string from the literal.
m.try_emplace(1, "one");
EXPECT_EQ(1, m.size());
- // Try other std::string constructors and const lvalue key.
+ // Try other string constructors and const lvalue key.
const int key(42);
m.try_emplace(key, 3, 'a');
m.try_emplace(2, std::string("two"));
@@ -2398,6 +2412,208 @@ TEST(Btree, BitfieldArgument) {
m[n];
}
+TEST(Btree, SetRangeConstructorAndInsertSupportExplicitConversionComparable) {
+ const absl::string_view names[] = {"n1", "n2"};
+
+ absl::btree_set<std::string> name_set1{std::begin(names), std::end(names)};
+ EXPECT_THAT(name_set1, ElementsAreArray(names));
+
+ absl::btree_set<std::string> name_set2;
+ name_set2.insert(std::begin(names), std::end(names));
+ EXPECT_THAT(name_set2, ElementsAreArray(names));
+}
+
+// A type that is explicitly convertible from int and counts constructor calls.
+struct ConstructorCounted {
+ explicit ConstructorCounted(int i) : i(i) { ++constructor_calls; }
+ bool operator==(int other) const { return i == other; }
+
+ int i;
+ static int constructor_calls;
+};
+int ConstructorCounted::constructor_calls = 0;
+
+struct ConstructorCountedCompare {
+ bool operator()(int a, const ConstructorCounted &b) const { return a < b.i; }
+ bool operator()(const ConstructorCounted &a, int b) const { return a.i < b; }
+ bool operator()(const ConstructorCounted &a,
+ const ConstructorCounted &b) const {
+ return a.i < b.i;
+ }
+ using is_transparent = void;
+};
+
+TEST(Btree,
+ SetRangeConstructorAndInsertExplicitConvComparableLimitConstruction) {
+ const int i[] = {0, 1, 1};
+ ConstructorCounted::constructor_calls = 0;
+
+ absl::btree_set<ConstructorCounted, ConstructorCountedCompare> set{
+ std::begin(i), std::end(i)};
+ EXPECT_THAT(set, ElementsAre(0, 1));
+ EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
+
+ set.insert(std::begin(i), std::end(i));
+ EXPECT_THAT(set, ElementsAre(0, 1));
+ EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
+}
+
+TEST(Btree,
+ SetRangeConstructorAndInsertSupportExplicitConversionNonComparable) {
+ const int i[] = {0, 1};
+
+ absl::btree_set<std::vector<void *>> s1{std::begin(i), std::end(i)};
+ EXPECT_THAT(s1, ElementsAre(IsEmpty(), ElementsAre(IsNull())));
+
+ absl::btree_set<std::vector<void *>> s2;
+ s2.insert(std::begin(i), std::end(i));
+ EXPECT_THAT(s2, ElementsAre(IsEmpty(), ElementsAre(IsNull())));
+}
+
+// libstdc++ included with GCC 4.9 has a bug in the std::pair constructors that
+// prevents explicit conversions between pair types.
+// We only run this test for the libstdc++ from GCC 7 or newer because we can't
+// reliably check the libstdc++ version prior to that release.
+#if !defined(__GLIBCXX__) || \
+ (defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7)
+TEST(Btree, MapRangeConstructorAndInsertSupportExplicitConversionComparable) {
+ const std::pair<absl::string_view, int> names[] = {{"n1", 1}, {"n2", 2}};
+
+ absl::btree_map<std::string, int> name_map1{std::begin(names),
+ std::end(names)};
+ EXPECT_THAT(name_map1, ElementsAre(Pair("n1", 1), Pair("n2", 2)));
+
+ absl::btree_map<std::string, int> name_map2;
+ name_map2.insert(std::begin(names), std::end(names));
+ EXPECT_THAT(name_map2, ElementsAre(Pair("n1", 1), Pair("n2", 2)));
+}
+
+TEST(Btree,
+ MapRangeConstructorAndInsertExplicitConvComparableLimitConstruction) {
+ const std::pair<int, int> i[] = {{0, 1}, {1, 2}, {1, 3}};
+ ConstructorCounted::constructor_calls = 0;
+
+ absl::btree_map<ConstructorCounted, int, ConstructorCountedCompare> map{
+ std::begin(i), std::end(i)};
+ EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2)));
+ EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
+
+ map.insert(std::begin(i), std::end(i));
+ EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2)));
+ EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
+}
+
+TEST(Btree,
+ MapRangeConstructorAndInsertSupportExplicitConversionNonComparable) {
+ const std::pair<int, int> i[] = {{0, 1}, {1, 2}};
+
+ absl::btree_map<std::vector<void *>, int> m1{std::begin(i), std::end(i)};
+ EXPECT_THAT(m1,
+ ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2)));
+
+ absl::btree_map<std::vector<void *>, int> m2;
+ m2.insert(std::begin(i), std::end(i));
+ EXPECT_THAT(m2,
+ ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2)));
+}
+
+TEST(Btree, HeterogeneousTryEmplace) {
+ absl::btree_map<std::string, int> m;
+ std::string s = "key";
+ absl::string_view sv = s;
+ m.try_emplace(sv, 1);
+ EXPECT_EQ(m[s], 1);
+
+ m.try_emplace(m.end(), sv, 2);
+ EXPECT_EQ(m[s], 1);
+}
+
+TEST(Btree, HeterogeneousOperatorMapped) {
+ absl::btree_map<std::string, int> m;
+ std::string s = "key";
+ absl::string_view sv = s;
+ m[sv] = 1;
+ EXPECT_EQ(m[s], 1);
+
+ m[sv] = 2;
+ EXPECT_EQ(m[s], 2);
+}
+
+TEST(Btree, HeterogeneousInsertOrAssign) {
+ absl::btree_map<std::string, int> m;
+ std::string s = "key";
+ absl::string_view sv = s;
+ m.insert_or_assign(sv, 1);
+ EXPECT_EQ(m[s], 1);
+
+ m.insert_or_assign(m.end(), sv, 2);
+ EXPECT_EQ(m[s], 2);
+}
+#endif
+
+// This test requires std::launder for mutable key access in node handles.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+TEST(Btree, NodeHandleMutableKeyAccess) {
+ {
+ absl::btree_map<std::string, std::string> map;
+
+ map["key1"] = "mapped";
+
+ auto nh = map.extract(map.begin());
+ nh.key().resize(3);
+ map.insert(std::move(nh));
+
+ EXPECT_THAT(map, ElementsAre(Pair("key", "mapped")));
+ }
+ // Also for multimap.
+ {
+ absl::btree_multimap<std::string, std::string> map;
+
+ map.emplace("key1", "mapped");
+
+ auto nh = map.extract(map.begin());
+ nh.key().resize(3);
+ map.insert(std::move(nh));
+
+ EXPECT_THAT(map, ElementsAre(Pair("key", "mapped")));
+ }
+}
+#endif
+
+struct MultiKey {
+ int i1;
+ int i2;
+};
+
+struct MultiKeyComp {
+ using is_transparent = void;
+ bool operator()(const MultiKey a, const MultiKey b) const {
+ if (a.i1 != b.i1) return a.i1 < b.i1;
+ return a.i2 < b.i2;
+ }
+ bool operator()(const int a, const MultiKey b) const { return a < b.i1; }
+ bool operator()(const MultiKey a, const int b) const { return a.i1 < b; }
+};
+
+// Test that when there's a heterogeneous comparator that behaves differently
+// for some heterogeneous operators, we get equal_range() right.
+TEST(Btree, MultiKeyEqualRange) {
+ absl::btree_set<MultiKey, MultiKeyComp> set;
+
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 100; ++j) {
+ set.insert({i, j});
+ }
+ }
+
+ for (int i = 0; i < 100; ++i) {
+ auto equal_range = set.equal_range(i);
+ EXPECT_EQ(equal_range.first->i1, i);
+ EXPECT_EQ(equal_range.first->i2, 0);
+ EXPECT_EQ(std::distance(equal_range.first, equal_range.second), 100) << i;
+ }
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
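A condensed, standalone variant of the MultiKeyEqualRange test above; `PrefixComp` is a hypothetical comparator in the spirit of `MultiKeyComp`, whose int overloads compare only the first field, so a single int matches a whole range of pairs:

    #include <iostream>
    #include <iterator>
    #include <utility>

    #include "absl/container/btree_set.h"

    struct PrefixComp {
      using is_transparent = void;
      bool operator()(std::pair<int, int> a, std::pair<int, int> b) const {
        return a < b;
      }
      bool operator()(int a, std::pair<int, int> b) const { return a < b.first; }
      bool operator()(std::pair<int, int> a, int b) const { return a.first < b; }
    };

    int main() {
      absl::btree_set<std::pair<int, int>, PrefixComp> set = {
          {0, 0}, {0, 1}, {1, 0}};
      auto range = set.equal_range(0);  // heterogeneous lookup on the prefix
      std::cout << std::distance(range.first, range.second) << "\n";  // prints 2
    }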
diff --git a/absl/container/btree_test.h b/absl/container/btree_test.h
index 218ba41d..62490807 100644
--- a/absl/container/btree_test.h
+++ b/absl/container/btree_test.h
@@ -25,6 +25,7 @@
#include "absl/container/btree_map.h"
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
+#include "absl/strings/cord.h"
#include "absl/time/time.h"
namespace absl {
@@ -100,6 +101,16 @@ struct Generator<std::string> {
}
};
+template <>
+struct Generator<Cord> {
+ int maxval;
+ explicit Generator(int m) : maxval(m) {}
+ Cord operator()(int i) const {
+ char buf[16];
+ return Cord(GenerateDigits(buf, i, maxval));
+ }
+};
+
template <typename T, typename U>
struct Generator<std::pair<T, U> > {
Generator<typename remove_pair_const<T>::type> tgen;
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
index a9ce99ba..c8fe8d96 100644
--- a/absl/container/fixed_array.h
+++ b/absl/container/fixed_array.h
@@ -41,6 +41,7 @@
#include <type_traits>
#include "absl/algorithm/algorithm.h"
+#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/throw_delegate.h"
#include "absl/base/macros.h"
@@ -106,13 +107,13 @@ class FixedArray {
public:
using allocator_type = typename AllocatorTraits::allocator_type;
- using value_type = typename allocator_type::value_type;
- using pointer = typename allocator_type::pointer;
- using const_pointer = typename allocator_type::const_pointer;
- using reference = typename allocator_type::reference;
- using const_reference = typename allocator_type::const_reference;
- using size_type = typename allocator_type::size_type;
- using difference_type = typename allocator_type::difference_type;
+ using value_type = typename AllocatorTraits::value_type;
+ using pointer = typename AllocatorTraits::pointer;
+ using const_pointer = typename AllocatorTraits::const_pointer;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using size_type = typename AllocatorTraits::size_type;
+ using difference_type = typename AllocatorTraits::difference_type;
using iterator = pointer;
using const_iterator = const_pointer;
using reverse_iterator = std::reverse_iterator<iterator>;
@@ -217,7 +218,7 @@ class FixedArray {
// Returns a reference to the ith element of the fixed array.
// REQUIRES: 0 <= i < size()
reference operator[](size_type i) {
- assert(i < size());
+ ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
@@ -225,7 +226,7 @@ class FixedArray {
// ith element of the fixed array.
// REQUIRES: 0 <= i < size()
const_reference operator[](size_type i) const {
- assert(i < size());
+ ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
@@ -252,20 +253,32 @@ class FixedArray {
// FixedArray::front()
//
// Returns a reference to the first element of the fixed array.
- reference front() { return *begin(); }
+ reference front() {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[0];
+ }
// Overload of FixedArray::front() to return a reference to the first element
// of a fixed array of const values.
- const_reference front() const { return *begin(); }
+ const_reference front() const {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[0];
+ }
// FixedArray::back()
//
// Returns a reference to the last element of the fixed array.
- reference back() { return *(end() - 1); }
+ reference back() {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[size() - 1];
+ }
// Overload of FixedArray::back() to return a reference to the last element
// of a fixed array of const values.
- const_reference back() const { return *(end() - 1); }
+ const_reference back() const {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[size() - 1];
+ }
// FixedArray::begin()
//
@@ -410,15 +423,15 @@ class FixedArray {
void AnnotateConstruct(size_type n);
void AnnotateDestruct(size_type n);
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
void* RedzoneBegin() { return &redzone_begin_; }
void* RedzoneEnd() { return &redzone_end_ + 1; }
-#endif // ADDRESS_SANITIZER
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
private:
- ADDRESS_SANITIZER_REDZONE(redzone_begin_);
+ ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_);
alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])];
- ADDRESS_SANITIZER_REDZONE(redzone_end_);
+ ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_);
};
class EmptyInlinedStorage {
@@ -491,22 +504,26 @@ constexpr typename FixedArray<T, N, A>::size_type
template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
typename FixedArray<T, N, A>::size_type n) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
if (!n) return;
- ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n);
- ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin());
-#endif // ADDRESS_SANITIZER
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(),
+ data() + n);
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(),
+ RedzoneBegin());
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
static_cast<void>(n); // Mark used when not in asan mode
}
template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct(
typename FixedArray<T, N, A>::size_type n) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
if (!n) return;
- ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd());
- ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), data());
-#endif // ADDRESS_SANITIZER
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n,
+ RedzoneEnd());
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(),
+ data());
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
static_cast<void>(n); // Mark used when not in asan mode
}
ABSL_NAMESPACE_END
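The hunks above swap plain `assert` for `ABSL_HARDENING_ASSERT`, which also fires in optimized builds when hardening is enabled. A sketch of what callers can now rely on, assuming `ABSL_OPTION_HARDENED` is set to 1 in absl/base/options.h:

    #include "absl/container/fixed_array.h"

    int main() {
      absl::FixedArray<int> a = {1, 2, 3};
      int last = a[2];  // in bounds: fine
      (void)last;
      // With hardening enabled, an out-of-bounds access such as a[3], or
      // front()/back() on an empty array, aborts the program even in
      // release builds instead of being undefined behavior.
    }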
diff --git a/absl/container/fixed_array_exception_safety_test.cc b/absl/container/fixed_array_exception_safety_test.cc
index a5bb009d..e5f59299 100644
--- a/absl/container/fixed_array_exception_safety_test.cc
+++ b/absl/container/fixed_array_exception_safety_test.cc
@@ -150,8 +150,7 @@ TEST(FixedArrayExceptionSafety, InitListConstructorWithAlloc) {
template <typename FixedArrT>
testing::AssertionResult ReadMemory(FixedArrT* fixed_arr) {
- // Marked volatile to prevent optimization. Used for running asan tests.
- volatile int sum = 0;
+ int sum = 0;
for (const auto& thrower : *fixed_arr) {
sum += thrower.Get();
}
diff --git a/absl/container/fixed_array_test.cc b/absl/container/fixed_array_test.cc
index c960fe51..49598e7a 100644
--- a/absl/container/fixed_array_test.cc
+++ b/absl/container/fixed_array_test.cc
@@ -27,7 +27,10 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/config.h"
#include "absl/base/internal/exception_testing.h"
+#include "absl/base/options.h"
+#include "absl/container/internal/counting_allocator.h"
#include "absl/hash/hash_testing.h"
#include "absl/memory/memory.h"
@@ -188,6 +191,21 @@ TEST(FixedArrayTest, AtThrows) {
"failed bounds check");
}
+TEST(FixedArrayTest, Hardened) {
+#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
+ absl::FixedArray<int> a = {1, 2, 3};
+ EXPECT_EQ(a[2], 3);
+ EXPECT_DEATH_IF_SUPPORTED(a[3], "");
+ EXPECT_DEATH_IF_SUPPORTED(a[-1], "");
+
+ absl::FixedArray<int> empty(0);
+ EXPECT_DEATH_IF_SUPPORTED(empty[0], "");
+ EXPECT_DEATH_IF_SUPPORTED(empty[-1], "");
+ EXPECT_DEATH_IF_SUPPORTED(empty.front(), "");
+ EXPECT_DEATH_IF_SUPPORTED(empty.back(), "");
+#endif
+}
+
TEST(FixedArrayRelationalsTest, EqualArrays) {
for (int i = 0; i < 10; ++i) {
absl::FixedArray<int, 5> a1(i);
@@ -622,70 +640,9 @@ TEST(FixedArrayTest, DefaultCtorDoesNotValueInit) {
}
#endif // __GNUC__
-// This is a stateful allocator, but the state lives outside of the
-// allocator (in whatever test is using the allocator). This is odd
-// but helps in tests where the allocator is propagated into nested
-// containers - that chain of allocators uses the same state and is
-// thus easier to query for aggregate allocation information.
-template <typename T>
-class CountingAllocator : public std::allocator<T> {
- public:
- using Alloc = std::allocator<T>;
- using pointer = typename Alloc::pointer;
- using size_type = typename Alloc::size_type;
-
- CountingAllocator() : bytes_used_(nullptr), instance_count_(nullptr) {}
- explicit CountingAllocator(int64_t* b)
- : bytes_used_(b), instance_count_(nullptr) {}
- CountingAllocator(int64_t* b, int64_t* a)
- : bytes_used_(b), instance_count_(a) {}
-
- template <typename U>
- explicit CountingAllocator(const CountingAllocator<U>& x)
- : Alloc(x),
- bytes_used_(x.bytes_used_),
- instance_count_(x.instance_count_) {}
-
- pointer allocate(size_type n, const void* const hint = nullptr) {
- assert(bytes_used_ != nullptr);
- *bytes_used_ += n * sizeof(T);
- return Alloc::allocate(n, hint);
- }
-
- void deallocate(pointer p, size_type n) {
- Alloc::deallocate(p, n);
- assert(bytes_used_ != nullptr);
- *bytes_used_ -= n * sizeof(T);
- }
-
- template <typename... Args>
- void construct(pointer p, Args&&... args) {
- Alloc::construct(p, absl::forward<Args>(args)...);
- if (instance_count_) {
- *instance_count_ += 1;
- }
- }
-
- void destroy(pointer p) {
- Alloc::destroy(p);
- if (instance_count_) {
- *instance_count_ -= 1;
- }
- }
-
- template <typename U>
- class rebind {
- public:
- using other = CountingAllocator<U>;
- };
-
- int64_t* bytes_used_;
- int64_t* instance_count_;
-};
-
TEST(AllocatorSupportTest, CountInlineAllocations) {
constexpr size_t inlined_size = 4;
- using Alloc = CountingAllocator<int>;
+ using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated = 0;
@@ -706,7 +663,7 @@ TEST(AllocatorSupportTest, CountInlineAllocations) {
TEST(AllocatorSupportTest, CountOutoflineAllocations) {
constexpr size_t inlined_size = 4;
- using Alloc = CountingAllocator<int>;
+ using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated = 0;
@@ -727,7 +684,7 @@ TEST(AllocatorSupportTest, CountOutoflineAllocations) {
TEST(AllocatorSupportTest, CountCopyInlineAllocations) {
constexpr size_t inlined_size = 4;
- using Alloc = CountingAllocator<int>;
+ using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated1 = 0;
@@ -755,7 +712,7 @@ TEST(AllocatorSupportTest, CountCopyInlineAllocations) {
TEST(AllocatorSupportTest, CountCopyOutoflineAllocations) {
constexpr size_t inlined_size = 4;
- using Alloc = CountingAllocator<int>;
+ using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated1 = 0;
@@ -787,7 +744,7 @@ TEST(AllocatorSupportTest, SizeValAllocConstructor) {
using testing::SizeIs;
constexpr size_t inlined_size = 4;
- using Alloc = CountingAllocator<int>;
+ using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
{
@@ -811,16 +768,16 @@ TEST(AllocatorSupportTest, SizeValAllocConstructor) {
}
}
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
absl::FixedArray<int, 32> a(10);
int* raw = a.data();
raw[0] = 0;
raw[9] = 0;
- EXPECT_DEATH(raw[-2] = 0, "container-overflow");
- EXPECT_DEATH(raw[-1] = 0, "container-overflow");
- EXPECT_DEATH(raw[10] = 0, "container-overflow");
- EXPECT_DEATH(raw[31] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[-2] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[10] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[31] = 0, "container-overflow");
}
TEST(FixedArrayTest, AddressSanitizerAnnotations2) {
@@ -828,10 +785,10 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations2) {
char* raw = a.data();
raw[0] = 0;
raw[11] = 0;
- EXPECT_DEATH(raw[-7] = 0, "container-overflow");
- EXPECT_DEATH(raw[-1] = 0, "container-overflow");
- EXPECT_DEATH(raw[12] = 0, "container-overflow");
- EXPECT_DEATH(raw[17] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[-7] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[12] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[17] = 0, "container-overflow");
}
TEST(FixedArrayTest, AddressSanitizerAnnotations3) {
@@ -839,8 +796,8 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations3) {
uint64_t* raw = a.data();
raw[0] = 0;
raw[19] = 0;
- EXPECT_DEATH(raw[-1] = 0, "container-overflow");
- EXPECT_DEATH(raw[20] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[20] = 0, "container-overflow");
}
TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
@@ -852,13 +809,13 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
// there is only an 8-byte red zone before the container range, so we only
// access the last 4 bytes of the struct to make sure it stays within the red
// zone.
- EXPECT_DEATH(raw[-1].z_ = 0, "container-overflow");
- EXPECT_DEATH(raw[10] = ThreeInts(), "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[-1].z_ = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[10] = ThreeInts(), "container-overflow");
// The actual size of storage is kDefaultBytes=256, 21*12 = 252,
// so accessing raw[21] should still trigger the correct warning.
- EXPECT_DEATH(raw[21] = ThreeInts(), "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[21] = ThreeInts(), "container-overflow");
}
-#endif // ADDRESS_SANITIZER
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
TEST(FixedArrayTest, AbslHashValueWorks) {
using V = absl::FixedArray<int>;
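The tests above now reuse the shared `absl::container_internal::CountingAllocator` in place of the deleted local copy. A small sketch of how it tracks heap usage for a `FixedArray` that spills out of its inline storage (the sizes are illustrative):

    #include <cstdint>
    #include <iostream>

    #include "absl/container/fixed_array.h"
    #include "absl/container/internal/counting_allocator.h"

    int main() {
      int64_t allocated = 0;
      using Alloc = absl::container_internal::CountingAllocator<int>;
      // Inline capacity is 4, so 8 zero-initialized elements force a heap
      // allocation, which the allocator records in `allocated`.
      absl::FixedArray<int, 4, Alloc> arr(8, 0, Alloc(&allocated));
      std::cout << "heap bytes: " << allocated << "\n";  // 8 * sizeof(int)
    }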
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index fcb70d86..74def0df 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -234,7 +234,8 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
//
// size_type erase(const key_type& key):
//
- // Erases the element with the matching key, if it exists.
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
using Base::erase;
// flat_hash_map::insert()
@@ -383,6 +384,11 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
// key value and returns a node handle owning that extracted data. If the
// `flat_hash_map` does not contain an element with a matching key, this
// function returns an empty node handle.
+ //
+ // NOTE: when compiled with a C++ standard earlier than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
using Base::extract;
// flat_hash_map::merge()
diff --git a/absl/container/flat_hash_map_test.cc b/absl/container/flat_hash_map_test.cc
index 728b693a..89ec60c9 100644
--- a/absl/container/flat_hash_map_test.cc
+++ b/absl/container/flat_hash_map_test.cc
@@ -16,6 +16,7 @@
#include <memory>
+#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
@@ -34,6 +35,19 @@ using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
+// Check that absl::flat_hash_map works in a global constructor.
+struct BeforeMain {
+ BeforeMain() {
+ absl::flat_hash_map<int, int> x;
+ x.insert({1, 1});
+ ABSL_RAW_CHECK(x.find(0) == x.end(), "x should not contain 0");
+ auto it = x.find(1);
+ ABSL_RAW_CHECK(it != x.end(), "x should contain 1");
+ ABSL_RAW_CHECK(it->second, "1 should map to 1");
+ }
+};
+const BeforeMain before_main;
+
template <class K, class V>
using Map = flat_hash_map<K, V, StatefulTestingHash, StatefulTestingEqual,
Alloc<std::pair<const K, V>>>;
@@ -253,6 +267,21 @@ TEST(FlatHashMap, EraseIf) {
}
}
+// This test requires std::launder for mutable key access in node handles.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+TEST(FlatHashMap, NodeHandleMutableKeyAccess) {
+ flat_hash_map<std::string, std::string> map;
+
+ map["key1"] = "mapped";
+
+ auto nh = map.extract(map.begin());
+ nh.key().resize(3);
+ map.insert(std::move(nh));
+
+ EXPECT_THAT(map, testing::ElementsAre(Pair("key", "mapped")));
+}
+#endif
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
index 94be6e3d..81e145aa 100644
--- a/absl/container/flat_hash_set.h
+++ b/absl/container/flat_hash_set.h
@@ -227,7 +227,8 @@ class flat_hash_set
//
// size_type erase(const key_type& key):
//
- // Erases the element with the matching key, if it exists.
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
using Base::erase;
// flat_hash_set::insert()
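A tiny sketch of the erase contract the updated comment spells out; the set contents are made up:

    #include <cassert>

    #include "absl/container/flat_hash_set.h"

    int main() {
      absl::flat_hash_set<int> s = {1, 2, 3};
      assert(s.erase(2) == 1);  // element present: one erased
      assert(s.erase(2) == 0);  // already gone: nothing erased
    }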
diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc
index 40d7f85c..8f6f9944 100644
--- a/absl/container/flat_hash_set_test.cc
+++ b/absl/container/flat_hash_set_test.cc
@@ -16,6 +16,7 @@
#include <vector>
+#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
@@ -36,6 +37,17 @@ using ::testing::Pointee;
using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
+// Check that absl::flat_hash_set works in a global constructor.
+struct BeforeMain {
+ BeforeMain() {
+ absl::flat_hash_set<int> x;
+ x.insert(1);
+ ABSL_RAW_CHECK(!x.contains(0), "x should not contain 0");
+ ABSL_RAW_CHECK(x.contains(1), "x should contain 1");
+ }
+};
+const BeforeMain before_main;
+
template <class T>
using Set =
absl::flat_hash_set<T, StatefulTestingHash, StatefulTestingEqual, Alloc<T>>;
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 2388d471..90bb96e8 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -48,6 +48,7 @@
#include "absl/algorithm/algorithm.h"
#include "absl/base/internal/throw_delegate.h"
+#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/inlined_vector.h"
@@ -63,7 +64,7 @@ ABSL_NAMESPACE_BEGIN
// `std::vector` for use cases where the vector's size is sufficiently small
// that it can be inlined. If the inlined vector does grow beyond its estimated
// capacity, it will trigger an initial allocation on the heap, and will behave
-// as a `std:vector`. The API of the `absl::InlinedVector` within this file is
+// as a `std::vector`. The API of the `absl::InlinedVector` within this file is
// designed to cover the same API footprint as covered by `std::vector`.
template <typename T, size_t N, typename A = std::allocator<T>>
class InlinedVector {
@@ -307,16 +308,14 @@ class InlinedVector {
//
// Returns a `reference` to the `i`th element of the inlined vector.
reference operator[](size_type i) {
- assert(i < size());
-
+ ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
// Overload of `InlinedVector::operator[](...)` that returns a
// `const_reference` to the `i`th element of the inlined vector.
const_reference operator[](size_type i) const {
- assert(i < size());
-
+ ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
@@ -331,7 +330,6 @@ class InlinedVector {
base_internal::ThrowStdOutOfRange(
"`InlinedVector::at(size_type)` failed bounds check");
}
-
return data()[i];
}
@@ -345,7 +343,6 @@ class InlinedVector {
base_internal::ThrowStdOutOfRange(
"`InlinedVector::at(size_type) const` failed bounds check");
}
-
return data()[i];
}
@@ -353,34 +350,30 @@ class InlinedVector {
//
// Returns a `reference` to the first element of the inlined vector.
reference front() {
- assert(!empty());
-
- return at(0);
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[0];
}
// Overload of `InlinedVector::front()` that returns a `const_reference` to
// the first element of the inlined vector.
const_reference front() const {
- assert(!empty());
-
- return at(0);
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[0];
}
// `InlinedVector::back()`
//
// Returns a `reference` to the last element of the inlined vector.
reference back() {
- assert(!empty());
-
- return at(size() - 1);
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[size() - 1];
}
// Overload of `InlinedVector::back()` that returns a `const_reference` to the
// last element of the inlined vector.
const_reference back() const {
- assert(!empty());
-
- return at(size() - 1);
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[size() - 1];
}
// `InlinedVector::begin()`
@@ -531,7 +524,7 @@ class InlinedVector {
void assign(InputIterator first, InputIterator last) {
size_type i = 0;
for (; i < size() && first != last; ++i, static_cast<void>(++first)) {
- at(i) = *first;
+ data()[i] = *first;
}
erase(data() + i, data() + size());
@@ -542,9 +535,12 @@ class InlinedVector {
//
// Resizes the inlined vector to contain `n` elements.
//
- // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n`
+ // NOTE: If `n` is smaller than `size()`, extra elements are destroyed. If `n`
// is larger than `size()`, new elements are value-initialized.
- void resize(size_type n) { storage_.Resize(DefaultValueAdapter(), n); }
+ void resize(size_type n) {
+ ABSL_HARDENING_ASSERT(n <= max_size());
+ storage_.Resize(DefaultValueAdapter(), n);
+ }
// Overload of `InlinedVector::resize(...)` that resizes the inlined vector to
// contain `n` elements.
@@ -552,6 +548,7 @@ class InlinedVector {
// NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n`
// is larger than `size()`, new elements are copied-constructed from `v`.
void resize(size_type n, const_reference v) {
+ ABSL_HARDENING_ASSERT(n <= max_size());
storage_.Resize(CopyValueAdapter(v), n);
}
@@ -573,8 +570,8 @@ class InlinedVector {
// of `v` starting at `pos`, returning an `iterator` pointing to the first of
// the newly inserted elements.
iterator insert(const_iterator pos, size_type n, const_reference v) {
- assert(pos >= begin());
- assert(pos <= end());
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos <= end());
if (ABSL_PREDICT_TRUE(n != 0)) {
value_type dealias = v;
@@ -600,8 +597,8 @@ class InlinedVector {
EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
iterator insert(const_iterator pos, ForwardIterator first,
ForwardIterator last) {
- assert(pos >= begin());
- assert(pos <= end());
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos <= end());
if (ABSL_PREDICT_TRUE(first != last)) {
return storage_.Insert(pos, IteratorValueAdapter<ForwardIterator>(first),
@@ -619,8 +616,8 @@ class InlinedVector {
template <typename InputIterator,
DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
iterator insert(const_iterator pos, InputIterator first, InputIterator last) {
- assert(pos >= begin());
- assert(pos <= end());
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos <= end());
size_type index = std::distance(cbegin(), pos);
for (size_type i = index; first != last; ++i, static_cast<void>(++first)) {
@@ -636,8 +633,8 @@ class InlinedVector {
// `pos`, returning an `iterator` pointing to the newly emplaced element.
template <typename... Args>
iterator emplace(const_iterator pos, Args&&... args) {
- assert(pos >= begin());
- assert(pos <= end());
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos <= end());
value_type dealias(std::forward<Args>(args)...);
return storage_.Insert(pos,
@@ -670,7 +667,7 @@ class InlinedVector {
//
// Destroys the element at `back()`, reducing the size by `1`.
void pop_back() noexcept {
- assert(!empty());
+ ABSL_HARDENING_ASSERT(!empty());
AllocatorTraits::destroy(*storage_.GetAllocPtr(), data() + (size() - 1));
storage_.SubtractSize(1);
@@ -683,8 +680,8 @@ class InlinedVector {
//
// NOTE: may return `end()`, which is not dereferenceable.
iterator erase(const_iterator pos) {
- assert(pos >= begin());
- assert(pos < end());
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos < end());
return storage_.Erase(pos, pos + 1);
}
@@ -695,9 +692,9 @@ class InlinedVector {
//
// NOTE: may return `end()`, which is not dereferenceable.
iterator erase(const_iterator from, const_iterator to) {
- assert(from >= begin());
- assert(from <= to);
- assert(to <= end());
+ ABSL_HARDENING_ASSERT(from >= begin());
+ ABSL_HARDENING_ASSERT(from <= to);
+ ABSL_HARDENING_ASSERT(to <= end());
if (ABSL_PREDICT_TRUE(from != to)) {
return storage_.Erase(from, to);
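As in fixed_array.h, these asserts become `ABSL_HARDENING_ASSERT`, so the iterator preconditions on `insert`, `erase`, and `pop_back` are also checked in hardened release builds. A brief sketch under that assumption:

    #include "absl/container/inlined_vector.h"

    int main() {
      absl::InlinedVector<int, 4> v = {1, 2, 3, 4};
      // Preconditions (from >= begin(), from <= to, to <= end()) are
      // verified when hardening is enabled; violating them aborts rather
      // than invoking undefined behavior.
      v.erase(v.begin() + 1, v.begin() + 3);  // v is now {1, 4}
    }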
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index 3f2b4ed2..b8dafe93 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -83,7 +83,7 @@ int GetNonShortStringOptimizationSize() {
}
ABSL_RAW_LOG(
FATAL,
- "Failed to find a std::string larger than the short std::string optimization");
+ "Failed to find a string larger than the short string optimization");
return -1;
}
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 2c9b0d0e..415c60d9 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -30,6 +30,7 @@
#include "absl/base/internal/exception_testing.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
+#include "absl/base/options.h"
#include "absl/container/internal/counting_allocator.h"
#include "absl/container/internal/test_instance_tracker.h"
#include "absl/hash/hash_testing.h"
@@ -247,6 +248,16 @@ TEST(IntVec, Erase) {
}
}
+TEST(IntVec, Hardened) {
+ IntVec v;
+ Fill(&v, 10);
+ EXPECT_EQ(v[9], 9);
+#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
+ EXPECT_DEATH_IF_SUPPORTED(v[10], "");
+ EXPECT_DEATH_IF_SUPPORTED(v[-1], "");
+#endif
+}
+
// At the end of this test loop, the elements between [erase_begin, erase_end)
// should have reference counts == 0, and all others elements should have
// reference counts == 1.
@@ -780,7 +791,7 @@ TEST(IntVec, Reserve) {
TEST(StringVec, SelfRefPushBack) {
std::vector<std::string> std_v;
absl::InlinedVector<std::string, 4> v;
- const std::string s = "A quite long std::string to ensure heap.";
+ const std::string s = "A quite long string to ensure heap.";
std_v.push_back(s);
v.push_back(s);
for (int i = 0; i < 20; ++i) {
@@ -795,7 +806,7 @@ TEST(StringVec, SelfRefPushBack) {
TEST(StringVec, SelfRefPushBackWithMove) {
std::vector<std::string> std_v;
absl::InlinedVector<std::string, 4> v;
- const std::string s = "A quite long std::string to ensure heap.";
+ const std::string s = "A quite long string to ensure heap.";
std_v.push_back(s);
v.push_back(s);
for (int i = 0; i < 20; ++i) {
@@ -808,7 +819,7 @@ TEST(StringVec, SelfRefPushBackWithMove) {
}
TEST(StringVec, SelfMove) {
- const std::string s = "A quite long std::string to ensure heap.";
+ const std::string s = "A quite long string to ensure heap.";
for (int len = 0; len < 20; len++) {
SCOPED_TRACE(len);
absl::InlinedVector<std::string, 8> v;
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index fd5c0e7a..002ccc1e 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -65,6 +65,7 @@
#include "absl/container/internal/layout.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
+#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/compare.h"
#include "absl/utility/utility.h"
@@ -93,6 +94,19 @@ struct StringBtreeDefaultLess {
absl::string_view rhs) const {
return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
}
+ StringBtreeDefaultLess(std::less<absl::Cord>) {} // NOLINT
+ absl::weak_ordering operator()(const absl::Cord &lhs,
+ const absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+ }
+ absl::weak_ordering operator()(const absl::Cord &lhs,
+ absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+ }
+ absl::weak_ordering operator()(absl::string_view lhs,
+ const absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs));
+ }
};
struct StringBtreeDefaultGreater {
@@ -107,17 +121,30 @@ struct StringBtreeDefaultGreater {
absl::string_view rhs) const {
return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
}
+ StringBtreeDefaultGreater(std::greater<absl::Cord>) {} // NOLINT
+ absl::weak_ordering operator()(const absl::Cord &lhs,
+ const absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+ }
+ absl::weak_ordering operator()(const absl::Cord &lhs,
+ absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs));
+ }
+ absl::weak_ordering operator()(absl::string_view lhs,
+ const absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+ }
};
// A helper class to convert a boolean comparison into a three-way "compare-to"
-// comparison that returns a negative value to indicate less-than, zero to
-// indicate equality and a positive value to indicate greater-than. This helper
+// comparison that returns an `absl::weak_ordering`. This helper
// class is specialized for less<std::string>, greater<std::string>,
-// less<string_view>, and greater<string_view>.
+// less<string_view>, greater<string_view>, less<absl::Cord>, and
+// greater<absl::Cord>.
//
// key_compare_to_adapter is provided so that btree users
// automatically get the more efficient compare-to code when using common
-// google string types with common comparison functors.
+// Abseil string types with common comparison functors.
// These string-like specializations also turn on heterogeneous lookup by
// default.
template <typename Compare>
@@ -145,12 +172,25 @@ struct key_compare_to_adapter<std::greater<absl::string_view>> {
using type = StringBtreeDefaultGreater;
};
+template <>
+struct key_compare_to_adapter<std::less<absl::Cord>> {
+ using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<absl::Cord>> {
+ using type = StringBtreeDefaultGreater;
+};
+
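Together with the `absl::Cord` overloads added to StringBtreeDefaultLess/StringBtreeDefaultGreater above, these specializations give btrees keyed on `absl::Cord` three-way comparisons and transparent lookup by `string_view`. A small usage sketch (not part of this patch):

  #include "absl/container/btree_set.h"
  #include "absl/strings/cord.h"
  #include "absl/strings/string_view.h"

  void CordLookupSketch() {
    absl::btree_set<absl::Cord> keys;
    keys.insert(absl::Cord("tunnel"));
    // std::less<absl::Cord> is adapted to StringBtreeDefaultLess, so this
    // lookup compares via Cord::Compare(string_view) without materializing
    // a temporary Cord.
    const bool found = keys.find(absl::string_view("tunnel")) != keys.end();
    (void)found;
  }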
template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
bool Multi, typename SlotPolicy>
struct common_params {
- // If Compare is a common comparator for a std::string-like type, then we adapt it
+ // If Compare is a common comparator for a string-like type, then we adapt it
// to use heterogeneous lookup and to be a key-compare-to comparator.
using key_compare = typename key_compare_to_adapter<Compare>::type;
+ // True when key_compare has been adapted to StringBtreeDefault{Less,Greater}.
+ using is_key_compare_adapted =
+ absl::negation<std::is_same<key_compare, Compare>>;
// A type which indicates if we have a key-compare-to functor or a plain old
// key-compare functor.
using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
@@ -217,10 +257,6 @@ struct common_params {
static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
slot_policy::move(alloc, src, dest);
}
- static void move(Alloc *alloc, slot_type *first, slot_type *last,
- slot_type *result) {
- slot_policy::move(alloc, first, last, result);
- }
};
// A parameters structure for holding the type parameters for a btree_map.
@@ -252,9 +288,17 @@ struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
};
using is_map_container = std::true_type;
- static const Key &key(const value_type &x) { return x.first; }
- static const Key &key(const init_type &x) { return x.first; }
- static const Key &key(const slot_type *x) { return slot_policy::key(x); }
+ template <typename V>
+ static auto key(const V &value) -> decltype(value.first) {
+ return value.first;
+ }
+ static const Key &key(const slot_type *s) { return slot_policy::key(s); }
+ static const Key &key(slot_type *s) { return slot_policy::key(s); }
+ // For use in node handle.
+ static auto mutable_key(slot_type *s)
+ -> decltype(slot_policy::mutable_key(s)) {
+ return slot_policy::mutable_key(s);
+ }
static mapped_type &value(value_type *value) { return value->second; }
};
@@ -295,13 +339,6 @@ struct set_slot_policy {
static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) {
*dest = std::move(*src);
}
-
- template <typename Alloc>
- static void move(Alloc *alloc, slot_type *first, slot_type *last,
- slot_type *result) {
- for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
- move(alloc, src, dest);
- }
};
// A parameters structure for holding the type parameters for a btree_set.
@@ -315,8 +352,10 @@ struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
using value_compare = typename set_params::common_params::key_compare;
using is_map_container = std::false_type;
- static const Key &key(const value_type &x) { return x; }
- static const Key &key(const slot_type *x) { return *x; }
+ template <typename V>
+ static const V &key(const V &value) { return value; }
+ static const Key &key(const slot_type *slot) { return *slot; }
+ static const Key &key(slot_type *slot) { return *slot; }
};
// An adapter class that converts a lower-bound compare into an upper-bound
@@ -326,8 +365,8 @@ struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
template <typename Compare>
struct upper_bound_adapter {
explicit upper_bound_adapter(const Compare &c) : comp(c) {}
- template <typename K, typename LK>
- bool operator()(const K &a, const LK &b) const {
+ template <typename K1, typename K2>
+ bool operator()(const K1 &a, const K2 &b) const {
// Returns true when a is not greater than b.
return !compare_internal::compare_result_as_less_than(comp(b, a));
}
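The adapter relies on an identity: if `comp(b, a)` means "b < a", then `!(b < a)` means "a <= b", and a lower-bound search run with an "a <= b" predicate stops at exactly the position upper_bound would. A standalone sketch of that equivalence with plain integers:

  #include <algorithm>
  #include <cassert>
  #include <vector>

  void UpperBoundAdapterSketch() {
    std::vector<int> v = {1, 2, 2, 2, 3};
    const auto ub = std::upper_bound(v.begin(), v.end(), 2);
    // lower_bound with "element <= value" finds the first element > value,
    // which is exactly what upper_bound with "element < value" returns.
    const auto adapted = std::lower_bound(
        v.begin(), v.end(), 2, [](int a, int b) { return !(b < a); });
    assert(ub == adapted);  // both point at the 3
  }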
@@ -716,14 +755,10 @@ class btree_node {
template <typename... Args>
void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
- // Removes the value at position i, shifting all existing values and children
- // at positions > i to the left by 1.
- void remove_value(int i, allocator_type *alloc);
-
- // Removes the values at positions [i, i + to_erase), shifting all values
- // after that range to the left by to_erase. Does not change children at all.
- void remove_values_ignore_children(int i, int to_erase,
- allocator_type *alloc);
+ // Removes the values at positions [i, i + to_erase), shifting all existing
+ // values and children after that range to the left by to_erase. Clears all
+ // children between [i, i + to_erase).
+ void remove_values(field_type i, field_type to_erase, allocator_type *alloc);
// Rebalances a node with its right sibling.
void rebalance_right_to_left(int to_move, btree_node *right,
@@ -735,40 +770,36 @@ class btree_node {
void split(int insert_position, btree_node *dest, allocator_type *alloc);
// Merges a node with its right sibling, moving all of the values and the
- // delimiting key in the parent node onto itself.
- void merge(btree_node *sibling, allocator_type *alloc);
-
- // Swap the contents of "this" and "src".
- void swap(btree_node *src, allocator_type *alloc);
+ // delimiting key in the parent node onto itself, and deleting the src node.
+ void merge(btree_node *src, allocator_type *alloc);
// Node allocation/deletion routines.
- static btree_node *init_leaf(btree_node *n, btree_node *parent,
- int max_count) {
- n->set_parent(parent);
- n->set_position(0);
- n->set_start(0);
- n->set_finish(0);
- n->set_max_count(max_count);
+ void init_leaf(btree_node *parent, int max_count) {
+ set_parent(parent);
+ set_position(0);
+ set_start(0);
+ set_finish(0);
+ set_max_count(max_count);
absl::container_internal::SanitizerPoisonMemoryRegion(
- n->start_slot(), max_count * sizeof(slot_type));
- return n;
+ start_slot(), max_count * sizeof(slot_type));
}
- static btree_node *init_internal(btree_node *n, btree_node *parent) {
- init_leaf(n, parent, kNodeValues);
+ void init_internal(btree_node *parent) {
+ init_leaf(parent, kNodeValues);
// Set `max_count` to a sentinel value to indicate that this node is
// internal.
- n->set_max_count(kInternalNodeMaxCount);
+ set_max_count(kInternalNodeMaxCount);
absl::container_internal::SanitizerPoisonMemoryRegion(
- &n->mutable_child(n->start()),
- (kNodeValues + 1) * sizeof(btree_node *));
- return n;
+ &mutable_child(start()), (kNodeValues + 1) * sizeof(btree_node *));
}
- void destroy(allocator_type *alloc) {
- for (int i = start(); i < finish(); ++i) {
- value_destroy(i, alloc);
- }
+
+ static void deallocate(const size_type size, btree_node *node,
+ allocator_type *alloc) {
+ absl::container_internal::Deallocate<Alignment()>(alloc, node, size);
}
+ // Deletes a node and all of its children.
+ static void clear_and_delete(btree_node *node, allocator_type *alloc);
+
public:
// Exposed only for tests.
static bool testonly_uses_linear_node_search() {
@@ -777,33 +808,55 @@ class btree_node {
private:
template <typename... Args>
- void value_init(const size_type i, allocator_type *alloc, Args &&... args) {
+ void value_init(const field_type i, allocator_type *alloc, Args &&... args) {
absl::container_internal::SanitizerUnpoisonObject(slot(i));
params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
}
- void value_destroy(const size_type i, allocator_type *alloc) {
+ void value_destroy(const field_type i, allocator_type *alloc) {
params_type::destroy(alloc, slot(i));
absl::container_internal::SanitizerPoisonObject(slot(i));
}
+ void value_destroy_n(const field_type i, const field_type n,
+ allocator_type *alloc) {
+ for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
+ params_type::destroy(alloc, s);
+ absl::container_internal::SanitizerPoisonObject(s);
+ }
+ }
+
+ static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) {
+ absl::container_internal::SanitizerUnpoisonObject(dest);
+ params_type::transfer(alloc, dest, src);
+ absl::container_internal::SanitizerPoisonObject(src);
+ }
+
+ // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`.
+ void transfer(const size_type dest_i, const size_type src_i,
+ btree_node *src_node, allocator_type *alloc) {
+ transfer(slot(dest_i), src_node->slot(src_i), alloc);
+ }
- // Move n values starting at value i in this node into the values starting at
- // value j in node x.
- void uninitialized_move_n(const size_type n, const size_type i,
- const size_type j, btree_node *x,
- allocator_type *alloc) {
- absl::container_internal::SanitizerUnpoisonMemoryRegion(
- x->slot(j), n * sizeof(slot_type));
- for (slot_type *src = slot(i), *end = src + n, *dest = x->slot(j);
+ // Transfers `n` values starting at value `src_i` in `src_node` into the
+ // values starting at value `dest_i` in `this`.
+ void transfer_n(const size_type n, const size_type dest_i,
+ const size_type src_i, btree_node *src_node,
+ allocator_type *alloc) {
+ for (slot_type *src = src_node->slot(src_i), *end = src + n,
+ *dest = slot(dest_i);
src != end; ++src, ++dest) {
- params_type::construct(alloc, dest, src);
+ transfer(dest, src, alloc);
}
}
- // Destroys a range of n values, starting at index i.
- void value_destroy_n(const size_type i, const size_type n,
- allocator_type *alloc) {
- for (int j = 0; j < n; ++j) {
- value_destroy(i + j, alloc);
+ // Same as above, except that we start at the end and work our way to the
+ // beginning.
+ void transfer_n_backward(const size_type n, const size_type dest_i,
+ const size_type src_i, btree_node *src_node,
+ allocator_type *alloc) {
+ for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n,
+ *dest = slot(dest_i + n - 1);
+ src != end; --src, --dest) {
+ transfer(dest, src, alloc);
}
}
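`transfer_n` and `transfer_n_backward` exist for the same reason `memmove` picks a copy direction: when the source and destination ranges overlap within a node, values must be moved starting from the end that won't be clobbered first. A toy sketch of the rule; the real helpers additionally construct at the destination, destroy the source, and update sanitizer poisoning:

  #include <cstddef>
  #include <utility>

  template <typename T>
  void TransferNSketch(T* dest, T* src, size_t n) {
    // Safe when dest < src (shifting a range left): each source element is
    // read before anything overwrites it.
    for (size_t i = 0; i < n; ++i) dest[i] = std::move(src[i]);
  }

  template <typename T>
  void TransferNBackwardSketch(T* dest, T* src, size_t n) {
    // Safe when dest > src (shifting a range right): walk from the back.
    for (size_t i = n; i > 0; --i) dest[i - 1] = std::move(src[i - 1]);
  }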
@@ -856,8 +909,8 @@ struct btree_iterator {
std::is_same<btree_iterator<N, R, P>, iterator>::value &&
std::is_same<btree_iterator, const_iterator>::value,
int> = 0>
- btree_iterator(const btree_iterator<N, R, P> &x) // NOLINT
- : node(x.node), position(x.position) {}
+ btree_iterator(const btree_iterator<N, R, P> &other) // NOLINT
+ : node(other.node), position(other.position) {}
private:
// This SFINAE allows explicit conversions from const_iterator to
@@ -869,8 +922,8 @@ struct btree_iterator {
std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
std::is_same<btree_iterator, iterator>::value,
int> = 0>
- explicit btree_iterator(const btree_iterator<N, R, P> &x)
- : node(const_cast<node_type *>(x.node)), position(x.position) {}
+ explicit btree_iterator(const btree_iterator<N, R, P> &other)
+ : node(const_cast<node_type *>(other.node)), position(other.position) {}
// Increment/decrement the iterator.
void increment() {
@@ -890,16 +943,27 @@ struct btree_iterator {
void decrement_slow();
public:
- bool operator==(const const_iterator &x) const {
- return node == x.node && position == x.position;
+ bool operator==(const iterator &other) const {
+ return node == other.node && position == other.position;
+ }
+ bool operator==(const const_iterator &other) const {
+ return node == other.node && position == other.position;
}
- bool operator!=(const const_iterator &x) const {
- return node != x.node || position != x.position;
+ bool operator!=(const iterator &other) const {
+ return node != other.node || position != other.position;
+ }
+ bool operator!=(const const_iterator &other) const {
+ return node != other.node || position != other.position;
}
// Accessors for the key/value the iterator is pointing at.
- reference operator*() const { return node->value(position); }
- pointer operator->() const { return &node->value(position); }
+ reference operator*() const {
+ ABSL_HARDENING_ASSERT(node != nullptr);
+ ABSL_HARDENING_ASSERT(node->start() <= position);
+ ABSL_HARDENING_ASSERT(node->finish() > position);
+ return node->value(position);
+ }
+ pointer operator->() const { return &operator*(); }
btree_iterator &operator++() {
increment();
@@ -942,7 +1006,8 @@ struct btree_iterator {
// The node in the tree the iterator is pointing at.
Node *node;
// The position within the node of the tree the iterator is pointing at.
- // TODO(ezb): make this a field_type
+ // NOTE: this is an int rather than a field_type because iterators can point
+ // to invalid positions (such as -1) in certain circumstances.
int position;
};
@@ -950,6 +1015,10 @@ template <typename Params>
class btree {
using node_type = btree_node<Params>;
using is_key_compare_to = typename Params::is_key_compare_to;
+ using init_type = typename Params::init_type;
+ using field_type = typename node_type::field_type;
+ using is_multi_container = typename Params::is_multi_container;
+ using is_key_compare_adapted = typename Params::is_key_compare_adapted;
// We use a static empty node for the root/leftmost/rightmost of empty btrees
// in order to avoid branching in begin()/end().
@@ -984,7 +1053,7 @@ class btree {
#endif
}
- enum {
+ enum : uint32_t {
kNodeValues = node_type::kNodeValues,
kMinNodeValues = kNodeValues / 2,
};
@@ -994,9 +1063,9 @@ class btree {
node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {}
- node_stats &operator+=(const node_stats &x) {
- leaf_nodes += x.leaf_nodes;
- internal_nodes += x.internal_nodes;
+ node_stats &operator+=(const node_stats &other) {
+ leaf_nodes += other.leaf_nodes;
+ internal_nodes += other.internal_nodes;
return *this;
}
@@ -1028,15 +1097,15 @@ class btree {
private:
// For use in copy_or_move_values_in_order.
- const value_type &maybe_move_from_iterator(const_iterator x) { return *x; }
- value_type &&maybe_move_from_iterator(iterator x) { return std::move(*x); }
+ const value_type &maybe_move_from_iterator(const_iterator it) { return *it; }
+ value_type &&maybe_move_from_iterator(iterator it) { return std::move(*it); }
// Copies or moves (depending on the template parameter) the values in
- // x into this btree in their order in x. This btree must be empty before this
- // method is called. This method is used in copy construction, copy
- // assignment, and move assignment.
+ // other into this btree in their order in other. This btree must be empty
+ // before this method is called. This method is used in copy construction,
+ // copy assignment, and move assignment.
template <typename Btree>
- void copy_or_move_values_in_order(Btree *x);
+ void copy_or_move_values_in_order(Btree *other);
// Validates that various assumptions/requirements are true at compile time.
constexpr static bool static_assert_validation();
@@ -1044,12 +1113,12 @@ class btree {
public:
btree(const key_compare &comp, const allocator_type &alloc);
- btree(const btree &x);
- btree(btree &&x) noexcept
- : root_(std::move(x.root_)),
- rightmost_(absl::exchange(x.rightmost_, EmptyNode())),
- size_(absl::exchange(x.size_, 0)) {
- x.mutable_root() = EmptyNode();
+ btree(const btree &other);
+ btree(btree &&other) noexcept
+ : root_(std::move(other.root_)),
+ rightmost_(absl::exchange(other.rightmost_, EmptyNode())),
+ size_(absl::exchange(other.size_, 0)) {
+ other.mutable_root() = EmptyNode();
}
~btree() {
@@ -1059,9 +1128,9 @@ class btree {
clear();
}
- // Assign the contents of x to *this.
- btree &operator=(const btree &x);
- btree &operator=(btree &&x) noexcept;
+ // Assign the contents of other to *this.
+ btree &operator=(const btree &other);
+ btree &operator=(btree &&other) noexcept;
iterator begin() { return iterator(leftmost()); }
const_iterator begin() const { return const_iterator(leftmost()); }
@@ -1099,23 +1168,21 @@ class btree {
}
// Finds the range of values which compare equal to key. The first member of
- // the returned pair is equal to lower_bound(key). The second member pair of
- // the pair is equal to upper_bound(key).
+ // the returned pair is equal to lower_bound(key). The second member of the
+ // pair is equal to upper_bound(key).
template <typename K>
- std::pair<iterator, iterator> equal_range(const K &key) {
- return {lower_bound(key), upper_bound(key)};
- }
+ std::pair<iterator, iterator> equal_range(const K &key);
template <typename K>
std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
- return {lower_bound(key), upper_bound(key)};
+ return const_cast<btree *>(this)->equal_range(key);
}
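The const overload reuses the non-const implementation through a const_cast, a standard deduplication idiom: it is safe here because equal_range does not mutate the tree and the results convert back to const_iterators on return. The shape of the idiom in isolation (hypothetical IntBox type):

  class IntBox {
   public:
    int* find_value() { return &value_; }  // the real implementation
    const int* find_value() const {
      // Safe: nothing is modified, and constness is restored on return.
      return const_cast<IntBox*>(this)->find_value();
    }
   private:
    int value_ = 0;
  };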
// Inserts a value into the btree only if it does not already exist. The
// boolean return value indicates whether insertion succeeded or failed.
// Requirement: if `key` already exists in the btree, does not consume `args`.
// Requirement: `key` is never referenced after consuming `args`.
- template <typename... Args>
- std::pair<iterator, bool> insert_unique(const key_type &key, Args &&... args);
+ template <typename K, typename... Args>
+ std::pair<iterator, bool> insert_unique(const K &key, Args &&... args);
// Inserts with hint. Checks to see if the value should be placed immediately
// before `position` in the tree. If so, then the insertion will take
@@ -1123,14 +1190,23 @@ class btree {
// logarithmic time as if a call to insert_unique() were made.
// Requirement: if `key` already exists in the btree, does not consume `args`.
// Requirement: `key` is never referenced after consuming `args`.
- template <typename... Args>
+ template <typename K, typename... Args>
std::pair<iterator, bool> insert_hint_unique(iterator position,
- const key_type &key,
+ const K &key,
Args &&... args);
// Insert a range of values into the btree.
+ // Note: the first overload avoids constructing a value_type if the key
+ // already exists in the btree.
+ template <typename InputIterator,
+ typename = decltype(std::declval<const key_compare &>()(
+ params_type::key(*std::declval<InputIterator>()),
+ std::declval<const key_type &>()))>
+ void insert_iterator_unique(InputIterator b, InputIterator e, int);
+ // We need the second overload for cases in which we need to construct a
+ // value_type in order to compare it with the keys already in the btree.
template <typename InputIterator>
- void insert_iterator_unique(InputIterator b, InputIterator e);
+ void insert_iterator_unique(InputIterator b, InputIterator e, char);
// Inserts a value into the btree.
template <typename ValueType>
@@ -1204,15 +1280,15 @@ class btree {
// Clear the btree, deleting all of the values it contains.
void clear();
- // Swap the contents of *this and x.
- void swap(btree &x);
+ // Swaps the contents of `this` and `other`.
+ void swap(btree &other);
const key_compare &key_comp() const noexcept {
return root_.template get<0>();
}
- template <typename K, typename LK>
- bool compare_keys(const K &x, const LK &y) const {
- return compare_internal::compare_result_as_less_than(key_comp()(x, y));
+ template <typename K1, typename K2>
+ bool compare_keys(const K1 &a, const K2 &b) const {
+ return compare_internal::compare_result_as_less_than(key_comp()(a, b));
}
value_compare value_comp() const { return value_compare(key_comp()); }
@@ -1322,38 +1398,24 @@ class btree {
// Node creation/deletion routines.
node_type *new_internal_node(node_type *parent) {
- node_type *p = allocate(node_type::InternalSize());
- return node_type::init_internal(p, parent);
+ node_type *n = allocate(node_type::InternalSize());
+ n->init_internal(parent);
+ return n;
}
node_type *new_leaf_node(node_type *parent) {
- node_type *p = allocate(node_type::LeafSize());
- return node_type::init_leaf(p, parent, kNodeValues);
+ node_type *n = allocate(node_type::LeafSize());
+ n->init_leaf(parent, kNodeValues);
+ return n;
}
node_type *new_leaf_root_node(const int max_count) {
- node_type *p = allocate(node_type::LeafSize(max_count));
- return node_type::init_leaf(p, p, max_count);
+ node_type *n = allocate(node_type::LeafSize(max_count));
+ n->init_leaf(/*parent=*/n, max_count);
+ return n;
}
// Deletion helper routines.
- void erase_same_node(iterator begin, iterator end);
- iterator erase_from_leaf_node(iterator begin, size_type to_erase);
iterator rebalance_after_delete(iterator iter);
- // Deallocates a node of a certain size in bytes using the allocator.
- void deallocate(const size_type size, node_type *node) {
- absl::container_internal::Deallocate<node_type::Alignment()>(
- mutable_allocator(), node, size);
- }
-
- void delete_internal_node(node_type *node) {
- node->destroy(mutable_allocator());
- deallocate(node_type::InternalSize(), node);
- }
- void delete_leaf_node(node_type *node) {
- node->destroy(mutable_allocator());
- deallocate(node_type::LeafSize(node->max_count()), node);
- }
-
// Rebalances or splits the node iter points to.
void rebalance_or_split(iterator *iter);
@@ -1422,9 +1484,6 @@ class btree {
template <typename K>
iterator internal_find(const K &key) const;
- // Deletes a node and all of its children.
- void internal_clear(node_type *node);
-
// Verifies the tree structure of node.
int internal_verify(const node_type *node, const key_type *lo,
const key_type *hi) const;
@@ -1477,10 +1536,8 @@ inline void btree_node<P>::emplace_value(const size_type i,
// Shift old values to create space for new value and then construct it in
// place.
if (i < finish()) {
- value_init(finish(), alloc, slot(finish() - 1));
- for (size_type j = finish() - 1; j > i; --j)
- params_type::move(alloc, slot(j - 1), slot(j));
- value_destroy(i, alloc);
+ transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this,
+ alloc);
}
value_init(i, alloc, std::forward<Args>(args)...);
set_finish(finish() + 1);
@@ -1494,24 +1551,27 @@ inline void btree_node<P>::emplace_value(const size_type i,
}
template <typename P>
-inline void btree_node<P>::remove_value(const int i, allocator_type *alloc) {
- if (!leaf() && finish() > i + 1) {
- assert(child(i + 1)->count() == 0);
- for (size_type j = i + 1; j < finish(); ++j) {
- set_child(j, child(j + 1));
+inline void btree_node<P>::remove_values(const field_type i,
+ const field_type to_erase,
+ allocator_type *alloc) {
+ // Transfer values after the removed range into their new places.
+ value_destroy_n(i, to_erase, alloc);
+ const field_type orig_finish = finish();
+ const field_type src_i = i + to_erase;
+ transfer_n(orig_finish - src_i, i, src_i, this, alloc);
+
+ if (!leaf()) {
+ // Delete all children between begin and end.
+ for (int j = 0; j < to_erase; ++j) {
+ clear_and_delete(child(i + j + 1), alloc);
+ }
+ // Rotate children after end into new positions.
+ for (int j = i + to_erase + 1; j <= orig_finish; ++j) {
+ set_child(j - to_erase, child(j));
+ clear_child(j);
}
- clear_child(finish());
}
-
- remove_values_ignore_children(i, /*to_erase=*/1, alloc);
-}
-
-template <typename P>
-inline void btree_node<P>::remove_values_ignore_children(
- const int i, const int to_erase, allocator_type *alloc) {
- params_type::move(alloc, slot(i + to_erase), finish_slot(), slot(i));
- value_destroy_n(finish() - to_erase, to_erase, alloc);
- set_finish(finish() - to_erase);
+ set_finish(orig_finish - to_erase);
}
template <typename P>
@@ -1525,22 +1585,17 @@ void btree_node<P>::rebalance_right_to_left(const int to_move,
assert(to_move <= right->count());
// 1) Move the delimiting value in the parent to the left node.
- value_init(finish(), alloc, parent()->slot(position()));
+ transfer(finish(), position(), parent(), alloc);
// 2) Move the (to_move - 1) values from the right node to the left node.
- right->uninitialized_move_n(to_move - 1, right->start(), finish() + 1, this,
- alloc);
+ transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc);
// 3) Move the new delimiting value to the parent from the right node.
- params_type::move(alloc, right->slot(to_move - 1),
- parent()->slot(position()));
+ parent()->transfer(position(), right->start() + to_move - 1, right, alloc);
- // 4) Shift the values in the right node to their correct position.
- params_type::move(alloc, right->slot(to_move), right->finish_slot(),
- right->start_slot());
-
- // 5) Destroy the now-empty to_move entries in the right node.
- right->value_destroy_n(right->finish() - to_move, to_move, alloc);
+ // 4) Shift the values in the right node to their correct positions.
+ right->transfer_n(right->count() - to_move, right->start(),
+ right->start() + to_move, right, alloc);
if (!leaf()) {
// Move the child pointers from the right to the left node.
@@ -1575,54 +1630,19 @@ void btree_node<P>::rebalance_left_to_right(const int to_move,
// Lastly, a new delimiting value is moved from the left node into the
// parent, and the remaining empty left node entries are destroyed.
- if (right->count() >= to_move) {
- // The original location of the right->count() values are sufficient to hold
- // the new to_move entries from the parent and left node.
-
- // 1) Shift existing values in the right node to their correct positions.
- right->uninitialized_move_n(to_move, right->finish() - to_move,
- right->finish(), right, alloc);
- for (slot_type *src = right->slot(right->finish() - to_move - 1),
- *dest = right->slot(right->finish() - 1),
- *end = right->start_slot();
- src >= end; --src, --dest) {
- params_type::move(alloc, src, dest);
- }
+ // 1) Shift existing values in the right node to their correct positions.
+ right->transfer_n_backward(right->count(), right->start() + to_move,
+ right->start(), right, alloc);
- // 2) Move the delimiting value in the parent to the right node.
- params_type::move(alloc, parent()->slot(position()),
- right->slot(to_move - 1));
+ // 2) Move the delimiting value in the parent to the right node.
+ right->transfer(right->start() + to_move - 1, position(), parent(), alloc);
- // 3) Move the (to_move - 1) values from the left node to the right node.
- params_type::move(alloc, slot(finish() - (to_move - 1)), finish_slot(),
- right->start_slot());
- } else {
- // The right node does not have enough initialized space to hold the new
- // to_move entries, so part of them will move to uninitialized space.
-
- // 1) Shift existing values in the right node to their correct positions.
- right->uninitialized_move_n(right->count(), right->start(),
- right->start() + to_move, right, alloc);
-
- // 2) Move the delimiting value in the parent to the right node.
- right->value_init(to_move - 1, alloc, parent()->slot(position()));
-
- // 3) Move the (to_move - 1) values from the left node to the right node.
- const size_type uninitialized_remaining = to_move - right->count() - 1;
- uninitialized_move_n(uninitialized_remaining,
- finish() - uninitialized_remaining, right->finish(),
- right, alloc);
- params_type::move(alloc, slot(finish() - (to_move - 1)),
- slot(finish() - uninitialized_remaining),
- right->start_slot());
- }
+ // 3) Move the (to_move - 1) values from the left node to the right node.
+ right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this,
+ alloc);
// 4) Move the new delimiting value to the parent from the left node.
- params_type::move(alloc, slot(finish() - to_move),
- parent()->slot(position()));
-
- // 5) Destroy the now-empty to_move entries in the left node.
- value_destroy_n(finish() - to_move, to_move, alloc);
+ parent()->transfer(position(), finish() - to_move, this, alloc);
if (!leaf()) {
// Move the child pointers from the left to the right node.
@@ -1662,10 +1682,7 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
assert(count() >= 1);
// Move values from the left sibling to the right sibling.
- uninitialized_move_n(dest->count(), finish(), dest->start(), dest, alloc);
-
- // Destroy the now-empty entries in the left node.
- value_destroy_n(finish(), dest->count(), alloc);
+ dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc);
// The split key is the largest value in the left sibling.
--mutable_finish();
@@ -1692,11 +1709,7 @@ void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
value_init(finish(), alloc, parent()->slot(position()));
// Move the values from the right to the left node.
- src->uninitialized_move_n(src->count(), src->start(), finish() + 1, this,
- alloc);
-
- // Destroy the now-empty entries in the right node.
- src->value_destroy_n(src->start(), src->count(), alloc);
+ transfer_n(src->count(), finish() + 1, src->start(), src, alloc);
if (!leaf()) {
// Move the child pointers from the right to the left node.
@@ -1710,56 +1723,59 @@ void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
set_finish(start() + 1 + count() + src->count());
src->set_finish(src->start());
- // Remove the value on the parent node.
- parent()->remove_value(position(), alloc);
+ // Remove the value on the parent node and delete the src node.
+ parent()->remove_values(position(), /*to_erase=*/1, alloc);
}
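For orientation, a before/after sketch of a merge, with K the delimiting key that sits in the parent between the two siblings:

  before:  parent [... K ...]    left [A B]    right(src) [C D]
  after:   parent [...   ...]    left [A B K C D]          (src deleted)

Because remove_values(position, /*to_erase=*/1, alloc) now both erases K from the parent and deallocates the emptied src node, the separate delete_leaf_node/delete_internal_node calls disappear from merge_nodes further down.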
template <typename P>
-void btree_node<P>::swap(btree_node *x, allocator_type *alloc) {
- using std::swap;
- assert(leaf() == x->leaf());
-
- // Determine which is the smaller/larger node.
- btree_node *smaller = this, *larger = x;
- if (smaller->count() > larger->count()) {
- swap(smaller, larger);
+void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
+ if (node->leaf()) {
+ node->value_destroy_n(node->start(), node->count(), alloc);
+ deallocate(LeafSize(node->max_count()), node, alloc);
+ return;
}
-
- // Swap the values.
- for (slot_type *a = smaller->start_slot(), *b = larger->start_slot(),
- *end = smaller->finish_slot();
- a != end; ++a, ++b) {
- params_type::swap(alloc, a, b);
+ if (node->count() == 0) {
+ deallocate(InternalSize(), node, alloc);
+ return;
}
- // Move values that can't be swapped.
- const size_type to_move = larger->count() - smaller->count();
- larger->uninitialized_move_n(to_move, smaller->finish(), smaller->finish(),
- smaller, alloc);
- larger->value_destroy_n(smaller->finish(), to_move, alloc);
+ // The parent of the root of the subtree we are deleting.
+ btree_node *delete_root_parent = node->parent();
- if (!leaf()) {
- // Swap the child pointers.
- std::swap_ranges(&smaller->mutable_child(smaller->start()),
- &smaller->mutable_child(smaller->finish() + 1),
- &larger->mutable_child(larger->start()));
- // Update swapped children's parent pointers.
- int i = smaller->start();
- int j = larger->start();
- for (; i <= smaller->finish(); ++i, ++j) {
- smaller->child(i)->set_parent(smaller);
- larger->child(j)->set_parent(larger);
- }
- // Move the child pointers that couldn't be swapped.
- for (; j <= larger->finish(); ++i, ++j) {
- smaller->init_child(i, larger->child(j));
- larger->clear_child(j);
- }
+ // Navigate to the leftmost leaf under node, and then delete upwards.
+ while (!node->leaf()) node = node->start_child();
+ // Use `int` because `pos` needs to be able to hold `kNodeValues+1`, which
+ // isn't guaranteed to be a valid `field_type`.
+ int pos = node->position();
+ btree_node *parent = node->parent();
+ for (;;) {
+ // In each iteration of the next loop, we delete one leaf node and go right.
+ assert(pos <= parent->finish());
+ do {
+ node = parent->child(pos);
+ if (!node->leaf()) {
+ // Navigate to the leftmost leaf under node.
+ while (!node->leaf()) node = node->start_child();
+ pos = node->position();
+ parent = node->parent();
+ }
+ node->value_destroy_n(node->start(), node->count(), alloc);
+ deallocate(LeafSize(node->max_count()), node, alloc);
+ ++pos;
+ } while (pos <= parent->finish());
+
+ // Once we've deleted all children of parent, delete parent and go up/right.
+ assert(pos > parent->finish());
+ do {
+ node = parent;
+ pos = node->position();
+ parent = node->parent();
+ node->value_destroy_n(node->start(), node->count(), alloc);
+ deallocate(InternalSize(), node, alloc);
+ if (parent == delete_root_parent) return;
+ ++pos;
+ } while (pos > parent->finish());
}
-
- // Swap the `finish`s.
- // TODO(ezb): with floating storage, will also need to swap starts.
- swap(mutable_finish(), x->mutable_finish());
}
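The iterative clear_and_delete above replaces the recursive internal_clear (removed at the bottom of this file): descend to the leftmost leaf, delete leaves sibling by sibling, and pop up a level whenever a parent's children are exhausted, so destroying an arbitrarily deep tree needs no recursion and no auxiliary stack. The same idea on a toy n-ary tree (hypothetical Node type, detaching children from the back for simplicity):

  #include <vector>

  struct Node {
    Node* parent = nullptr;
    std::vector<Node*> children;
  };

  void ClearAndDeleteSketch(Node* root) {
    Node* node = root;
    while (true) {
      if (!node->children.empty()) {
        // Descend, detaching the child so the parent shrinks as we go.
        Node* child = node->children.back();
        node->children.pop_back();
        node = child;
      } else {
        Node* parent = node->parent;
        const bool done = (node == root);
        delete node;
        if (done) return;
        node = parent;
      }
    }
  }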
////
@@ -1774,6 +1790,7 @@ void btree_iterator<N, R, P>::increment_slow() {
position = node->position();
node = node->parent();
}
+ // TODO(ezb): assert we aren't incrementing end() instead of handling.
if (position == node->finish()) {
*this = save;
}
@@ -1797,6 +1814,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
position = node->position() - 1;
node = node->parent();
}
+ // TODO(ezb): assert we aren't decrementing begin() instead of handling.
if (position < node->start()) {
*this = save;
}
@@ -1814,7 +1832,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
// btree methods
template <typename P>
template <typename Btree>
-void btree<P>::copy_or_move_values_in_order(Btree *x) {
+void btree<P>::copy_or_move_values_in_order(Btree *other) {
static_assert(std::is_same<btree, Btree>::value ||
std::is_same<const btree, Btree>::value,
"Btree type must be same or const.");
@@ -1822,11 +1840,11 @@ void btree<P>::copy_or_move_values_in_order(Btree *x) {
// We can avoid key comparisons because we know the order of the
// values is the same order we'll store them in.
- auto iter = x->begin();
- if (iter == x->end()) return;
+ auto iter = other->begin();
+ if (iter == other->end()) return;
insert_multi(maybe_move_from_iterator(iter));
++iter;
- for (; iter != x->end(); ++iter) {
+ for (; iter != other->end(); ++iter) {
// If the btree is not empty, we can just insert the new value at the end
// of the tree.
internal_emplace(end(), maybe_move_from_iterator(iter));
@@ -1869,13 +1887,48 @@ btree<P>::btree(const key_compare &comp, const allocator_type &alloc)
: root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
template <typename P>
-btree<P>::btree(const btree &x) : btree(x.key_comp(), x.allocator()) {
- copy_or_move_values_in_order(&x);
+btree<P>::btree(const btree &other)
+ : btree(other.key_comp(), other.allocator()) {
+ copy_or_move_values_in_order(&other);
}
template <typename P>
-template <typename... Args>
-auto btree<P>::insert_unique(const key_type &key, Args &&... args)
+template <typename K>
+auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
+ const iterator lower = lower_bound(key);
+ // TODO(ezb): we should be able to avoid this comparison when there's a
+ // three-way comparator.
+ if (lower == end() || compare_keys(key, lower.key())) return {lower, lower};
+
+ const iterator next = std::next(lower);
+ // When the comparator is heterogeneous, we can't assume that comparison with
+ // non-`key_type` will be equivalent to `key_type` comparisons, so there
+ // could be multiple equivalent keys even in a unique container. But for
+ // heterogeneous comparisons from the default string-adapted comparators, we
+ // don't need to worry about this.
+ if (!is_multi_container::value &&
+ (std::is_same<K, key_type>::value || is_key_compare_adapted::value)) {
+ // The next iterator after lower must point to a key greater than `key`.
+ // Note: if this assert fails, then it may indicate that the comparator does
+ // not meet the equivalence requirements for Compare
+ // (see https://en.cppreference.com/w/cpp/named_req/Compare).
+ assert(next == end() || compare_keys(key, next.key()));
+ return {lower, next};
+ }
+ // Try once more to avoid the call to upper_bound() if there's only one
+ // equivalent key. This should prevent all calls to upper_bound() in cases of
+ // unique containers with heterogeneous comparators in which all comparison
+ // operators are equivalent.
+ if (next == end() || compare_keys(key, next.key())) return {lower, next};
+
+ // In this case, we need to call upper_bound() to avoid worst case O(N)
+ // behavior if we were to iterate over equal keys.
+ return {lower, upper_bound(key)};
+}
+
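The payoff of the new equal_range: for a unique container, the answer is derived from lower_bound plus at most two extra comparisons, and upper_bound is only consulted in the genuinely ambiguous heterogeneous case. A usage sketch:

  #include "absl/container/btree_map.h"

  void EqualRangeSketch() {
    absl::btree_map<int, int> m = {{1, 10}, {2, 20}, {3, 30}};
    const auto range = m.equal_range(2);
    // range.first points at {2, 20} and range.second is simply the next
    // iterator; no separate upper_bound() traversal was needed.
    (void)range;
  }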
+template <typename P>
+template <typename K, typename... Args>
+auto btree<P>::insert_unique(const K &key, Args &&... args)
-> std::pair<iterator, bool> {
if (empty()) {
mutable_root() = rightmost_ = new_leaf_root_node(1);
@@ -1900,8 +1953,8 @@ auto btree<P>::insert_unique(const key_type &key, Args &&... args)
}
template <typename P>
-template <typename... Args>
-inline auto btree<P>::insert_hint_unique(iterator position, const key_type &key,
+template <typename K, typename... Args>
+inline auto btree<P>::insert_hint_unique(iterator position, const K &key,
Args &&... args)
-> std::pair<iterator, bool> {
if (!empty()) {
@@ -1925,14 +1978,23 @@ inline auto btree<P>::insert_hint_unique(iterator position, const key_type &key,
}
template <typename P>
-template <typename InputIterator>
-void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e) {
+template <typename InputIterator, typename>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, int) {
for (; b != e; ++b) {
insert_hint_unique(end(), params_type::key(*b), *b);
}
}
template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
+ for (; b != e; ++b) {
+ init_type value(*b);
+ insert_hint_unique(end(), params_type::key(value), std::move(value));
+ }
+}
+
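The int/char parameter pair is a standard tag-dispatch idiom: callers pass the literal 0. When the SFINAE condition on the first overload holds (the key can be compared without constructing a value_type), its int parameter is an exact match and wins overload resolution; otherwise only the char overload survives and 0 converts. A self-contained sketch of the mechanism:

  #include <type_traits>

  template <typename T,
            typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
  const char* Dispatch(T, int) { return "cheap path"; }

  template <typename T>
  const char* Dispatch(T, char) { return "fallback path"; }

  // Dispatch(1, 0)   -> "cheap path": both overloads are viable, and the
  //                     int parameter is the better match for the literal 0.
  // Dispatch(1.5, 0) -> "fallback path": SFINAE removes the first overload.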
+template <typename P>
template <typename ValueType>
auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
if (empty()) {
@@ -1977,46 +2039,47 @@ void btree<P>::insert_iterator_multi(InputIterator b, InputIterator e) {
}
template <typename P>
-auto btree<P>::operator=(const btree &x) -> btree & {
- if (this != &x) {
+auto btree<P>::operator=(const btree &other) -> btree & {
+ if (this != &other) {
clear();
- *mutable_key_comp() = x.key_comp();
+ *mutable_key_comp() = other.key_comp();
if (absl::allocator_traits<
allocator_type>::propagate_on_container_copy_assignment::value) {
- *mutable_allocator() = x.allocator();
+ *mutable_allocator() = other.allocator();
}
- copy_or_move_values_in_order(&x);
+ copy_or_move_values_in_order(&other);
}
return *this;
}
template <typename P>
-auto btree<P>::operator=(btree &&x) noexcept -> btree & {
- if (this != &x) {
+auto btree<P>::operator=(btree &&other) noexcept -> btree & {
+ if (this != &other) {
clear();
using std::swap;
if (absl::allocator_traits<
allocator_type>::propagate_on_container_copy_assignment::value) {
// Note: `root_` also contains the allocator and the key comparator.
- swap(root_, x.root_);
- swap(rightmost_, x.rightmost_);
- swap(size_, x.size_);
+ swap(root_, other.root_);
+ swap(rightmost_, other.rightmost_);
+ swap(size_, other.size_);
} else {
- if (allocator() == x.allocator()) {
- swap(mutable_root(), x.mutable_root());
- swap(*mutable_key_comp(), *x.mutable_key_comp());
- swap(rightmost_, x.rightmost_);
- swap(size_, x.size_);
+ if (allocator() == other.allocator()) {
+ swap(mutable_root(), other.mutable_root());
+ swap(*mutable_key_comp(), *other.mutable_key_comp());
+ swap(rightmost_, other.rightmost_);
+ swap(size_, other.size_);
} else {
// We aren't allowed to propagate the allocator and the allocator is
// different so we can't take over its memory. We must move each element
- // individually. We need both `x` and `this` to have `x`s key comparator
- // while moving the values so we can't swap the key comparators.
- *mutable_key_comp() = x.key_comp();
- copy_or_move_values_in_order(&x);
+ // individually. We need both `other` and `this` to have `other`'s key
+ // comparator while moving the values so we can't swap the key
+ // comparators.
+ *mutable_key_comp() = other.key_comp();
+ copy_or_move_values_in_order(&other);
}
}
}
@@ -2028,7 +2091,7 @@ auto btree<P>::erase(iterator iter) -> iterator {
bool internal_delete = false;
if (!iter.node->leaf()) {
// Deletion of a value on an internal node. First, move the largest value
- // from our left child here, then delete that position (in remove_value()
+ // from our left child here, then delete that position (in remove_values()
// below). We can get to the largest value from our left child by
// decrementing iter.
iterator internal_iter(iter);
@@ -2040,7 +2103,7 @@ auto btree<P>::erase(iterator iter) -> iterator {
}
// Delete the key from the leaf.
- iter.node->remove_value(iter.position, mutable_allocator());
+ iter.node->remove_values(iter.position, /*to_erase=*/1, mutable_allocator());
--size_;
// We want to return the next value after the one we just erased. If we
@@ -2115,7 +2178,9 @@ auto btree<P>::erase_range(iterator begin, iterator end)
}
if (begin.node == end.node) {
- erase_same_node(begin, end);
+ assert(end.position > begin.position);
+ begin.node->remove_values(begin.position, end.position - begin.position,
+ mutable_allocator());
size_ -= count;
return {count, rebalance_after_delete(begin)};
}
@@ -2125,8 +2190,11 @@ auto btree<P>::erase_range(iterator begin, iterator end)
if (begin.node->leaf()) {
const size_type remaining_to_erase = size_ - target_size;
const size_type remaining_in_node = begin.node->finish() - begin.position;
- begin = erase_from_leaf_node(
- begin, (std::min)(remaining_to_erase, remaining_in_node));
+ const size_type to_erase =
+ (std::min)(remaining_to_erase, remaining_in_node);
+ begin.node->remove_values(begin.position, to_erase, mutable_allocator());
+ size_ -= to_erase;
+ begin = rebalance_after_delete(begin);
} else {
begin = erase(begin);
}
@@ -2135,51 +2203,6 @@ auto btree<P>::erase_range(iterator begin, iterator end)
}
template <typename P>
-void btree<P>::erase_same_node(iterator begin, iterator end) {
- assert(begin.node == end.node);
- assert(end.position > begin.position);
-
- node_type *node = begin.node;
- size_type to_erase = end.position - begin.position;
- if (!node->leaf()) {
- // Delete all children between begin and end.
- for (size_type i = 0; i < to_erase; ++i) {
- internal_clear(node->child(begin.position + i + 1));
- }
- // Rotate children after end into new positions.
- for (size_type i = begin.position + to_erase + 1; i <= node->finish();
- ++i) {
- node->set_child(i - to_erase, node->child(i));
- node->clear_child(i);
- }
- }
- node->remove_values_ignore_children(begin.position, to_erase,
- mutable_allocator());
-
- // Do not need to update rightmost_, because
- // * either end == this->end(), and therefore node == rightmost_, and still
- // exists
- // * or end != this->end(), and therefore rightmost_ hasn't been erased, since
- // it wasn't covered in [begin, end)
-}
-
-template <typename P>
-auto btree<P>::erase_from_leaf_node(iterator begin, size_type to_erase)
- -> iterator {
- node_type *node = begin.node;
- assert(node->leaf());
- assert(node->finish() > begin.position);
- assert(begin.position + to_erase <= node->finish());
-
- node->remove_values_ignore_children(begin.position, to_erase,
- mutable_allocator());
-
- size_ -= to_erase;
-
- return rebalance_after_delete(begin);
-}
-
-template <typename P>
template <typename K>
auto btree<P>::erase_unique(const K &key) -> size_type {
const iterator iter = internal_find(key);
@@ -2207,7 +2230,7 @@ auto btree<P>::erase_multi(const K &key) -> size_type {
template <typename P>
void btree<P>::clear() {
if (!empty()) {
- internal_clear(root());
+ node_type::clear_and_delete(root(), mutable_allocator());
}
mutable_root() = EmptyNode();
rightmost_ = EmptyNode();
@@ -2215,20 +2238,20 @@ void btree<P>::clear() {
}
template <typename P>
-void btree<P>::swap(btree &x) {
+void btree<P>::swap(btree &other) {
using std::swap;
if (absl::allocator_traits<
allocator_type>::propagate_on_container_swap::value) {
// Note: `root_` also contains the allocator and the key comparator.
- swap(root_, x.root_);
+ swap(root_, other.root_);
} else {
// It's undefined behavior if the allocators are unequal here.
- assert(allocator() == x.allocator());
- swap(mutable_root(), x.mutable_root());
- swap(*mutable_key_comp(), *x.mutable_key_comp());
+ assert(allocator() == other.allocator());
+ swap(mutable_root(), other.mutable_root());
+ swap(*mutable_key_comp(), *other.mutable_key_comp());
}
- swap(rightmost_, x.rightmost_);
- swap(size_, x.size_);
+ swap(rightmost_, other.rightmost_);
+ swap(size_, other.size_);
}
template <typename P>
@@ -2348,12 +2371,7 @@ void btree<P>::rebalance_or_split(iterator *iter) {
template <typename P>
void btree<P>::merge_nodes(node_type *left, node_type *right) {
left->merge(right, mutable_allocator());
- if (right->leaf()) {
- if (rightmost_ == right) rightmost_ = left;
- delete_leaf_node(right);
- } else {
- delete_internal_node(right);
- }
+ if (rightmost_ == right) rightmost_ = left;
}
template <typename P>
@@ -2410,21 +2428,20 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
template <typename P>
void btree<P>::try_shrink() {
- if (root()->count() > 0) {
+ node_type *orig_root = root();
+ if (orig_root->count() > 0) {
return;
}
// Deleted the last item on the root node, shrink the height of the tree.
- if (root()->leaf()) {
+ if (orig_root->leaf()) {
assert(size() == 0);
- delete_leaf_node(root());
- mutable_root() = EmptyNode();
- rightmost_ = EmptyNode();
+ mutable_root() = rightmost_ = EmptyNode();
} else {
- node_type *child = root()->start_child();
+ node_type *child = orig_root->start_child();
child->make_root();
- delete_internal_node(root());
mutable_root() = child;
}
+ node_type::clear_and_delete(orig_root, mutable_allocator());
}
template <typename P>
@@ -2452,7 +2469,8 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
--iter;
++iter.position;
}
- const int max_count = iter.node->max_count();
+ const field_type max_count = iter.node->max_count();
+ allocator_type *alloc = mutable_allocator();
if (iter.node->count() == max_count) {
// Make room in the leaf for the new item.
if (max_count < kNodeValues) {
@@ -2461,16 +2479,20 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
assert(iter.node == root());
iter.node =
new_leaf_root_node((std::min<int>)(kNodeValues, 2 * max_count));
- iter.node->swap(root(), mutable_allocator());
- delete_leaf_node(root());
- mutable_root() = iter.node;
- rightmost_ = iter.node;
+ // Transfer the values from the old root to the new root.
+ node_type *old_root = root();
+ node_type *new_root = iter.node;
+ new_root->transfer_n(old_root->count(), new_root->start(),
+ old_root->start(), old_root, alloc);
+ new_root->set_finish(old_root->finish());
+ old_root->set_finish(old_root->start());
+ node_type::clear_and_delete(old_root, alloc);
+ mutable_root() = rightmost_ = new_root;
} else {
rebalance_or_split(&iter);
}
}
- iter.node->emplace_value(iter.position, mutable_allocator(),
- std::forward<Args>(args)...);
+ iter.node->emplace_value(iter.position, alloc, std::forward<Args>(args)...);
++size_;
return iter;
}
@@ -2568,18 +2590,6 @@ auto btree<P>::internal_find(const K &key) const -> iterator {
}
template <typename P>
-void btree<P>::internal_clear(node_type *node) {
- if (!node->leaf()) {
- for (int i = node->start(); i <= node->finish(); ++i) {
- internal_clear(node->child(i));
- }
- delete_internal_node(node);
- } else {
- delete_leaf_node(node);
- }
-}
-
-template <typename P>
int btree<P>::internal_verify(const node_type *node, const key_type *lo,
const key_type *hi) const {
assert(node->count() > 0);
diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index f2e4c3a5..137614f8 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -68,10 +68,10 @@ class btree_container {
explicit btree_container(const key_compare &comp,
const allocator_type &alloc = allocator_type())
: tree_(comp, alloc) {}
- btree_container(const btree_container &x) = default;
- btree_container(btree_container &&x) noexcept = default;
- btree_container &operator=(const btree_container &x) = default;
- btree_container &operator=(btree_container &&x) noexcept(
+ btree_container(const btree_container &other) = default;
+ btree_container(btree_container &&other) noexcept = default;
+ btree_container &operator=(const btree_container &other) = default;
+ btree_container &operator=(btree_container &&other) noexcept(
std::is_nothrow_move_assignable<Tree>::value) = default;
// Iterator routines.
@@ -154,7 +154,7 @@ class btree_container {
public:
// Utility routines.
void clear() { tree_.clear(); }
- void swap(btree_container &x) { tree_.swap(x.tree_); }
+ void swap(btree_container &other) { tree_.swap(other.tree_); }
void verify() const { tree_.verify(); }
// Size routines.
@@ -257,42 +257,40 @@ class btree_set_container : public btree_container<Tree> {
}
// Insertion routines.
- std::pair<iterator, bool> insert(const value_type &x) {
- return this->tree_.insert_unique(params_type::key(x), x);
+ std::pair<iterator, bool> insert(const value_type &v) {
+ return this->tree_.insert_unique(params_type::key(v), v);
}
- std::pair<iterator, bool> insert(value_type &&x) {
- return this->tree_.insert_unique(params_type::key(x), std::move(x));
+ std::pair<iterator, bool> insert(value_type &&v) {
+ return this->tree_.insert_unique(params_type::key(v), std::move(v));
}
template <typename... Args>
std::pair<iterator, bool> emplace(Args &&... args) {
init_type v(std::forward<Args>(args)...);
return this->tree_.insert_unique(params_type::key(v), std::move(v));
}
- iterator insert(const_iterator position, const value_type &x) {
+ iterator insert(const_iterator hint, const value_type &v) {
return this->tree_
- .insert_hint_unique(iterator(position), params_type::key(x), x)
+ .insert_hint_unique(iterator(hint), params_type::key(v), v)
.first;
}
- iterator insert(const_iterator position, value_type &&x) {
+ iterator insert(const_iterator hint, value_type &&v) {
return this->tree_
- .insert_hint_unique(iterator(position), params_type::key(x),
- std::move(x))
+ .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
.first;
}
template <typename... Args>
- iterator emplace_hint(const_iterator position, Args &&... args) {
+ iterator emplace_hint(const_iterator hint, Args &&... args) {
init_type v(std::forward<Args>(args)...);
return this->tree_
- .insert_hint_unique(iterator(position), params_type::key(v),
- std::move(v))
+ .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
.first;
}
template <typename InputIterator>
void insert(InputIterator b, InputIterator e) {
- this->tree_.insert_iterator_unique(b, e);
+ this->tree_.insert_iterator_unique(b, e, 0);
}
void insert(std::initializer_list<init_type> init) {
- this->tree_.insert_iterator_unique(init.begin(), init.end());
+ this->tree_.insert_iterator_unique(init.begin(), init.end(), 0);
}
insert_return_type insert(node_type &&node) {
if (!node) return {this->end(), false, node_type()};
@@ -316,6 +314,8 @@ class btree_set_container : public btree_container<Tree> {
}
// Deletion routines.
+ // TODO(ezb): we should support heterogeneous comparators that have different
+ // behavior for K!=key_type.
template <typename K = key_type>
size_type erase(const key_arg<K> &key) {
return this->tree_.erase_unique(key);
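The hunk below generalizes the map mutators (insert_or_assign, try_emplace) from key_type to key_arg<K>, so with a transparent comparator the key argument no longer has to be materialized just to probe the tree. A usage sketch:

  #include <string>
  #include "absl/container/btree_map.h"
  #include "absl/strings/string_view.h"

  void HeterogeneousTryEmplaceSketch() {
    absl::btree_map<std::string, int> m;
    absl::string_view key = "example";
    // The lookup half of try_emplace compares `key` directly against the
    // stored std::strings; a std::string is constructed only if the insert
    // actually happens.
    m.try_emplace(key, 1);
  }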
@@ -392,111 +392,72 @@ class btree_map_container : public btree_set_container<Tree> {
// Insertion routines.
// Note: the nullptr template arguments and extra `const M&` overloads allow
// for supporting bitfield arguments.
- // Note: when we call `std::forward<M>(obj)` twice, it's safe because
- // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
- // `ret.second` is false.
- template <class M>
- std::pair<iterator, bool> insert_or_assign(const key_type &k, const M &obj) {
- const std::pair<iterator, bool> ret = this->tree_.insert_unique(k, k, obj);
- if (!ret.second) ret.first->second = obj;
- return ret;
+ template <typename K = key_type, class M>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k,
+ const M &obj) {
+ return insert_or_assign_impl(k, obj);
}
- template <class M, key_type * = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_type &&k, const M &obj) {
- const std::pair<iterator, bool> ret =
- this->tree_.insert_unique(k, std::move(k), obj);
- if (!ret.second) ret.first->second = obj;
- return ret;
+ template <typename K = key_type, class M, K * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj) {
+ return insert_or_assign_impl(std::forward<K>(k), obj);
}
- template <class M, M * = nullptr>
- std::pair<iterator, bool> insert_or_assign(const key_type &k, M &&obj) {
- const std::pair<iterator, bool> ret =
- this->tree_.insert_unique(k, k, std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
- return ret;
+ template <typename K = key_type, class M, M * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj) {
+ return insert_or_assign_impl(k, std::forward<M>(obj));
}
- template <class M, key_type * = nullptr, M * = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_type &&k, M &&obj) {
- const std::pair<iterator, bool> ret =
- this->tree_.insert_unique(k, std::move(k), std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
- return ret;
+ template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj) {
+ return insert_or_assign_impl(std::forward<K>(k), std::forward<M>(obj));
}
- template <class M>
- iterator insert_or_assign(const_iterator position, const key_type &k,
+ template <typename K = key_type, class M>
+ iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
const M &obj) {
- const std::pair<iterator, bool> ret =
- this->tree_.insert_hint_unique(iterator(position), k, k, obj);
- if (!ret.second) ret.first->second = obj;
- return ret.first;
+ return insert_or_assign_hint_impl(hint, k, obj);
}
- template <class M, key_type * = nullptr>
- iterator insert_or_assign(const_iterator position, key_type &&k,
- const M &obj) {
- const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
- iterator(position), k, std::move(k), obj);
- if (!ret.second) ret.first->second = obj;
- return ret.first;
+ template <typename K = key_type, class M, K * = nullptr>
+ iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, const M &obj) {
+ return insert_or_assign_hint_impl(hint, std::forward<K>(k), obj);
}
- template <class M, M * = nullptr>
- iterator insert_or_assign(const_iterator position, const key_type &k,
- M &&obj) {
- const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
- iterator(position), k, k, std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
- return ret.first;
+ template <typename K = key_type, class M, M * = nullptr>
+ iterator insert_or_assign(const_iterator hint, const key_arg<K> &k, M &&obj) {
+ return insert_or_assign_hint_impl(hint, k, std::forward<M>(obj));
}
- template <class M, key_type * = nullptr, M * = nullptr>
- iterator insert_or_assign(const_iterator position, key_type &&k, M &&obj) {
- const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
- iterator(position), k, std::move(k), std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
- return ret.first;
+ template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
+ iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, M &&obj) {
+ return insert_or_assign_hint_impl(hint, std::forward<K>(k),
+ std::forward<M>(obj));
}
- template <typename... Args>
- std::pair<iterator, bool> try_emplace(const key_type &k, Args &&... args) {
- return this->tree_.insert_unique(
- k, std::piecewise_construct, std::forward_as_tuple(k),
- std::forward_as_tuple(std::forward<Args>(args)...));
+
+ template <typename K = key_type, typename... Args,
+ typename absl::enable_if_t<
+ !std::is_convertible<K, const_iterator>::value, int> = 0>
+ std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&... args) {
+ return try_emplace_impl(k, std::forward<Args>(args)...);
}
- template <typename... Args>
- std::pair<iterator, bool> try_emplace(key_type &&k, Args &&... args) {
- // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k`
- // and then using `k` unsequenced. This is safe because the move is into a
- // forwarding reference and insert_unique guarantees that `key` is never
- // referenced after consuming `args`.
- const key_type &key_ref = k;
- return this->tree_.insert_unique(
- key_ref, std::piecewise_construct, std::forward_as_tuple(std::move(k)),
- std::forward_as_tuple(std::forward<Args>(args)...));
+ template <typename K = key_type, typename... Args,
+ typename absl::enable_if_t<
+ !std::is_convertible<K, const_iterator>::value, int> = 0>
+ std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&... args) {
+ return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
}
- template <typename... Args>
- iterator try_emplace(const_iterator hint, const key_type &k,
+ template <typename K = key_type, typename... Args>
+ iterator try_emplace(const_iterator hint, const key_arg<K> &k,
Args &&... args) {
- return this->tree_
- .insert_hint_unique(iterator(hint), k, std::piecewise_construct,
- std::forward_as_tuple(k),
- std::forward_as_tuple(std::forward<Args>(args)...))
- .first;
+ return try_emplace_hint_impl(hint, k, std::forward<Args>(args)...);
}
- template <typename... Args>
- iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) {
- // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k`
- // and then using `k` unsequenced. This is safe because the move is into a
- // forwarding reference and insert_hint_unique guarantees that `key` is
- // never referenced after consuming `args`.
- const key_type &key_ref = k;
- return this->tree_
- .insert_hint_unique(iterator(hint), key_ref, std::piecewise_construct,
- std::forward_as_tuple(std::move(k)),
- std::forward_as_tuple(std::forward<Args>(args)...))
- .first;
+ template <typename K = key_type, typename... Args>
+ iterator try_emplace(const_iterator hint, key_arg<K> &&k, Args &&... args) {
+ return try_emplace_hint_impl(hint, std::forward<K>(k),
+ std::forward<Args>(args)...);
}
- mapped_type &operator[](const key_type &k) {
+
+ template <typename K = key_type>
+ mapped_type &operator[](const key_arg<K> &k) {
return try_emplace(k).first->second;
}
- mapped_type &operator[](key_type &&k) {
- return try_emplace(std::move(k)).first->second;
+ template <typename K = key_type>
+ mapped_type &operator[](key_arg<K> &&k) {
+ return try_emplace(std::forward<K>(k)).first->second;
}
template <typename K = key_type>
@@ -513,6 +474,40 @@ class btree_map_container : public btree_set_container<Tree> {
base_internal::ThrowStdOutOfRange("absl::btree_map::at");
return it->second;
}
+
+ private:
+ // Note: when we call `std::forward<M>(obj)` twice, it's safe because
+ // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
+ // `ret.second` is false.
+ template <class K, class M>
+ std::pair<iterator, bool> insert_or_assign_impl(K &&k, M &&obj) {
+ const std::pair<iterator, bool> ret =
+ this->tree_.insert_unique(k, std::forward<K>(k), std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret;
+ }
+ template <class K, class M>
+ iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) {
+ const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
+ iterator(hint), k, std::forward<K>(k), std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret.first;
+ }
+
+ template <class K, class... Args>
+ std::pair<iterator, bool> try_emplace_impl(K &&k, Args &&... args) {
+ return this->tree_.insert_unique(
+ k, std::piecewise_construct, std::forward_as_tuple(std::forward<K>(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ }
+ template <class K, class... Args>
+ iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) {
+ return this->tree_
+ .insert_hint_unique(iterator(hint), k, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<K>(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...))
+ .first;
+ }
};
// A common base class for btree_multiset and btree_multimap.
@@ -562,15 +557,15 @@ class btree_multiset_container : public btree_container<Tree> {
}
// Insertion routines.
- iterator insert(const value_type &x) { return this->tree_.insert_multi(x); }
- iterator insert(value_type &&x) {
- return this->tree_.insert_multi(std::move(x));
+ iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
+ iterator insert(value_type &&v) {
+ return this->tree_.insert_multi(std::move(v));
}
- iterator insert(const_iterator position, const value_type &x) {
- return this->tree_.insert_hint_multi(iterator(position), x);
+ iterator insert(const_iterator hint, const value_type &v) {
+ return this->tree_.insert_hint_multi(iterator(hint), v);
}
- iterator insert(const_iterator position, value_type &&x) {
- return this->tree_.insert_hint_multi(iterator(position), std::move(x));
+ iterator insert(const_iterator hint, value_type &&v) {
+ return this->tree_.insert_hint_multi(iterator(hint), std::move(v));
}
template <typename InputIterator>
void insert(InputIterator b, InputIterator e) {
@@ -584,9 +579,9 @@ class btree_multiset_container : public btree_container<Tree> {
return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
}
template <typename... Args>
- iterator emplace_hint(const_iterator position, Args &&... args) {
+ iterator emplace_hint(const_iterator hint, Args &&... args) {
return this->tree_.insert_hint_multi(
- iterator(position), init_type(std::forward<Args>(args)...));
+ iterator(hint), init_type(std::forward<Args>(args)...));
}
iterator insert(node_type &&node) {
if (!node) return this->end();
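
The insert_or_assign_impl/insert_or_assign_hint_impl consolidation above leans on the guarantee spelled out in its note: the underlying insert never consumes `obj` unless the insertion succeeds, so forwarding it twice is safe. Here is the same pattern in miniature, using std::map rather than the btree internals (InsertOrAssignSketch and InsertOrAssignDemo are illustrations, not part of this change):

#include <map>
#include <string>
#include <utility>

// std::map::try_emplace, like insert_unique above, is guaranteed not to
// move from its arguments when the key already exists, so the second
// std::forward<M>(obj) below never reads a moved-from object.
template <class Map, class K, class M>
std::pair<typename Map::iterator, bool> InsertOrAssignSketch(Map& map, K&& k,
                                                             M&& obj) {
  auto ret = map.try_emplace(std::forward<K>(k), std::forward<M>(obj));
  if (!ret.second) ret.first->second = std::forward<M>(obj);
  return ret;
}

void InsertOrAssignDemo() {
  std::map<std::string, int> m;
  InsertOrAssignSketch(m, "a", 1);  // inserts {"a", 1}
  InsertOrAssignSketch(m, "a", 2);  // key exists: assigns 2
}
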
diff --git a/absl/container/internal/common.h b/absl/container/internal/common.h
index 5037d803..030e9d4a 100644
--- a/absl/container/internal/common.h
+++ b/absl/container/internal/common.h
@@ -138,6 +138,7 @@ class node_handle<Policy, PolicyTraits, Alloc,
absl::void_t<typename Policy::mapped_type>>
: public node_handle_base<PolicyTraits, Alloc> {
using Base = node_handle_base<PolicyTraits, Alloc>;
+ using slot_type = typename PolicyTraits::slot_type;
public:
using key_type = typename Policy::key_type;
@@ -145,8 +146,11 @@ class node_handle<Policy, PolicyTraits, Alloc,
constexpr node_handle() {}
- auto key() const -> decltype(PolicyTraits::key(this->slot())) {
- return PolicyTraits::key(this->slot());
+ // When C++17 is available, we can use std::launder to provide mutable
+ // access to the key. Otherwise, we provide const access.
+ auto key() const
+ -> decltype(PolicyTraits::mutable_key(std::declval<slot_type*>())) {
+ return PolicyTraits::mutable_key(this->slot());
}
mapped_type& mapped() const {
diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
index 4bfe92fd..02bfd03f 100644
--- a/absl/container/internal/compressed_tuple.h
+++ b/absl/container/internal/compressed_tuple.h
@@ -169,9 +169,33 @@ constexpr bool ShouldAnyUseBase() {
}
template <typename T, typename V>
-using TupleMoveConstructible = typename std::conditional<
- std::is_reference<T>::value, std::is_convertible<V, T>,
- std::is_constructible<T, V&&>>::type;
+using TupleElementMoveConstructible =
+ typename std::conditional<std::is_reference<T>::value,
+ std::is_convertible<V, T>,
+ std::is_constructible<T, V&&>>::type;
+
+template <bool SizeMatches, class T, class... Vs>
+struct TupleMoveConstructible : std::false_type {};
+
+template <class... Ts, class... Vs>
+struct TupleMoveConstructible<true, CompressedTuple<Ts...>, Vs...>
+ : std::integral_constant<
+ bool, absl::conjunction<
+ TupleElementMoveConstructible<Ts, Vs&&>...>::value> {};
+
+template <typename T>
+struct compressed_tuple_size;
+
+template <typename... Es>
+struct compressed_tuple_size<CompressedTuple<Es...>>
+ : public std::integral_constant<std::size_t, sizeof...(Es)> {};
+
+template <class T, class... Vs>
+struct TupleItemsMoveConstructible
+ : std::integral_constant<
+ bool, TupleMoveConstructible<compressed_tuple_size<T>::value ==
+ sizeof...(Vs),
+ T, Vs...>::value> {};
} // namespace internal_compressed_tuple
@@ -217,17 +241,18 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
explicit constexpr CompressedTuple(const Ts&... base)
: CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {}
- template <typename... Vs,
+ template <typename First, typename... Vs,
absl::enable_if_t<
absl::conjunction<
// Ensure we are not hiding default copy/move constructors.
absl::negation<std::is_same<void(CompressedTuple),
- void(absl::decay_t<Vs>...)>>,
- internal_compressed_tuple::TupleMoveConstructible<
- Ts, Vs&&>...>::value,
+ void(absl::decay_t<First>)>>,
+ internal_compressed_tuple::TupleItemsMoveConstructible<
+ CompressedTuple<Ts...>, First, Vs...>>::value,
bool> = true>
- explicit constexpr CompressedTuple(Vs&&... base)
+ explicit constexpr CompressedTuple(First&& first, Vs&&... base)
: CompressedTuple::CompressedTupleImpl(absl::in_place,
+ absl::forward<First>(first),
absl::forward<Vs>(base)...) {}
template <int I>
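
The First/Vs... split plus the sizeof... arity check above keep the forwarding constructor from out-competing copy and move construction (for instance, when one CompressedTuple is constructed from another). The guard pattern in isolation, with a toy Holder type standing in for the real machinery:

#include <type_traits>
#include <utility>

// A guarded perfect-forwarding constructor: it is removed from overload
// resolution when its single argument is (a cv-qualified reference to)
// Holder itself, so the defaulted copy and move constructors keep winning.
template <typename T>
class Holder {
 public:
  Holder(const Holder&) = default;
  Holder(Holder&&) = default;

  template <typename V,
            typename std::enable_if<
                !std::is_same<typename std::decay<V>::type, Holder>::value &&
                    std::is_constructible<T, V&&>::value,
                bool>::type = true>
  explicit Holder(V&& v) : value_(std::forward<V>(v)) {}

 private:
  T value_;
};
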
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
index 1dae12db..62a7483e 100644
--- a/absl/container/internal/compressed_tuple_test.cc
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -277,11 +277,11 @@ TEST(CompressedTupleTest, Nested) {
TEST(CompressedTupleTest, Reference) {
int i = 7;
- std::string s = "Very long std::string that goes in the heap";
+ std::string s = "Very long string that goes in the heap";
CompressedTuple<int, int&, std::string, std::string&> x(i, i, s, s);
// Sanity check. We should not have moved from `s`.
- EXPECT_EQ(s, "Very long std::string that goes in the heap");
+ EXPECT_EQ(s, "Very long string that goes in the heap");
EXPECT_EQ(x.get<0>(), x.get<1>());
EXPECT_NE(&x.get<0>(), &x.get<1>());
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index d24b0f84..e67529ec 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -15,28 +15,34 @@
#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
-#ifdef ADDRESS_SANITIZER
-#include <sanitizer/asan_interface.h>
-#endif
-
-#ifdef MEMORY_SANITIZER
-#include <sanitizer/msan_interface.h>
-#endif
-
#include <cassert>
#include <cstddef>
#include <memory>
+#include <new>
#include <tuple>
#include <type_traits>
#include <utility>
+#include "absl/base/config.h"
#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+#include <sanitizer/msan_interface.h>
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+template <size_t Alignment>
+struct alignas(Alignment) AlignedType {};
+
// Allocates at least n bytes aligned to the specified alignment.
// Alignment must be a power of 2. It must be positive.
//
@@ -48,11 +54,14 @@ template <size_t Alignment, class Alloc>
void* Allocate(Alloc* alloc, size_t n) {
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
- struct alignas(Alignment) M {};
+ using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
- A mem_alloc(*alloc);
- void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
+ // On macOS, "mem_alloc" is a #define with one argument defined in
+ // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
+ // with the "foo(bar)" syntax.
+ A my_mem_alloc(*alloc);
+ void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
"allocator does not respect alignment");
return p;
@@ -64,11 +73,14 @@ template <size_t Alignment, class Alloc>
void Deallocate(Alloc* alloc, void* p, size_t n) {
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
- struct alignas(Alignment) M {};
+ using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
- A mem_alloc(*alloc);
- AT::deallocate(mem_alloc, static_cast<M*>(p),
+ // On macOS, "mem_alloc" is a #define with one argument defined in
+ // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
+ // with the "foo(bar)" syntax.
+ A my_mem_alloc(*alloc);
+ AT::deallocate(my_mem_alloc, static_cast<M*>(p),
(n + sizeof(M) - 1) / sizeof(M));
}
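
A usage sketch for the two helpers above; AlignedScratchDemo is hypothetical, but the calls match the signatures shown. Because the allocator is rebound to AlignedType<Alignment> internally, the requested alignment is independent of the allocator's own value_type:

#include <memory>

#include "absl/container/internal/container_memory.h"

void AlignedScratchDemo() {
  std::allocator<char> alloc;
  // At least 100 bytes, 64-byte aligned; n is rounded up internally to a
  // whole number of AlignedType<64> units.
  void* p = absl::container_internal::Allocate<64>(&alloc, 100);
  absl::container_internal::Deallocate<64>(&alloc, p, 100);
}
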
@@ -205,10 +217,10 @@ DecomposeValue(F&& f, Arg&& arg) {
// Helper functions for asan and msan.
inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(m, s);
#endif
-#ifdef MEMORY_SANITIZER
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_poison(m, s);
#endif
(void)m;
@@ -216,10 +228,10 @@ inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
}
inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_UNPOISON_MEMORY_REGION(m, s);
#endif
-#ifdef MEMORY_SANITIZER
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_unpoison(m, s);
#endif
(void)m;
@@ -246,8 +258,8 @@ namespace memory_internal {
// type, which is non-portable.
template <class Pair, class = std::true_type>
struct OffsetOf {
- static constexpr size_t kFirst = -1;
- static constexpr size_t kSecond = -1;
+ static constexpr size_t kFirst = static_cast<size_t>(-1);
+ static constexpr size_t kSecond = static_cast<size_t>(-1);
};
template <class Pair>
@@ -316,11 +328,12 @@ union map_slot_type {
map_slot_type() {}
~map_slot_type() = delete;
using value_type = std::pair<const K, V>;
- using mutable_value_type = std::pair<K, V>;
+ using mutable_value_type =
+ std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>;
value_type value;
mutable_value_type mutable_value;
- K key;
+ absl::remove_const_t<K> key;
};
template <class K, class V>
@@ -346,6 +359,20 @@ struct map_slot_policy {
return slot->value;
}
+ // When C++17 is available, we can use std::launder to provide mutable
+ // access to the key for use in node handle.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+ static K& mutable_key(slot_type* slot) {
+ // Still check for kMutableKeys so that we can avoid calling std::launder
+ // unless necessary because it can interfere with optimizations.
+ return kMutableKeys::value ? slot->key
+ : *std::launder(const_cast<K*>(
+ std::addressof(slot->value.first)));
+ }
+#else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606)
+ static const K& mutable_key(slot_type* slot) { return key(slot); }
+#endif
+
static const K& key(const slot_type* slot) {
return kMutableKeys::value ? slot->key : slot->value.first;
}
@@ -424,13 +451,6 @@ struct map_slot_policy {
std::move(src->value));
}
}
-
- template <class Allocator>
- static void move(Allocator* alloc, slot_type* first, slot_type* last,
- slot_type* result) {
- for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
- move(alloc, src, dest);
- }
};
} // namespace container_internal
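
Both mutable_key above and the node_handle::key() change in common.h hinge on std::launder. Stripped of the container machinery, this is the core C++17 rule being used, sketched with a type that has a const member the way std::pair<const K, V> does (Node and ReuseStorage are illustrative only):

#include <new>  // placement new and std::launder (C++17)

struct Node {
  const int key;  // const member, like the key of std::pair<const K, V>
  int value;
};

int ReuseStorage() {
  alignas(Node) unsigned char buf[sizeof(Node)];
  Node* n = new (buf) Node{1, 10};
  n->~Node();
  new (buf) Node{2, 20};  // a new object in the same storage
  // Because Node has a const member, pointers to the old object may not
  // simply be reused to reach the new one; std::launder yields a pointer
  // that is valid for the object currently living at that address.
  return std::launder(reinterpret_cast<Node*>(buf))->key;  // returns 2
}
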
diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc
index 7942c7be..6a7fcd29 100644
--- a/absl/container/internal/container_memory_test.cc
+++ b/absl/container/internal/container_memory_test.cc
@@ -16,10 +16,13 @@
#include <cstdint>
#include <tuple>
+#include <typeindex>
+#include <typeinfo>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/container/internal/test_instance_tracker.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -27,6 +30,11 @@ ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
+using ::absl::test_internal::CopyableMovableInstance;
+using ::absl::test_internal::InstanceTracker;
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::Gt;
using ::testing::Pair;
TEST(Memory, AlignmentLargerThanBase) {
@@ -45,6 +53,39 @@ TEST(Memory, AlignmentSmallerThanBase) {
Deallocate<2>(&alloc, mem, 3);
}
+std::map<std::type_index, int>& AllocationMap() {
+ static auto* map = new std::map<std::type_index, int>;
+ return *map;
+}
+
+template <typename T>
+struct TypeCountingAllocator {
+ TypeCountingAllocator() = default;
+ template <typename U>
+ TypeCountingAllocator(const TypeCountingAllocator<U>&) {} // NOLINT
+
+ using value_type = T;
+
+ T* allocate(size_t n, const void* = nullptr) {
+ AllocationMap()[typeid(T)] += n;
+ return std::allocator<T>().allocate(n);
+ }
+ void deallocate(T* p, std::size_t n) {
+ AllocationMap()[typeid(T)] -= n;
+ return std::allocator<T>().deallocate(p, n);
+ }
+};
+
+TEST(Memory, AllocateDeallocateMatchType) {
+ TypeCountingAllocator<int> alloc;
+ void* mem = Allocate<1>(&alloc, 1);
+  // Verify that it was allocated.
+ EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, Gt(0))));
+ Deallocate<1>(&alloc, mem, 1);
+ // Verify that the deallocation matched.
+ EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, 0)));
+}
+
class Fixture : public ::testing::Test {
using Alloc = std::allocator<std::string>;
@@ -184,6 +225,31 @@ TEST(DecomposePair, NotDecomposable) {
std::make_tuple(0.5)));
}
+TEST(MapSlotPolicy, ConstKeyAndValue) {
+ using slot_policy = map_slot_policy<const CopyableMovableInstance,
+ const CopyableMovableInstance>;
+ using slot_type = typename slot_policy::slot_type;
+
+ union Slots {
+ Slots() {}
+ ~Slots() {}
+ slot_type slots[100];
+ } slots;
+
+ std::allocator<
+ std::pair<const CopyableMovableInstance, const CopyableMovableInstance>>
+ alloc;
+ InstanceTracker tracker;
+ slot_policy::construct(&alloc, &slots.slots[0], CopyableMovableInstance(1),
+ CopyableMovableInstance(1));
+ for (int i = 0; i < 99; ++i) {
+ slot_policy::transfer(&alloc, &slots.slots[i + 1], &slots.slots[i]);
+ }
+ slot_policy::destroy(&alloc, &slots.slots[99]);
+
+ EXPECT_EQ(tracker.copies(), 0);
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/counting_allocator.h b/absl/container/internal/counting_allocator.h
index 9efdc662..927cf082 100644
--- a/absl/container/internal/counting_allocator.h
+++ b/absl/container/internal/counting_allocator.h
@@ -15,7 +15,6 @@
#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
-#include <cassert>
#include <cstdint>
#include <memory>
@@ -31,33 +30,63 @@ namespace container_internal {
// containers - that chain of allocators uses the same state and is
// thus easier to query for aggregate allocation information.
template <typename T>
-class CountingAllocator : public std::allocator<T> {
+class CountingAllocator {
public:
- using Alloc = std::allocator<T>;
- using pointer = typename Alloc::pointer;
- using size_type = typename Alloc::size_type;
+ using Allocator = std::allocator<T>;
+ using AllocatorTraits = std::allocator_traits<Allocator>;
+ using value_type = typename AllocatorTraits::value_type;
+ using pointer = typename AllocatorTraits::pointer;
+ using const_pointer = typename AllocatorTraits::const_pointer;
+ using size_type = typename AllocatorTraits::size_type;
+ using difference_type = typename AllocatorTraits::difference_type;
- CountingAllocator() : bytes_used_(nullptr) {}
- explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
+ CountingAllocator() = default;
+ explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {}
+ CountingAllocator(int64_t* bytes_used, int64_t* instance_count)
+ : bytes_used_(bytes_used), instance_count_(instance_count) {}
template <typename U>
CountingAllocator(const CountingAllocator<U>& x)
- : Alloc(x), bytes_used_(x.bytes_used_) {}
+ : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {}
- pointer allocate(size_type n,
- std::allocator<void>::const_pointer hint = nullptr) {
- assert(bytes_used_ != nullptr);
- *bytes_used_ += n * sizeof(T);
- return Alloc::allocate(n, hint);
+ pointer allocate(
+ size_type n,
+ typename AllocatorTraits::const_void_pointer hint = nullptr) {
+ Allocator allocator;
+ pointer ptr = AllocatorTraits::allocate(allocator, n, hint);
+ if (bytes_used_ != nullptr) {
+ *bytes_used_ += n * sizeof(T);
+ }
+ return ptr;
}
void deallocate(pointer p, size_type n) {
- Alloc::deallocate(p, n);
- assert(bytes_used_ != nullptr);
- *bytes_used_ -= n * sizeof(T);
+ Allocator allocator;
+ AllocatorTraits::deallocate(allocator, p, n);
+ if (bytes_used_ != nullptr) {
+ *bytes_used_ -= n * sizeof(T);
+ }
}
- template<typename U>
+ template <typename U, typename... Args>
+ void construct(U* p, Args&&... args) {
+ Allocator allocator;
+ AllocatorTraits::construct(allocator, p, std::forward<Args>(args)...);
+ if (instance_count_ != nullptr) {
+ *instance_count_ += 1;
+ }
+ }
+
+ template <typename U>
+ void destroy(U* p) {
+ Allocator allocator;
+ AllocatorTraits::destroy(allocator, p);
+ if (instance_count_ != nullptr) {
+ *instance_count_ -= 1;
+ }
+ }
+
+ template <typename U>
class rebind {
public:
using other = CountingAllocator<U>;
@@ -65,7 +94,8 @@ class CountingAllocator : public std::allocator<T> {
friend bool operator==(const CountingAllocator& a,
const CountingAllocator& b) {
- return a.bytes_used_ == b.bytes_used_;
+ return a.bytes_used_ == b.bytes_used_ &&
+ a.instance_count_ == b.instance_count_;
}
friend bool operator!=(const CountingAllocator& a,
@@ -73,7 +103,8 @@ class CountingAllocator : public std::allocator<T> {
return !(a == b);
}
- int64_t* bytes_used_;
+ int64_t* bytes_used_ = nullptr;
+ int64_t* instance_count_ = nullptr;
};
} // namespace container_internal
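
With the rewrite, both counters are optional and tracked independently; a sketch of typical test wiring (CountingDemo is illustrative, not a test from this change):

#include <cstdint>
#include <vector>

#include "absl/container/internal/counting_allocator.h"

void CountingDemo() {
  int64_t bytes_used = 0;
  int64_t instance_count = 0;
  using Alloc = absl::container_internal::CountingAllocator<int>;
  Alloc alloc(&bytes_used, &instance_count);

  {
    std::vector<int, Alloc> v(alloc);
    v.push_back(1);
    v.push_back(2);
    // bytes_used reflects the vector's current heap block;
    // instance_count reflects elements constructed through the allocator.
  }
  // The vector returned and destroyed everything: both counters are 0 again.
}
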
diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h
index 401ddf4d..0683422a 100644
--- a/absl/container/internal/hash_function_defaults.h
+++ b/absl/container/internal/hash_function_defaults.h
@@ -53,6 +53,7 @@
#include "absl/base/config.h"
#include "absl/hash/hash.h"
+#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -72,6 +73,9 @@ struct StringHash {
size_t operator()(absl::string_view v) const {
return absl::Hash<absl::string_view>{}(v);
}
+ size_t operator()(const absl::Cord& v) const {
+ return absl::Hash<absl::Cord>{}(v);
+ }
};
// Supports heterogeneous lookup for string-like elements.
@@ -82,6 +86,15 @@ struct StringHashEq {
bool operator()(absl::string_view lhs, absl::string_view rhs) const {
return lhs == rhs;
}
+ bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
+ return lhs == rhs;
+ }
};
};
@@ -89,6 +102,8 @@ template <>
struct HashEq<std::string> : StringHashEq {};
template <>
struct HashEq<absl::string_view> : StringHashEq {};
+template <>
+struct HashEq<absl::Cord> : StringHashEq {};
// Supports heterogeneous lookup for pointers and smart pointers.
template <class T>
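
The new HashEq<absl::Cord> specialization gives Cord keys the same heterogeneous-lookup treatment that std::string and absl::string_view already receive; a usage sketch (CordLookupDemo is illustrative):

#include "absl/container/flat_hash_set.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"

bool CordLookupDemo() {
  absl::flat_hash_set<absl::Cord> set;
  set.insert(absl::Cord("hello"));
  // Heterogeneous lookup: no temporary Cord is constructed for the probe.
  return set.contains(absl::string_view("hello"));
}
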
diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc
index 2eefc7e0..59576b8e 100644
--- a/absl/container/internal/hash_function_defaults_test.cc
+++ b/absl/container/internal/hash_function_defaults_test.cc
@@ -19,6 +19,9 @@
#include <utility>
#include "gtest/gtest.h"
+#include "absl/random/random.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/cord_test_helpers.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -203,10 +206,91 @@ TYPED_TEST(HashPointer, Works) {
EXPECT_NE(hash(&dummy), hash(cuptr));
}
+TEST(EqCord, Works) {
+ hash_default_eq<absl::Cord> eq;
+ const absl::string_view a_string_view = "a";
+ const absl::Cord a_cord(a_string_view);
+ const absl::string_view b_string_view = "b";
+ const absl::Cord b_cord(b_string_view);
+
+ EXPECT_TRUE(eq(a_cord, a_cord));
+ EXPECT_TRUE(eq(a_cord, a_string_view));
+ EXPECT_TRUE(eq(a_string_view, a_cord));
+ EXPECT_FALSE(eq(a_cord, b_cord));
+ EXPECT_FALSE(eq(a_cord, b_string_view));
+ EXPECT_FALSE(eq(b_string_view, a_cord));
+}
+
+TEST(HashCord, Works) {
+ hash_default_hash<absl::Cord> hash;
+ const absl::string_view a_string_view = "a";
+ const absl::Cord a_cord(a_string_view);
+ const absl::string_view b_string_view = "b";
+ const absl::Cord b_cord(b_string_view);
+
+ EXPECT_EQ(hash(a_cord), hash(a_cord));
+ EXPECT_EQ(hash(b_cord), hash(b_cord));
+ EXPECT_EQ(hash(a_string_view), hash(a_cord));
+ EXPECT_EQ(hash(b_string_view), hash(b_cord));
+ EXPECT_EQ(hash(absl::Cord("")), hash(""));
+ EXPECT_EQ(hash(absl::Cord()), hash(absl::string_view()));
+
+ EXPECT_NE(hash(a_cord), hash(b_cord));
+ EXPECT_NE(hash(a_cord), hash(b_string_view));
+ EXPECT_NE(hash(a_string_view), hash(b_cord));
+ EXPECT_NE(hash(a_string_view), hash(b_string_view));
+}
+
+void NoOpReleaser(absl::string_view data, void* arg) {}
+
+TEST(HashCord, FragmentedCordWorks) {
+ hash_default_hash<absl::Cord> hash;
+ absl::Cord c = absl::MakeFragmentedCord({"a", "b", "c"});
+ EXPECT_FALSE(c.TryFlat().has_value());
+ EXPECT_EQ(hash(c), hash("abc"));
+}
+
+TEST(HashCord, FragmentedLongCordWorks) {
+ hash_default_hash<absl::Cord> hash;
+  // Create some large strings which do not fit on the stack.
+ std::string a(65536, 'a');
+ std::string b(65536, 'b');
+ absl::Cord c = absl::MakeFragmentedCord({a, b});
+ EXPECT_FALSE(c.TryFlat().has_value());
+ EXPECT_EQ(hash(c), hash(a + b));
+}
+
+TEST(HashCord, RandomCord) {
+ hash_default_hash<absl::Cord> hash;
+ auto bitgen = absl::BitGen();
+ for (int i = 0; i < 1000; ++i) {
+ const int number_of_segments = absl::Uniform(bitgen, 0, 10);
+ std::vector<std::string> pieces;
+    for (int s = 0; s < number_of_segments; ++s) {
+ std::string str;
+ str.resize(absl::Uniform(bitgen, 0, 4096));
+ // MSVC needed the explicit return type in the lambda.
+ std::generate(str.begin(), str.end(), [&]() -> char {
+ return static_cast<char>(absl::Uniform<unsigned char>(bitgen));
+ });
+ pieces.push_back(str);
+ }
+ absl::Cord c = absl::MakeFragmentedCord(pieces);
+ EXPECT_EQ(hash(c), hash(std::string(c)));
+ }
+}
+
// Cartesian product of (std::string, absl::string_view)
-// with (std::string, absl::string_view, const char*).
+// with (std::string, absl::string_view, const char*, absl::Cord).
using StringTypesCartesianProduct = Types<
// clang-format off
+ std::pair<absl::Cord, std::string>,
+ std::pair<absl::Cord, absl::string_view>,
+ std::pair<absl::Cord, absl::Cord>,
+ std::pair<absl::Cord, const char*>,
+
+ std::pair<std::string, absl::Cord>,
+ std::pair<absl::string_view, absl::Cord>,
std::pair<absl::string_view, std::string>,
std::pair<absl::string_view, absl::string_view>,
@@ -253,11 +337,11 @@ ABSL_NAMESPACE_END
} // namespace absl
enum Hash : size_t {
- kStd = 0x2, // std::hash
+ kStd = 0x1, // std::hash
#ifdef _MSC_VER
kExtension = kStd, // In MSVC, std::hash == ::hash
#else // _MSC_VER
- kExtension = 0x4, // ::hash (GCC extension)
+ kExtension = 0x2, // ::hash (GCC extension)
#endif // _MSC_VER
};
diff --git a/absl/container/internal/hash_generator_testing.cc b/absl/container/internal/hash_generator_testing.cc
index 75c4db6c..59cc5aac 100644
--- a/absl/container/internal/hash_generator_testing.cc
+++ b/absl/container/internal/hash_generator_testing.cc
@@ -41,8 +41,10 @@ class RandomDeviceSeedSeq {
} // namespace
std::mt19937_64* GetSharedRng() {
- RandomDeviceSeedSeq seed_seq;
- static auto* rng = new std::mt19937_64(seed_seq);
+ static auto* rng = [] {
+ RandomDeviceSeedSeq seed_seq;
+ return new std::mt19937_64(seed_seq);
+ }();
return rng;
}
diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h
index 3e1209c6..46c97b18 100644
--- a/absl/container/internal/hash_policy_traits.h
+++ b/absl/container/internal/hash_policy_traits.h
@@ -17,6 +17,7 @@
#include <cstddef>
#include <memory>
+#include <new>
#include <type_traits>
#include <utility>
@@ -29,15 +30,34 @@ namespace container_internal {
// Defines how slots are initialized/destroyed/moved.
template <class Policy, class = void>
struct hash_policy_traits {
+ // The type of the keys stored in the hashtable.
+ using key_type = typename Policy::key_type;
+
private:
struct ReturnKey {
- // We return `Key` here.
+ // When C++17 is available, we can use std::launder to provide mutable
+ // access to the key for use in node handle.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+ template <class Key,
+ absl::enable_if_t<std::is_lvalue_reference<Key>::value, int> = 0>
+ static key_type& Impl(Key&& k, int) {
+ return *std::launder(
+ const_cast<key_type*>(std::addressof(std::forward<Key>(k))));
+ }
+#endif
+
+ template <class Key>
+ static Key Impl(Key&& k, char) {
+ return std::forward<Key>(k);
+ }
+
// When Key=T&, we forward the lvalue reference.
// When Key=T, we return by value to avoid a dangling reference.
// eg, for string_hash_map.
template <class Key, class... Args>
- Key operator()(Key&& k, const Args&...) const {
- return std::forward<Key>(k);
+ auto operator()(Key&& k, const Args&...) const
+ -> decltype(Impl(std::forward<Key>(k), 0)) {
+ return Impl(std::forward<Key>(k), 0);
}
};
@@ -52,9 +72,6 @@ struct hash_policy_traits {
// The actual object stored in the hash table.
using slot_type = typename Policy::slot_type;
- // The type of the keys stored in the hashtable.
- using key_type = typename Policy::key_type;
-
// The argument type for insertions into the hashtable. This is different
// from value_type for increased performance. See initializer_list constructor
// and insert() member functions for more details.
@@ -156,7 +173,7 @@ struct hash_policy_traits {
// Returns the "key" portion of the slot.
// Used for node handle manipulation.
template <class P = Policy>
- static auto key(slot_type* slot)
+ static auto mutable_key(slot_type* slot)
-> decltype(P::apply(ReturnKey(), element(slot))) {
return P::apply(ReturnKey(), element(slot));
}
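
The Impl(std::forward<Key>(k), 0) call above uses int-versus-char overload ranking: the literal 0 is an int, so the laundering overload wins whenever SFINAE lets it participate, and the char overload is the fallback. The idiom in isolation (Chooser is a toy type):

#include <type_traits>

// Chooser<true>::Pick()  selects Impl(int)  -> "preferred" (exact match).
// Chooser<false>::Pick() selects Impl(char) -> "fallback"  (int converts
// to char once SFINAE removes the preferred overload).
template <bool Enabled>
struct Chooser {
  template <bool E = Enabled, typename std::enable_if<E, int>::type = 0>
  static const char* Impl(int) { return "preferred"; }
  static const char* Impl(char) { return "fallback"; }
  static const char* Pick() { return Impl(0); }
};
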
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 56447251..e4484fbb 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -67,6 +67,7 @@ void HashtablezInfo::PrepareForSampling() {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
+ num_rehashes.store(0, std::memory_order_relaxed);
max_probe_length.store(0, std::memory_order_relaxed);
total_probe_length.store(0, std::memory_order_relaxed);
hashes_bitwise_or.store(0, std::memory_order_relaxed);
@@ -226,7 +227,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
// SwissTables probe in groups of 16, so scale this to count items probes and
// not offset from desired.
size_t probe_length = distance_from_desired;
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
probe_length /= 16;
#else
probe_length /= 8;
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 34d5e572..394348da 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -73,6 +73,7 @@ struct HashtablezInfo {
std::atomic<size_t> capacity;
std::atomic<size_t> size;
std::atomic<size_t> num_erases;
+ std::atomic<size_t> num_rehashes;
std::atomic<size_t> max_probe_length;
std::atomic<size_t> total_probe_length;
std::atomic<size_t> hashes_bitwise_or;
@@ -98,13 +99,18 @@ struct HashtablezInfo {
};
inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
total_probe_length /= 16;
#else
total_probe_length /= 8;
#endif
info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
+ // There is only one concurrent writer, so `load` then `store` is sufficient
+ // instead of using `fetch_add`.
+ info->num_rehashes.store(
+ 1 + info->num_rehashes.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
}
inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
@@ -113,7 +119,8 @@ inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
info->capacity.store(capacity, std::memory_order_relaxed);
if (size == 0) {
// This is a clear, reset the total/num_erases too.
- RecordRehashSlow(info, 0);
+ info->total_probe_length.store(0, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
}
}
@@ -122,12 +129,21 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
inline void RecordEraseSlow(HashtablezInfo* info) {
info->size.fetch_sub(1, std::memory_order_relaxed);
- info->num_erases.fetch_add(1, std::memory_order_relaxed);
+ // There is only one concurrent writer, so `load` then `store` is sufficient
+ // instead of using `fetch_add`.
+ info->num_erases.store(
+ 1 + info->num_erases.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
}
HashtablezInfo* SampleSlow(int64_t* next_sample);
void UnsampleSlow(HashtablezInfo* info);
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandle {
public:
explicit HashtablezInfoHandle() : info_(nullptr) {}
@@ -179,19 +195,27 @@ class HashtablezInfoHandle {
friend class HashtablezInfoHandlePeer;
HashtablezInfo* info_;
};
+#else
+// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can
+// be removed by the linker, in order to reduce the binary size.
+class HashtablezInfoHandle {
+ public:
+ explicit HashtablezInfoHandle() = default;
+ explicit HashtablezInfoHandle(std::nullptr_t) {}
-#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
-#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
+ inline void RecordRehash(size_t /*total_probe_length*/) {}
+ inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
+ inline void RecordErase() {}
-#if (ABSL_PER_THREAD_TLS == 1) && !defined(ABSL_BUILD_DLL) && \
- !defined(ABSL_CONSUME_DLL)
-#define ABSL_INTERNAL_HASHTABLEZ_SAMPLE
-#endif
+ friend inline void swap(HashtablezInfoHandle& /*lhs*/,
+ HashtablezInfoHandle& /*rhs*/) {}
+};
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
-#endif // ABSL_PER_THREAD_TLS
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.
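
The load-then-store idiom used for num_erases and num_rehashes above, in isolation. It is only correct under the invariant stated in the comments, namely that a single thread ever writes the counter:

#include <atomic>
#include <cstddef>

// Single-writer counter: only the owning table mutates it, so splitting
// the increment into a relaxed load plus a relaxed store is sufficient and
// avoids the cost of an atomic fetch_add. Concurrent readers (the sampler)
// may observe a slightly stale value, which is fine for statistics.
inline void IncrementSingleWriter(std::atomic<size_t>* counter) {
  counter->store(1 + counter->load(std::memory_order_relaxed),
                 std::memory_order_relaxed);
}
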
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index 36f5ccdd..8d10a1e9 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -29,7 +29,7 @@
#include "absl/time/clock.h"
#include "absl/time/time.h"
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
constexpr int kProbeLength = 16;
#else
constexpr int kProbeLength = 8;
@@ -38,6 +38,7 @@ constexpr int kProbeLength = 8;
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandlePeer {
public:
static bool IsSampled(const HashtablezInfoHandle& h) {
@@ -46,6 +47,13 @@ class HashtablezInfoHandlePeer {
static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
};
+#else
+class HashtablezInfoHandlePeer {
+ public:
+ static bool IsSampled(const HashtablezInfoHandle&) { return false; }
+ static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; }
+};
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
namespace {
using ::absl::synchronization_internal::ThreadPool;
@@ -76,6 +84,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.num_rehashes.load(), 0);
EXPECT_EQ(info.max_probe_length.load(), 0);
EXPECT_EQ(info.total_probe_length.load(), 0);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
@@ -95,6 +104,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.num_rehashes.load(), 0);
EXPECT_EQ(info.max_probe_length.load(), 0);
EXPECT_EQ(info.total_probe_length.load(), 0);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
@@ -167,9 +177,10 @@ TEST(HashtablezInfoTest, RecordRehash) {
EXPECT_EQ(info.size.load(), 2);
EXPECT_EQ(info.total_probe_length.load(), 3);
EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.num_rehashes.load(), 1);
}
-#if defined(ABSL_HASHTABLEZ_SAMPLE)
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(HashtablezSamplerTest, SmallSampleParameter) {
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
@@ -213,7 +224,6 @@ TEST(HashtablezSamplerTest, Sample) {
}
EXPECT_NEAR(sample_rate, 0.01, 0.005);
}
-#endif
TEST(HashtablezSamplerTest, Handle) {
auto& sampler = HashtablezSampler::Global();
@@ -243,6 +253,8 @@ TEST(HashtablezSamplerTest, Handle) {
});
EXPECT_FALSE(found);
}
+#endif
+
TEST(HashtablezSamplerTest, Registration) {
HashtablezSampler sampler;
diff --git a/absl/container/internal/have_sse.h b/absl/container/internal/have_sse.h
index 43414418..e75e1a16 100644
--- a/absl/container/internal/have_sse.h
+++ b/absl/container/internal/have_sse.h
@@ -16,33 +16,34 @@
#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-#ifndef SWISSTABLE_HAVE_SSE2
+#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
#if defined(__SSE2__) || \
(defined(_MSC_VER) && \
(defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
-#define SWISSTABLE_HAVE_SSE2 1
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1
#else
-#define SWISSTABLE_HAVE_SSE2 0
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0
#endif
#endif
-#ifndef SWISSTABLE_HAVE_SSSE3
+#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
#ifdef __SSSE3__
-#define SWISSTABLE_HAVE_SSSE3 1
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1
#else
-#define SWISSTABLE_HAVE_SSSE3 0
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0
#endif
#endif
-#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \
+ !ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
#error "Bad configuration!"
#endif
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
#include <emmintrin.h>
#endif
-#if SWISSTABLE_HAVE_SSSE3
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
#include <tmmintrin.h>
#endif
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
index 69cc85dd..23367833 100644
--- a/absl/container/internal/layout.h
+++ b/absl/container/internal/layout.h
@@ -163,6 +163,7 @@
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
+
#include <ostream>
#include <string>
#include <tuple>
@@ -170,15 +171,16 @@
#include <typeinfo>
#include <utility>
-#ifdef ADDRESS_SANITIZER
-#include <sanitizer/asan_interface.h>
-#endif
-
+#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "absl/utility/utility.h"
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
#if defined(__GXX_RTTI)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
#endif
@@ -614,7 +616,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
void PoisonPadding(const Char* p) const {
static_assert(N < NumOffsets, "Index out of bounds");
(void)p;
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
PoisonPadding<Char, N - 1>(p);
// The `if` is an optimization. It doesn't affect the observable behaviour.
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc
index 8f3628a1..757272f1 100644
--- a/absl/container/internal/layout_test.cc
+++ b/absl/container/internal/layout_test.cc
@@ -17,6 +17,7 @@
// We need ::max_align_t because some libstdc++ versions don't provide
// std::max_align_t
#include <stddef.h>
+
#include <cstdint>
#include <memory>
#include <sstream>
@@ -24,6 +25,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/types/span.h"
@@ -1314,7 +1316,7 @@ struct Region {
};
void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
for (size_t i = 0; i != n; ++i) {
EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i));
}
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index ca7be8d8..ec13a2f7 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -104,6 +104,7 @@
#include "absl/base/internal/bits.h"
#include "absl/base/internal/endian.h"
+#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
@@ -121,6 +122,16 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+template <typename AllocType>
+void SwapAlloc(AllocType& lhs, AllocType& rhs,
+ std::true_type /* propagate_on_container_swap */) {
+ using std::swap;
+ swap(lhs, rhs);
+}
+template <typename AllocType>
+void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
+ std::false_type /* propagate_on_container_swap */) {}
+
template <size_t Width>
class probe_seq {
public:
@@ -168,10 +179,14 @@ struct IsDecomposable<
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
-constexpr bool IsNoThrowSwappable() {
+constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
using std::swap;
return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
}
+template <class T>
+constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
+ return false;
+}
template <typename T>
int TrailingZeros(T x) {
@@ -312,7 +327,7 @@ inline bool IsFull(ctrl_t c) { return c >= 0; }
inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
@@ -346,7 +361,7 @@ struct GroupSse2Impl {
// Returns a bitmask representing the positions of empty slots.
BitMask<uint32_t, kWidth> MatchEmpty() const {
-#if SWISSTABLE_HAVE_SSSE3
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
// This only works because kEmpty is -128.
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
@@ -372,7 +387,7 @@ struct GroupSse2Impl {
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
auto msbs = _mm_set1_epi8(static_cast<char>(-128));
auto x126 = _mm_set1_epi8(126);
-#if SWISSTABLE_HAVE_SSSE3
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
auto zero = _mm_setzero_si128();
@@ -384,7 +399,7 @@ struct GroupSse2Impl {
__m128i ctrl;
};
-#endif // SWISSTABLE_HAVE_SSE2
+#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
struct GroupPortableImpl {
static constexpr size_t kWidth = 8;
@@ -438,7 +453,7 @@ struct GroupPortableImpl {
uint64_t ctrl;
};
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
using Group = GroupSse2Impl;
#else
using Group = GroupPortableImpl;
@@ -496,6 +511,18 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}
+inline void AssertIsFull(ctrl_t* ctrl) {
+ ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, or the table might have rehashed.");
+}
+
+inline void AssertIsValid(ctrl_t* ctrl) {
+ ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, or the table might have rehashed.");
+}
+
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
@@ -510,7 +537,8 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
// if they are equal, false if they are not. If two keys compare equal, then
// their hash values as defined by Hash MUST be equal.
//
-// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which
+// Allocator: an Allocator
+// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
// the storage of the hashtable will be allocated and the elements will be
// constructed and destroyed.
template <class Policy, class Hash, class Eq, class Alloc>
@@ -616,7 +644,7 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
- assert_is_full();
+ AssertIsFull(ctrl_);
return PolicyTraits::element(slot_);
}
@@ -625,7 +653,7 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
iterator& operator++() {
- assert_is_full();
+ AssertIsFull(ctrl_);
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -639,8 +667,8 @@ class raw_hash_set {
}
friend bool operator==(const iterator& a, const iterator& b) {
- a.assert_is_valid();
- b.assert_is_valid();
+ AssertIsValid(a.ctrl_);
+ AssertIsValid(b.ctrl_);
return a.ctrl_ == b.ctrl_;
}
friend bool operator!=(const iterator& a, const iterator& b) {
@@ -648,24 +676,19 @@ class raw_hash_set {
}
private:
- iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end()
- iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {}
-
- void assert_is_full() const { assert(IsFull(*ctrl_)); }
- void assert_is_valid() const {
- assert(!ctrl_ || IsFull(*ctrl_) || *ctrl_ == kSentinel);
+ iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
+ // This assumption helps the compiler know that any non-end iterator is
+ // not equal to any end iterator.
+ ABSL_INTERNAL_ASSUME(ctrl != nullptr);
}
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
- // ctrl is not necessarily aligned to Group::kWidth. It is also likely
- // to read past the space for ctrl bytes and into slots. This is ok
- // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there
- // is no way to read outside the combined slot array.
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
ctrl_ += shift;
slot_ += shift;
}
+ if (ABSL_PREDICT_FALSE(*ctrl_ == kSentinel)) ctrl_ = nullptr;
}
ctrl_t* ctrl_ = nullptr;
@@ -907,12 +930,12 @@ class raw_hash_set {
it.skip_empty_or_deleted();
return it;
}
- iterator end() { return {ctrl_ + capacity_}; }
+ iterator end() { return {}; }
const_iterator begin() const {
return const_cast<raw_hash_set*>(this)->begin();
}
- const_iterator end() const { return const_cast<raw_hash_set*>(this)->end(); }
+ const_iterator end() const { return {}; }
const_iterator cbegin() const { return begin(); }
const_iterator cend() const { return end(); }
@@ -1171,7 +1194,7 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- it.assert_is_full();
+ AssertIsFull(it.ctrl_);
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
@@ -1205,7 +1228,7 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- position.inner_.assert_is_full();
+ AssertIsFull(position.inner_.ctrl_);
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
@@ -1222,8 +1245,8 @@ class raw_hash_set {
void swap(raw_hash_set& that) noexcept(
IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
- (!AllocTraits::propagate_on_container_swap::value ||
- IsNoThrowSwappable<allocator_type>())) {
+ IsNoThrowSwappable<allocator_type>(
+ typename AllocTraits::propagate_on_container_swap{})) {
using std::swap;
swap(ctrl_, that.ctrl_);
swap(slots_, that.slots_);
@@ -1233,12 +1256,8 @@ class raw_hash_set {
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
swap(infoz_, that.infoz_);
- if (AllocTraits::propagate_on_container_swap::value) {
- swap(alloc_ref(), that.alloc_ref());
- } else {
- // If the allocators do not compare equal it is officially undefined
- // behavior. We choose to do nothing.
- }
+ SwapAlloc(alloc_ref(), that.alloc_ref(),
+ typename AllocTraits::propagate_on_container_swap{});
}
void rehash(size_t n) {
@@ -1308,6 +1327,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
seq.next();
+ assert(seq.index() < capacity_ && "full table!");
}
}
template <class K = key_type>
@@ -1659,8 +1679,8 @@ class raw_hash_set {
#endif
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
- assert(seq.index() < capacity_ && "full table!");
seq.next();
+ assert(seq.index() < capacity_ && "full table!");
}
}
@@ -1691,6 +1711,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
seq.next();
+ assert(seq.index() < capacity_ && "full table!");
}
return {prepare_insert(hash), true};
}
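
SwapAlloc and the reworked IsNoThrowSwappable both turn a trait into overload resolution via tag dispatch. A sketch of a generic caller (SwapContainerAllocators is hypothetical):

#include <memory>

#include "absl/container/internal/raw_hash_set.h"

// std::allocator_traits<Alloc>::propagate_on_container_swap is an alias
// for std::true_type or std::false_type, so a default-constructed value of
// it selects the SwapAlloc overload at compile time; no runtime branch.
template <typename Alloc>
void SwapContainerAllocators(Alloc& lhs, Alloc& rhs) {
  absl::container_internal::SwapAlloc(
      lhs, rhs,
      typename std::allocator_traits<Alloc>::propagate_on_container_swap{});
}
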
diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc
index 7ac4b9f7..1a036085 100644
--- a/absl/container/internal/raw_hash_set_allocator_test.cc
+++ b/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -424,6 +424,77 @@ TEST_F(PropagateOnAll, Swap) {
EXPECT_EQ(0, it->num_copies());
}
+// This allocator is similar to std::pmr::polymorphic_allocator.
+// Note the disabled assignment.
+template <class T>
+class PAlloc {
+ template <class>
+ friend class PAlloc;
+
+ public:
+ // types
+ using value_type = T;
+
+ // traits
+ using propagate_on_container_swap = std::false_type;
+
+ PAlloc() noexcept = default;
+ explicit PAlloc(size_t id) noexcept : id_(id) {}
+ PAlloc(const PAlloc&) noexcept = default;
+ PAlloc& operator=(const PAlloc&) noexcept = delete;
+
+ template <class U>
+ PAlloc(const PAlloc<U>& that) noexcept : id_(that.id_) {} // NOLINT
+
+ template <class U>
+ struct rebind {
+ using other = PAlloc<U>;
+ };
+
+ constexpr PAlloc select_on_container_copy_construction() const { return {}; }
+
+ // public member functions
+ T* allocate(size_t) { return new T; }
+ void deallocate(T* p, size_t) { delete p; }
+
+ friend bool operator==(const PAlloc& a, const PAlloc& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const PAlloc& a, const PAlloc& b) { return !(a == b); }
+
+ private:
+ size_t id_ = std::numeric_limits<size_t>::max();
+};
+
+TEST(NoPropagateOn, Swap) {
+ using PA = PAlloc<char>;
+ using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+ Table t1(PA{1}), t2(PA{2});
+ swap(t1, t2);
+ EXPECT_EQ(t1.get_allocator(), PA(1));
+ EXPECT_EQ(t2.get_allocator(), PA(2));
+}
+
+TEST(NoPropagateOn, CopyConstruct) {
+ using PA = PAlloc<char>;
+ using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+ Table t1(PA{1}), t2(t1);
+ EXPECT_EQ(t1.get_allocator(), PA(1));
+ EXPECT_EQ(t2.get_allocator(), PA());
+}
+
+TEST(NoPropagateOn, Assignment) {
+ using PA = PAlloc<char>;
+ using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+ Table t1(PA{1}), t2(PA{2});
+ t1 = t2;
+ EXPECT_EQ(t1.get_allocator(), PA(1));
+ EXPECT_EQ(t2.get_allocator(), PA(2));
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index a96ae68a..f5ae83c4 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -26,6 +26,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
+#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/container_memory.h"
@@ -1666,9 +1667,9 @@ TEST(Nodes, EmptyNodeType) {
}
TEST(Nodes, ExtractInsert) {
- constexpr char k0[] = "Very long std::string zero.";
- constexpr char k1[] = "Very long std::string one.";
- constexpr char k2[] = "Very long std::string two.";
+ constexpr char k0[] = "Very long string zero.";
+ constexpr char k1[] = "Very long string one.";
+ constexpr char k2[] = "Very long string two.";
StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
EXPECT_THAT(t,
UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));
@@ -1791,11 +1792,11 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
- constexpr char kDeathMsg[] = "IsFull";
+ constexpr char kDeathMsg[] = "Invalid operation on iterator";
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}
-#if defined(ABSL_HASHTABLEZ_SAMPLE)
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(RawHashSamplerTest, Sample) {
// Enable the feature even if the prod default is off.
SetHashtablezEnabled(true);
@@ -1816,7 +1817,7 @@ TEST(RawHashSamplerTest, Sample) {
EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
0.01, 0.005);
}
-#endif // ABSL_HASHTABLEZ_SAMPLER
+#endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
// Enable the feature even if the prod default is off.
@@ -1839,7 +1840,7 @@ TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
0.00, 0.001);
}
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
TEST(Sanitizer, PoisoningUnused) {
IntTable t;
t.reserve(5);
@@ -1863,7 +1864,7 @@ TEST(Sanitizer, PoisoningOnErase) {
t.erase(0);
EXPECT_TRUE(__asan_address_is_poisoned(&v));
}
-#endif // ADDRESS_SANITIZER
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
} // namespace
} // namespace container_internal
diff --git a/absl/container/internal/unordered_map_modifiers_test.h b/absl/container/internal/unordered_map_modifiers_test.h
index b8c513f1..8c9ca779 100644
--- a/absl/container/internal/unordered_map_modifiers_test.h
+++ b/absl/container/internal/unordered_map_modifiers_test.h
@@ -286,6 +286,8 @@ class UniquePtrModifiersTest : public ::testing::Test {
}
};
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(UniquePtrModifiersTest);
+
TYPED_TEST_SUITE_P(UniquePtrModifiersTest);
// Test that we do not move from rvalue arguments if an insertion does not
diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h
index fccea184..7a39f628 100644
--- a/absl/container/node_hash_map.h
+++ b/absl/container/node_hash_map.h
@@ -225,7 +225,8 @@ class node_hash_map
//
// size_type erase(const key_type& key):
//
- // Erases the element with the matching key, if it exists.
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
using Base::erase;
// node_hash_map::insert()
@@ -374,6 +375,11 @@ class node_hash_map
// key value and returns a node handle owning that extracted data. If the
// `node_hash_map` does not contain an element with a matching key, this
// function returns an empty node handle.
+ //
+  // NOTE: when compiled with a C++ standard earlier than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
using Base::extract;
// node_hash_map::merge()
@@ -514,12 +520,6 @@ class node_hash_map
//
// Returns the function used for comparing keys equality.
using Base::key_eq;
-
- ABSL_DEPRECATED("Call `hash_function()` instead.")
- typename Base::hasher hash_funct() { return this->hash_function(); }
-
- ABSL_DEPRECATED("Call `rehash()` instead.")
- void resize(typename Base::size_type hint) { this->rehash(hint); }
};
// erase_if(node_hash_map<>, Pred)
diff --git a/absl/container/node_hash_map_test.cc b/absl/container/node_hash_map_test.cc
index 5d74b814..8f59a1e4 100644
--- a/absl/container/node_hash_map_test.cc
+++ b/absl/container/node_hash_map_test.cc
@@ -254,6 +254,21 @@ TEST(NodeHashMap, EraseIf) {
}
}
+// This test requires std::launder for mutable key access in node handles.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+TEST(NodeHashMap, NodeHandleMutableKeyAccess) {
+ node_hash_map<std::string, std::string> map;
+
+ map["key1"] = "mapped";
+
+ auto nh = map.extract(map.begin());
+ nh.key().resize(3);
+ map.insert(std::move(nh));
+
+ EXPECT_THAT(map, testing::ElementsAre(Pair("key", "mapped")));
+}
+#endif
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
index ad54b6dc..56ce3b66 100644
--- a/absl/container/node_hash_set.h
+++ b/absl/container/node_hash_set.h
@@ -217,7 +217,8 @@ class node_hash_set
//
// size_type erase(const key_type& key):
//
- // Erases the element with the matching key, if it exists.
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
using Base::erase;
// node_hash_set::insert()
@@ -427,12 +428,6 @@ class node_hash_set
//
// Returns the function used for comparing keys equality.
using Base::key_eq;
-
- ABSL_DEPRECATED("Call `hash_function()` instead.")
- typename Base::hasher hash_funct() { return this->hash_function(); }
-
- ABSL_DEPRECATED("Call `rehash()` instead.")
- void resize(typename Base::size_type hint) { this->rehash(hint); }
};
// erase_if(node_hash_set<>, Pred)