Diffstat (limited to 'absl/container')
67 files changed, 8980 insertions, 564 deletions
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel index 9e2a5b1e..f2217140 100644 --- a/absl/container/BUILD.bazel +++ b/absl/container/BUILD.bazel @@ -14,12 +14,11 @@ # limitations under the License. # +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", "ABSL_DEFAULT_LINKOPTS", - "ABSL_EXCEPTIONS_FLAG", - "ABSL_EXCEPTIONS_FLAG_LINKOPTS", "ABSL_TEST_COPTS", ) @@ -71,20 +70,6 @@ cc_library( cc_test( name = "fixed_array_test", srcs = ["fixed_array_test.cc"], - copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG, - linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS, - deps = [ - ":fixed_array", - "//absl/base:exception_testing", - "//absl/hash:hash_testing", - "//absl/memory", - "@com_google_googletest//:gtest_main", - ], -) - -cc_test( - name = "fixed_array_test_noexceptions", - srcs = ["fixed_array_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ @@ -99,10 +84,11 @@ cc_test( cc_test( name = "fixed_array_exception_safety_test", srcs = ["fixed_array_exception_safety_test.cc"], - copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG, - linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS, + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":fixed_array", + "//absl/base:config", "//absl/base:exception_safety_testing", "@com_google_googletest//:gtest_main", ], @@ -155,39 +141,21 @@ cc_library( copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = ["//visibility:private"], + deps = ["//absl/base:config"], ) cc_test( name = "inlined_vector_test", srcs = ["inlined_vector_test.cc"], - copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG, - linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS, - deps = [ - ":counting_allocator", - ":inlined_vector", - ":test_instance_tracker", - "//absl/base", - "//absl/base:core_headers", - "//absl/base:exception_testing", - "//absl/hash:hash_testing", - "//absl/memory", - "//absl/strings", - "@com_google_googletest//:gtest_main", - ], -) - -cc_test( - name = "inlined_vector_test_noexceptions", - srcs = ["inlined_vector_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":counting_allocator", ":inlined_vector", ":test_instance_tracker", - "//absl/base", "//absl/base:core_headers", "//absl/base:exception_testing", + "//absl/base:raw_logging_internal", "//absl/hash:hash_testing", "//absl/memory", "//absl/strings", @@ -203,8 +171,8 @@ cc_test( tags = ["benchmark"], deps = [ ":inlined_vector", - "//absl/base", "//absl/base:core_headers", + "//absl/base:raw_logging_internal", "//absl/strings", "@com_github_google_benchmark//:benchmark_main", ], @@ -213,9 +181,10 @@ cc_test( cc_test( name = "inlined_vector_exception_safety_test", srcs = ["inlined_vector_exception_safety_test.cc"], - copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG, + copts = ABSL_TEST_COPTS, deps = [ ":inlined_vector", + "//absl/base:config", "//absl/base:exception_safety_testing", "@com_google_googletest//:gtest_main", ], @@ -447,6 +416,7 @@ cc_library( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_policy_testing", + "//absl/memory", "//absl/meta:type_traits", "//absl/strings", ], @@ -509,6 +479,9 @@ cc_library( hdrs = ["internal/hashtable_debug_hooks.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:config", + ], ) cc_library( @@ -524,6 +497,7 @@ cc_library( ":have_sse", "//absl/base", "//absl/base:core_headers", + "//absl/base:exponential_biased", 
"//absl/debugging:stacktrace", "//absl/memory", "//absl/synchronization", @@ -551,6 +525,7 @@ cc_library( hdrs = ["internal/node_hash_policy.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, + deps = ["//absl/base:config"], ) cc_test( @@ -635,6 +610,7 @@ cc_test( ":raw_hash_set", "//absl/base", "//absl/base:core_headers", + "//absl/base:raw_logging_internal", "//absl/strings", "@com_google_googletest//:gtest_main", ], @@ -678,8 +654,8 @@ cc_test( visibility = ["//visibility:private"], deps = [ ":layout", - "//absl/base", "//absl/base:core_headers", + "//absl/base:raw_logging_internal", "//absl/types:span", "@com_google_googletest//:gtest_main", ], @@ -691,6 +667,9 @@ cc_library( hdrs = ["internal/tracked.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:config", + ], ) cc_library( @@ -825,3 +804,99 @@ cc_test( "@com_google_googletest//:gtest_main", ], ) + +cc_library( + name = "btree", + srcs = [ + "internal/btree.h", + "internal/btree_container.h", + ], + hdrs = [ + "btree_map.h", + "btree_set.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:public"], + deps = [ + ":common", + ":compressed_tuple", + ":container_memory", + ":layout", + "//absl/base:core_headers", + "//absl/base:throw_delegate", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/types:compare", + "//absl/utility", + ], +) + +cc_library( + name = "btree_test_common", + testonly = 1, + hdrs = ["btree_test.h"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = [ + ":btree", + ":flat_hash_set", + "//absl/strings", + "//absl/time", + ], +) + +cc_test( + name = "btree_test", + size = "large", + srcs = [ + "btree_test.cc", + ], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + shard_count = 10, + visibility = ["//visibility:private"], + deps = [ + ":btree", + ":btree_test_common", + ":counting_allocator", + ":test_instance_tracker", + "//absl/base:core_headers", + "//absl/base:raw_logging_internal", + "//absl/flags:flag", + "//absl/hash:hash_testing", + "//absl/memory", + "//absl/meta:type_traits", + "//absl/strings", + "//absl/types:compare", + "@com_google_googletest//:gtest_main", + ], +) + +cc_binary( + name = "btree_benchmark", + testonly = 1, + srcs = [ + "btree_benchmark.cc", + ], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + tags = ["benchmark"], + visibility = ["//visibility:private"], + deps = [ + ":btree", + ":btree_test_common", + ":flat_hash_map", + ":flat_hash_set", + ":hashtable_debug", + "//absl/base:raw_logging_internal", + "//absl/flags:flag", + "//absl/hash", + "//absl/memory", + "//absl/strings:str_format", + "//absl/time", + "@com_github_google_benchmark//:benchmark_main", + ], +) diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt index 7988b12f..e702ba85 100644 --- a/absl/container/CMakeLists.txt +++ b/absl/container/CMakeLists.txt @@ -25,6 +25,73 @@ absl_cc_library( absl_cc_library( NAME + btree + HDRS + "btree_map.h" + "btree_set.h" + "internal/btree.h" + "internal/btree_container.h" + COPTS + ${ABSL_DEFAULT_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::container_common + absl::compare + absl::compressed_tuple + absl::container_memory + absl::core_headers + absl::layout + absl::memory + absl::strings + absl::throw_delegate + absl::type_traits + absl::utility +) + +absl_cc_library( + NAME + btree_test_common + hdrs + "btree_test.h" + 
COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::btree + absl::flat_hash_set + absl::strings + absl::time + TESTONLY +) + +absl_cc_test( + NAME + btree_test + SRCS + "btree_test.cc" + COPTS + ${ABSL_TEST_COPTS} + LINKOPTS + ${ABSL_DEFAULT_LINKOPTS} + DEPS + absl::btree + absl::btree_test_common + absl::compare + absl::core_headers + absl::counting_allocator + absl::flags + absl::hash_testing + absl::raw_logging_internal + absl::strings + absl::test_instance_tracker + absl::type_traits + gmock_main +) + +absl_cc_library( + NAME compressed_tuple HDRS "internal/compressed_tuple.h" @@ -76,24 +143,6 @@ absl_cc_test( "fixed_array_test.cc" COPTS ${ABSL_TEST_COPTS} - ${ABSL_EXCEPTIONS_FLAG} - LINKOPTS - ${ABSL_EXCEPTIONS_FLAG_LINKOPTS} - DEPS - absl::fixed_array - absl::exception_testing - absl::hash_testing - absl::memory - gmock_main -) - -absl_cc_test( - NAME - fixed_array_test_noexceptions - SRCS - "fixed_array_test.cc" - COPTS - ${ABSL_TEST_COPTS} DEPS absl::fixed_array absl::exception_testing @@ -109,11 +158,9 @@ absl_cc_test( "fixed_array_exception_safety_test.cc" COPTS ${ABSL_TEST_COPTS} - ${ABSL_EXCEPTIONS_FLAG} - LINKOPTS - ${ABSL_EXCEPTIONS_FLAG_LINKOPTS} DEPS absl::fixed_array + absl::config absl::exception_safety_testing gmock_main ) @@ -157,6 +204,8 @@ absl_cc_library( "internal/counting_allocator.h" COPTS ${ABSL_DEFAULT_COPTS} + DEPS + absl::config ) absl_cc_test( @@ -166,37 +215,15 @@ absl_cc_test( "inlined_vector_test.cc" COPTS ${ABSL_TEST_COPTS} - ${ABSL_EXCEPTIONS_FLAG} - LINKOPTS - ${ABSL_EXCEPTIONS_FLAG_LINKOPTS} DEPS absl::counting_allocator absl::inlined_vector absl::test_instance_tracker - absl::base - absl::core_headers - absl::exception_testing - absl::hash_testing - absl::memory - absl::strings - gmock_main -) - -absl_cc_test( - NAME - inlined_vector_test_noexceptions - SRCS - "inlined_vector_test.cc" - COPTS - ${ABSL_TEST_COPTS} - DEPS - absl::inlined_vector - absl::test_instance_tracker - absl::base absl::core_headers absl::exception_testing absl::hash_testing absl::memory + absl::raw_logging_internal absl::strings gmock_main ) @@ -208,11 +235,9 @@ absl_cc_test( "inlined_vector_exception_safety_test.cc" COPTS ${ABSL_TEST_COPTS} - ${ABSL_EXCEPTIONS_FLAG} - LINKOPTS - ${ABSL_EXCEPTIONS_FLAG_LINKOPTS} DEPS absl::inlined_vector + absl::config absl::exception_safety_testing gmock_main ) @@ -448,6 +473,7 @@ absl_cc_library( ${ABSL_TEST_COPTS} DEPS absl::hash_policy_testing + absl::memory absl::meta absl::strings TESTONLY @@ -514,6 +540,7 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::base + absl::exponential_biased absl::have_sse absl::synchronization ) @@ -549,6 +576,8 @@ absl_cc_library( "internal/hashtable_debug_hooks.h" COPTS ${ABSL_DEFAULT_COPTS} + DEPS + absl::config PUBLIC ) @@ -568,6 +597,8 @@ absl_cc_library( "internal/node_hash_policy.h" COPTS ${ABSL_DEFAULT_COPTS} + DEPS + absl::config PUBLIC ) @@ -602,7 +633,7 @@ absl_cc_library( NAME container_common HDRS - "internal/commom.h" + "internal/common.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS @@ -653,6 +684,7 @@ absl_cc_test( absl::raw_hash_set absl::base absl::core_headers + absl::raw_logging_internal absl::strings gmock_main ) @@ -696,8 +728,8 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::layout - absl::base absl::core_headers + absl::raw_logging_internal absl::span gmock_main ) @@ -709,6 +741,8 @@ absl_cc_library( "internal/tracked.h" COPTS ${ABSL_TEST_COPTS} + DEPS + absl::config TESTONLY ) diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc new file 
mode 100644 index 00000000..4af92f9f --- /dev/null +++ b/absl/container/btree_benchmark.cc @@ -0,0 +1,707 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include <stdint.h> + +#include <algorithm> +#include <functional> +#include <map> +#include <numeric> +#include <random> +#include <set> +#include <string> +#include <type_traits> +#include <unordered_map> +#include <unordered_set> +#include <vector> + +#include "absl/base/internal/raw_logging.h" +#include "absl/container/btree_map.h" +#include "absl/container/btree_set.h" +#include "absl/container/btree_test.h" +#include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" +#include "absl/container/internal/hashtable_debug.h" +#include "absl/flags/flag.h" +#include "absl/hash/hash.h" +#include "absl/memory/memory.h" +#include "absl/strings/str_format.h" +#include "absl/time/time.h" +#include "benchmark/benchmark.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +constexpr size_t kBenchmarkValues = 1 << 20; + +// How many times we add and remove sub-batches in one batch of *AddRem +// benchmarks. +constexpr size_t kAddRemBatchSize = 1 << 2; + +// Generates n values in the range [0, 4 * n]. +template <typename V> +std::vector<V> GenerateValues(int n) { + constexpr int kSeed = 23; + return GenerateValuesWithSeed<V>(n, 4 * n, kSeed); +} + +// Benchmark insertion of values into a container. +template <typename T> +void BM_InsertImpl(benchmark::State& state, bool sorted) { + using V = typename remove_pair_const<typename T::value_type>::type; + typename KeyOfValue<typename T::key_type, V>::type key_of_value; + + std::vector<V> values = GenerateValues<V>(kBenchmarkValues); + if (sorted) { + std::sort(values.begin(), values.end()); + } + T container(values.begin(), values.end()); + + // Remove and re-insert 10% of the keys per batch. + const int batch_size = (kBenchmarkValues + 9) / 10; + while (state.KeepRunningBatch(batch_size)) { + state.PauseTiming(); + const auto i = static_cast<int>(state.iterations()); + + for (int j = i; j < i + batch_size; j++) { + int x = j % kBenchmarkValues; + container.erase(key_of_value(values[x])); + } + + state.ResumeTiming(); + + for (int j = i; j < i + batch_size; j++) { + int x = j % kBenchmarkValues; + container.insert(values[x]); + } + } +} + +template <typename T> +void BM_Insert(benchmark::State& state) { + BM_InsertImpl<T>(state, false); +} + +template <typename T> +void BM_InsertSorted(benchmark::State& state) { + BM_InsertImpl<T>(state, true); +} + +// container::insert sometimes returns a pair<iterator, bool> and sometimes +// returns an iterator (for multi- containers). +template <typename Iter> +Iter GetIterFromInsert(const std::pair<Iter, bool>& pair) { + return pair.first; +} +template <typename Iter> +Iter GetIterFromInsert(const Iter iter) { + return iter; +} + +// Benchmark insertion of values into a container at the end. 
+template <typename T> +void BM_InsertEnd(benchmark::State& state) { + using V = typename remove_pair_const<typename T::value_type>::type; + typename KeyOfValue<typename T::key_type, V>::type key_of_value; + + T container; + const int kSize = 10000; + for (int i = 0; i < kSize; ++i) { + container.insert(Generator<V>(kSize)(i)); + } + V v = Generator<V>(kSize)(kSize - 1); + typename T::key_type k = key_of_value(v); + + auto it = container.find(k); + while (state.KeepRunning()) { + // Repeatedly removing then adding v. + container.erase(it); + it = GetIterFromInsert(container.insert(v)); + } +} + +template <typename T> +void BM_LookupImpl(benchmark::State& state, bool sorted) { + using V = typename remove_pair_const<typename T::value_type>::type; + typename KeyOfValue<typename T::key_type, V>::type key_of_value; + + std::vector<V> values = GenerateValues<V>(kBenchmarkValues); + if (sorted) { + std::sort(values.begin(), values.end()); + } + T container(values.begin(), values.end()); + + while (state.KeepRunning()) { + int idx = state.iterations() % kBenchmarkValues; + benchmark::DoNotOptimize(container.find(key_of_value(values[idx]))); + } +} + +// Benchmark lookup of values in a container. +template <typename T> +void BM_Lookup(benchmark::State& state) { + BM_LookupImpl<T>(state, false); +} + +// Benchmark lookup of values in a full container, meaning that values +// are inserted in-order to take advantage of biased insertion, which +// yields a full tree. +template <typename T> +void BM_FullLookup(benchmark::State& state) { + BM_LookupImpl<T>(state, true); +} + +// Benchmark deletion of values from a container. +template <typename T> +void BM_Delete(benchmark::State& state) { + using V = typename remove_pair_const<typename T::value_type>::type; + typename KeyOfValue<typename T::key_type, V>::type key_of_value; + std::vector<V> values = GenerateValues<V>(kBenchmarkValues); + T container(values.begin(), values.end()); + + // Remove and re-insert 10% of the keys per batch. + const int batch_size = (kBenchmarkValues + 9) / 10; + while (state.KeepRunningBatch(batch_size)) { + const int i = state.iterations(); + + for (int j = i; j < i + batch_size; j++) { + int x = j % kBenchmarkValues; + container.erase(key_of_value(values[x])); + } + + state.PauseTiming(); + for (int j = i; j < i + batch_size; j++) { + int x = j % kBenchmarkValues; + container.insert(values[x]); + } + state.ResumeTiming(); + } +} + +// Benchmark deletion of multiple values from a container. +template <typename T> +void BM_DeleteRange(benchmark::State& state) { + using V = typename remove_pair_const<typename T::value_type>::type; + typename KeyOfValue<typename T::key_type, V>::type key_of_value; + std::vector<V> values = GenerateValues<V>(kBenchmarkValues); + T container(values.begin(), values.end()); + + // Remove and re-insert 10% of the keys per batch. 
+ const int batch_size = (kBenchmarkValues + 9) / 10; + while (state.KeepRunningBatch(batch_size)) { + const int i = state.iterations(); + + const int start_index = i % kBenchmarkValues; + + state.PauseTiming(); + { + std::vector<V> removed; + removed.reserve(batch_size); + auto itr = container.find(key_of_value(values[start_index])); + auto start = itr; + for (int j = 0; j < batch_size; j++) { + if (itr == container.end()) { + state.ResumeTiming(); + container.erase(start, itr); + state.PauseTiming(); + itr = container.begin(); + start = itr; + } + removed.push_back(*itr++); + } + + state.ResumeTiming(); + container.erase(start, itr); + state.PauseTiming(); + + container.insert(removed.begin(), removed.end()); + } + state.ResumeTiming(); + } +} + +// Benchmark steady-state insert (into first half of range) and remove (from +// second half of range), treating the container approximately like a queue with +// log-time access for all elements. This benchmark does not test the case where +// insertion and removal happen in the same region of the tree. This benchmark +// counts two value constructors. +template <typename T> +void BM_QueueAddRem(benchmark::State& state) { + using V = typename remove_pair_const<typename T::value_type>::type; + typename KeyOfValue<typename T::key_type, V>::type key_of_value; + + ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance"); + + T container; + + const size_t half = kBenchmarkValues / 2; + std::vector<int> remove_keys(half); + std::vector<int> add_keys(half); + + // We want to do the exact same work repeatedly, and the benchmark can end + // after a different number of iterations depending on the speed of the + // individual run so we use a large batch size here and ensure that we do + // deterministic work every batch. + while (state.KeepRunningBatch(half * kAddRemBatchSize)) { + state.PauseTiming(); + + container.clear(); + + for (size_t i = 0; i < half; ++i) { + remove_keys[i] = i; + add_keys[i] = i; + } + constexpr int kSeed = 5; + std::mt19937_64 rand(kSeed); + std::shuffle(remove_keys.begin(), remove_keys.end(), rand); + std::shuffle(add_keys.begin(), add_keys.end(), rand); + + // Note needs lazy generation of values. + Generator<V> g(kBenchmarkValues * kAddRemBatchSize); + + for (size_t i = 0; i < half; ++i) { + container.insert(g(add_keys[i])); + container.insert(g(half + remove_keys[i])); + } + + // There are three parts each of size "half": + // 1 is being deleted from [offset - half, offset) + // 2 is standing [offset, offset + half) + // 3 is being inserted into [offset + half, offset + 2 * half) + size_t offset = 0; + + for (size_t i = 0; i < kAddRemBatchSize; ++i) { + std::shuffle(remove_keys.begin(), remove_keys.end(), rand); + std::shuffle(add_keys.begin(), add_keys.end(), rand); + offset += half; + + state.ResumeTiming(); + for (size_t idx = 0; idx < half; ++idx) { + container.erase(key_of_value(g(offset - half + remove_keys[idx]))); + container.insert(g(offset + half + add_keys[idx])); + } + state.PauseTiming(); + } + state.ResumeTiming(); + } +} + +// Mixed insertion and deletion in the same range using pre-constructed values. 
+template <typename T> +void BM_MixedAddRem(benchmark::State& state) { + using V = typename remove_pair_const<typename T::value_type>::type; + typename KeyOfValue<typename T::key_type, V>::type key_of_value; + + ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance"); + + T container; + + // Create two random shuffles + std::vector<int> remove_keys(kBenchmarkValues); + std::vector<int> add_keys(kBenchmarkValues); + + // We want to do the exact same work repeatedly, and the benchmark can end + // after a different number of iterations depending on the speed of the + // individual run so we use a large batch size here and ensure that we do + // deterministic work every batch. + while (state.KeepRunningBatch(kBenchmarkValues * kAddRemBatchSize)) { + state.PauseTiming(); + + container.clear(); + + constexpr int kSeed = 7; + std::mt19937_64 rand(kSeed); + + std::vector<V> values = GenerateValues<V>(kBenchmarkValues * 2); + + // Insert the first half of the values (already in random order) + container.insert(values.begin(), values.begin() + kBenchmarkValues); + + // Fill remove_keys and add_keys for the first round. + for (size_t i = 0; i < kBenchmarkValues; ++i) { + // remove_keys and add_keys will be swapped before each round, + // therefore fill add_keys here w/ the keys being inserted, so + // they'll be the first to be removed. + remove_keys[i] = i + kBenchmarkValues; + add_keys[i] = i; + } + + for (size_t i = 0; i < kAddRemBatchSize; ++i) { + remove_keys.swap(add_keys); + std::shuffle(remove_keys.begin(), remove_keys.end(), rand); + std::shuffle(add_keys.begin(), add_keys.end(), rand); + + state.ResumeTiming(); + for (size_t idx = 0; idx < kBenchmarkValues; ++idx) { + container.erase(key_of_value(values[remove_keys[idx]])); + container.insert(values[add_keys[idx]]); + } + state.PauseTiming(); + } + state.ResumeTiming(); + } +} + +// Insertion at end, removal from the beginning. This benchmark +// counts two value constructors. +// TODO(ezb): we could add a GenerateNext version of generator that could reduce +// noise for string-like types. +template <typename T> +void BM_Fifo(benchmark::State& state) { + using V = typename remove_pair_const<typename T::value_type>::type; + + T container; + // Need lazy generation of values as state.max_iterations is large. + Generator<V> g(kBenchmarkValues + state.max_iterations); + + for (int i = 0; i < kBenchmarkValues; i++) { + container.insert(g(i)); + } + + while (state.KeepRunning()) { + container.erase(container.begin()); + container.insert(container.end(), g(state.iterations() + kBenchmarkValues)); + } +} + +// Iteration (forward) through the tree +template <typename T> +void BM_FwdIter(benchmark::State& state) { + using V = typename remove_pair_const<typename T::value_type>::type; + using R = typename T::value_type const*; + + std::vector<V> values = GenerateValues<V>(kBenchmarkValues); + T container(values.begin(), values.end()); + + auto iter = container.end(); + + R r = nullptr; + + while (state.KeepRunning()) { + if (iter == container.end()) iter = container.begin(); + r = &(*iter); + ++iter; + } + + benchmark::DoNotOptimize(r); +} + +// Benchmark random range-construction of a container.
+template <typename T> +void BM_RangeConstructionImpl(benchmark::State& state, bool sorted) { + using V = typename remove_pair_const<typename T::value_type>::type; + + std::vector<V> values = GenerateValues<V>(kBenchmarkValues); + if (sorted) { + std::sort(values.begin(), values.end()); + } + { + T container(values.begin(), values.end()); + } + + while (state.KeepRunning()) { + T container(values.begin(), values.end()); + benchmark::DoNotOptimize(container); + } +} + +template <typename T> +void BM_InsertRangeRandom(benchmark::State& state) { + BM_RangeConstructionImpl<T>(state, false); +} + +template <typename T> +void BM_InsertRangeSorted(benchmark::State& state) { + BM_RangeConstructionImpl<T>(state, true); +} + +#define STL_ORDERED_TYPES(value) \ + using stl_set_##value = std::set<value>; \ + using stl_map_##value = std::map<value, intptr_t>; \ + using stl_multiset_##value = std::multiset<value>; \ + using stl_multimap_##value = std::multimap<value, intptr_t> + +using StdString = std::string; +STL_ORDERED_TYPES(int32_t); +STL_ORDERED_TYPES(int64_t); +STL_ORDERED_TYPES(StdString); +STL_ORDERED_TYPES(Time); + +#define STL_UNORDERED_TYPES(value) \ + using stl_unordered_set_##value = std::unordered_set<value>; \ + using stl_unordered_map_##value = std::unordered_map<value, intptr_t>; \ + using flat_hash_set_##value = flat_hash_set<value>; \ + using flat_hash_map_##value = flat_hash_map<value, intptr_t>; \ + using stl_unordered_multiset_##value = std::unordered_multiset<value>; \ + using stl_unordered_multimap_##value = \ + std::unordered_multimap<value, intptr_t> + +#define STL_UNORDERED_TYPES_CUSTOM_HASH(value, hash) \ + using stl_unordered_set_##value = std::unordered_set<value, hash>; \ + using stl_unordered_map_##value = std::unordered_map<value, intptr_t, hash>; \ + using flat_hash_set_##value = flat_hash_set<value, hash>; \ + using flat_hash_map_##value = flat_hash_map<value, intptr_t, hash>; \ + using stl_unordered_multiset_##value = std::unordered_multiset<value, hash>; \ + using stl_unordered_multimap_##value = \ + std::unordered_multimap<value, intptr_t, hash> + +STL_UNORDERED_TYPES(int32_t); +STL_UNORDERED_TYPES(int64_t); +STL_UNORDERED_TYPES(StdString); +STL_UNORDERED_TYPES_CUSTOM_HASH(Time, absl::Hash<absl::Time>); + +#define BTREE_TYPES(value) \ + using btree_256_set_##value = \ + btree_set<value, std::less<value>, std::allocator<value>>; \ + using btree_256_map_##value = \ + btree_map<value, intptr_t, std::less<value>, \ + std::allocator<std::pair<const value, intptr_t>>>; \ + using btree_256_multiset_##value = \ + btree_multiset<value, std::less<value>, std::allocator<value>>; \ + using btree_256_multimap_##value = \ + btree_multimap<value, intptr_t, std::less<value>, \ + std::allocator<std::pair<const value, intptr_t>>> + +BTREE_TYPES(int32_t); +BTREE_TYPES(int64_t); +BTREE_TYPES(StdString); +BTREE_TYPES(Time); + +#define MY_BENCHMARK4(type, func) \ + void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \ + BENCHMARK(BM_##type##_##func) + +#define MY_BENCHMARK3(type) \ + MY_BENCHMARK4(type, Insert); \ + MY_BENCHMARK4(type, InsertSorted); \ + MY_BENCHMARK4(type, InsertEnd); \ + MY_BENCHMARK4(type, Lookup); \ + MY_BENCHMARK4(type, FullLookup); \ + MY_BENCHMARK4(type, Delete); \ + MY_BENCHMARK4(type, DeleteRange); \ + MY_BENCHMARK4(type, QueueAddRem); \ + MY_BENCHMARK4(type, MixedAddRem); \ + MY_BENCHMARK4(type, Fifo); \ + MY_BENCHMARK4(type, FwdIter); \ + MY_BENCHMARK4(type, InsertRangeRandom); \ + MY_BENCHMARK4(type, InsertRangeSorted) + +#define 
MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \ + MY_BENCHMARK3(stl_##type); \ + MY_BENCHMARK3(stl_unordered_##type); \ + MY_BENCHMARK3(btree_256_##type) + +#define MY_BENCHMARK2(type) \ + MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type); \ + MY_BENCHMARK3(flat_hash_##type) + +// Define MULTI_TESTING to see benchmarks for multi-containers also. +// +// You can use --copt=-DMULTI_TESTING. +#ifdef MULTI_TESTING +#define MY_BENCHMARK(type) \ + MY_BENCHMARK2(set_##type); \ + MY_BENCHMARK2(map_##type); \ + MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multiset_##type); \ + MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multimap_##type) +#else +#define MY_BENCHMARK(type) \ + MY_BENCHMARK2(set_##type); \ + MY_BENCHMARK2(map_##type) +#endif + +MY_BENCHMARK(int32_t); +MY_BENCHMARK(int64_t); +MY_BENCHMARK(StdString); +MY_BENCHMARK(Time); + +// Define a type whose size and cost of moving are independently customizable. +// When sizeof(value_type) increases, we expect btree to no longer have as much +// cache-locality advantage over STL. When cost of moving increases, we expect +// btree to actually do more work than STL because it has to move values around +// and STL doesn't have to. +template <int Size, int Copies> +struct BigType { + BigType() : BigType(0) {} + explicit BigType(int x) { std::iota(values.begin(), values.end(), x); } + + void Copy(const BigType& x) { + for (int i = 0; i < Size && i < Copies; ++i) values[i] = x.values[i]; + // If Copies > Size, do extra copies. + for (int i = Size, idx = 0; i < Copies; ++i) { + int64_t tmp = x.values[idx]; + benchmark::DoNotOptimize(tmp); + idx = idx + 1 == Size ? 0 : idx + 1; + } + } + + BigType(const BigType& x) { Copy(x); } + BigType& operator=(const BigType& x) { + Copy(x); + return *this; + } + + // Compare only the first Copies elements if Copies is less than Size. + bool operator<(const BigType& other) const { + return std::lexicographical_compare( + values.begin(), values.begin() + std::min(Size, Copies), + other.values.begin(), other.values.begin() + std::min(Size, Copies)); + } + bool operator==(const BigType& other) const { + return std::equal(values.begin(), values.begin() + std::min(Size, Copies), + other.values.begin()); + } + + // Support absl::Hash. 
+ template <typename State> + friend State AbslHashValue(State h, const BigType& b) { + for (int i = 0; i < Size && i < Copies; ++i) + h = State::combine(std::move(h), b.values[i]); + return h; + } + + std::array<int64_t, Size> values; +}; + +#define BIG_TYPE_BENCHMARKS(SIZE, COPIES) \ + using stl_set_size##SIZE##copies##COPIES = std::set<BigType<SIZE, COPIES>>; \ + using stl_map_size##SIZE##copies##COPIES = \ + std::map<BigType<SIZE, COPIES>, intptr_t>; \ + using stl_multiset_size##SIZE##copies##COPIES = \ + std::multiset<BigType<SIZE, COPIES>>; \ + using stl_multimap_size##SIZE##copies##COPIES = \ + std::multimap<BigType<SIZE, COPIES>, intptr_t>; \ + using stl_unordered_set_size##SIZE##copies##COPIES = \ + std::unordered_set<BigType<SIZE, COPIES>, \ + absl::Hash<BigType<SIZE, COPIES>>>; \ + using stl_unordered_map_size##SIZE##copies##COPIES = \ + std::unordered_map<BigType<SIZE, COPIES>, intptr_t, \ + absl::Hash<BigType<SIZE, COPIES>>>; \ + using flat_hash_set_size##SIZE##copies##COPIES = \ + flat_hash_set<BigType<SIZE, COPIES>>; \ + using flat_hash_map_size##SIZE##copies##COPIES = \ + flat_hash_map<BigType<SIZE, COPIES>, intptr_t>; \ + using stl_unordered_multiset_size##SIZE##copies##COPIES = \ + std::unordered_multiset<BigType<SIZE, COPIES>, \ + absl::Hash<BigType<SIZE, COPIES>>>; \ + using stl_unordered_multimap_size##SIZE##copies##COPIES = \ + std::unordered_multimap<BigType<SIZE, COPIES>, intptr_t, \ + absl::Hash<BigType<SIZE, COPIES>>>; \ + using btree_256_set_size##SIZE##copies##COPIES = \ + btree_set<BigType<SIZE, COPIES>>; \ + using btree_256_map_size##SIZE##copies##COPIES = \ + btree_map<BigType<SIZE, COPIES>, intptr_t>; \ + using btree_256_multiset_size##SIZE##copies##COPIES = \ + btree_multiset<BigType<SIZE, COPIES>>; \ + using btree_256_multimap_size##SIZE##copies##COPIES = \ + btree_multimap<BigType<SIZE, COPIES>, intptr_t>; \ + MY_BENCHMARK(size##SIZE##copies##COPIES) + +// Define BIG_TYPE_TESTING to see benchmarks for more big types. +// +// You can use --copt=-DBIG_TYPE_TESTING. +#ifndef NODESIZE_TESTING +#ifdef BIG_TYPE_TESTING +BIG_TYPE_BENCHMARKS(1, 4); +BIG_TYPE_BENCHMARKS(4, 1); +BIG_TYPE_BENCHMARKS(4, 4); +BIG_TYPE_BENCHMARKS(1, 8); +BIG_TYPE_BENCHMARKS(8, 1); +BIG_TYPE_BENCHMARKS(8, 8); +BIG_TYPE_BENCHMARKS(1, 16); +BIG_TYPE_BENCHMARKS(16, 1); +BIG_TYPE_BENCHMARKS(16, 16); +BIG_TYPE_BENCHMARKS(1, 32); +BIG_TYPE_BENCHMARKS(32, 1); +BIG_TYPE_BENCHMARKS(32, 32); +#else +BIG_TYPE_BENCHMARKS(32, 32); +#endif +#endif + +// Benchmark using unique_ptrs to large value types. In order to be able to use +// the same benchmark code as the other types, use a type that holds a +// unique_ptr and has a copy constructor. 
+template <int Size> +struct BigTypePtr { + BigTypePtr() : BigTypePtr(0) {} + explicit BigTypePtr(int x) { + ptr = absl::make_unique<BigType<Size, Size>>(x); + } + BigTypePtr(const BigTypePtr& x) { + ptr = absl::make_unique<BigType<Size, Size>>(*x.ptr); + } + BigTypePtr(BigTypePtr&& x) noexcept = default; + BigTypePtr& operator=(const BigTypePtr& x) { + ptr = absl::make_unique<BigType<Size, Size>>(*x.ptr); + return *this; + } + BigTypePtr& operator=(BigTypePtr&& x) noexcept = default; + + bool operator<(const BigTypePtr& other) const { return *ptr < *other.ptr; } + bool operator==(const BigTypePtr& other) const { return *ptr == *other.ptr; } + + std::unique_ptr<BigType<Size, Size>> ptr; +}; + +template <int Size> +double ContainerInfo(const btree_set<BigTypePtr<Size>>& b) { + const double bytes_used = + b.bytes_used() + b.size() * sizeof(BigType<Size, Size>); + const double bytes_per_value = bytes_used / b.size(); + BtreeContainerInfoLog(b, bytes_used, bytes_per_value); + return bytes_per_value; +} +template <int Size> +double ContainerInfo(const btree_map<int, BigTypePtr<Size>>& b) { + const double bytes_used = + b.bytes_used() + b.size() * sizeof(BigType<Size, Size>); + const double bytes_per_value = bytes_used / b.size(); + BtreeContainerInfoLog(b, bytes_used, bytes_per_value); + return bytes_per_value; +} + +#define BIG_TYPE_PTR_BENCHMARKS(SIZE) \ + using stl_set_size##SIZE##copies##SIZE##ptr = std::set<BigType<SIZE, SIZE>>; \ + using stl_map_size##SIZE##copies##SIZE##ptr = \ + std::map<int, BigType<SIZE, SIZE>>; \ + using stl_unordered_set_size##SIZE##copies##SIZE##ptr = \ + std::unordered_set<BigType<SIZE, SIZE>, \ + absl::Hash<BigType<SIZE, SIZE>>>; \ + using stl_unordered_map_size##SIZE##copies##SIZE##ptr = \ + std::unordered_map<int, BigType<SIZE, SIZE>>; \ + using flat_hash_set_size##SIZE##copies##SIZE##ptr = \ + flat_hash_set<BigType<SIZE, SIZE>>; \ + using flat_hash_map_size##SIZE##copies##SIZE##ptr = \ + flat_hash_map<int, BigTypePtr<SIZE>>; \ + using btree_256_set_size##SIZE##copies##SIZE##ptr = \ + btree_set<BigTypePtr<SIZE>>; \ + using btree_256_map_size##SIZE##copies##SIZE##ptr = \ + btree_map<int, BigTypePtr<SIZE>>; \ + MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr) + +BIG_TYPE_PTR_BENCHMARKS(32); + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h new file mode 100644 index 00000000..d23f4ee5 --- /dev/null +++ b/absl/container/btree_map.h @@ -0,0 +1,759 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: btree_map.h +// ----------------------------------------------------------------------------- +// +// This header file defines B-tree maps: sorted associative containers mapping +// keys to values. +// +// * `absl::btree_map<>` +// * `absl::btree_multimap<>` +// +// These B-tree types are similar to the corresponding types in the STL +// (`std::map` and `std::multimap`) and generally conform to the STL interfaces +// of those types. However, because they are implemented using B-trees, they +// are more efficient in most situations. +// +// Unlike `std::map` and `std::multimap`, which are commonly implemented using +// red-black tree nodes, B-tree maps use more generic B-tree nodes able to hold +// multiple values per node. Holding multiple values per node often makes +// B-tree maps perform better than their `std::map` counterparts, because +// multiple entries can be checked within the same cache hit. +// +// However, these types should not be considered drop-in replacements for +// `std::map` and `std::multimap` as there are some API differences, which are +// noted in this header file. +// +// Importantly, insertions and deletions may invalidate outstanding iterators, +// pointers, and references to elements. Such invalidations are typically only +// an issue if insertion and deletion operations are interleaved with the use of +// more than one iterator, pointer, or reference simultaneously. For this +// reason, `insert()` and `erase()` return a valid iterator at the current +// position. + +#ifndef ABSL_CONTAINER_BTREE_MAP_H_ +#define ABSL_CONTAINER_BTREE_MAP_H_ + +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/btree_container.h" // IWYU pragma: export + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// absl::btree_map<> +// +// An `absl::btree_map<K, V>` is an ordered associative container of +// unique keys and associated values designed to be a more efficient replacement +// for `std::map` (in most cases). +// +// Keys are sorted using an (optional) comparison function, which defaults to +// `std::less<K>`. +// +// An `absl::btree_map<K, V>` uses a default allocator of +// `std::allocator<std::pair<const K, V>>` to allocate (and deallocate) +// nodes, and construct and destruct values within those nodes. You may +// instead specify a custom allocator `A` (which in turn requires specifying a +// custom comparator `C`) as in `absl::btree_map<K, V, C, A>`. 
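[Editor's note: a minimal sketch, not part of this diff, of the erase-while-iterating pattern that the invalidation guarantee documented above makes safe. It assumes only the `absl::btree_map` API as described in this header.]

```cpp
#include <string>

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, std::string> m = {{1, "one"}, {2, "two"}, {3, "three"}};
  // Because erase() returns a valid iterator to the next element, the loop
  // never dereferences an iterator invalidated by the deletion.
  for (auto it = m.begin(); it != m.end();) {
    if (it->first % 2 == 0) {
      it = m.erase(it);  // advance via the returned iterator
    } else {
      ++it;
    }
  }
  return m.size() == 2 ? 0 : 1;  // keys 1 and 3 remain
}
```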
+// +template <typename Key, typename Value, typename Compare = std::less<Key>, + typename Alloc = std::allocator<std::pair<const Key, Value>>> +class btree_map + : public container_internal::btree_map_container< + container_internal::btree<container_internal::map_params< + Key, Value, Compare, Alloc, /*TargetNodeSize=*/256, + /*Multi=*/false>>> { + using Base = typename btree_map::btree_map_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_map` supports the same overload set as `std::map` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_map<int, std::string> map1; + // + // * Initializer List constructor + // + // absl::btree_map<int, std::string> map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::btree_map<int, std::string> map3(map2); + // + // * Copy assignment operator + // + // absl::btree_map<int, std::string> map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_map<int, std::string> map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_map<int, std::string> map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "b"}}; + // absl::btree_map<int, std::string> map7(v.begin(), v.end()); + btree_map() {} + using Base::Base; + + // btree_map::begin() + // + // Returns an iterator to the beginning of the `btree_map`. + using Base::begin; + + // btree_map::cbegin() + // + // Returns a const iterator to the beginning of the `btree_map`. + using Base::cbegin; + + // btree_map::end() + // + // Returns an iterator to the end of the `btree_map`. + using Base::end; + + // btree_map::cend() + // + // Returns a const iterator to the end of the `btree_map`. + using Base::cend; + + // btree_map::empty() + // + // Returns whether or not the `btree_map` is empty. + using Base::empty; + + // btree_map::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_map` under current memory constraints. This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `btree_map<Key, T>`. + using Base::max_size; + + // btree_map::size() + // + // Returns the number of elements currently within the `btree_map`. + using Base::size; + + // btree_map::clear() + // + // Removes all elements from the `btree_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_map::erase() + // + // Erases elements within the `btree_map`. If an erase occurs, any references, + // pointers, or iterators are invalidated. + // Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_map`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template <typename K> size_type erase(const K& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased. 
+ using Base::erase; + + // btree_map::insert() + // + // Inserts an element of the specified value into the `btree_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If an insertion + // occurs, any references, pointers, or iterators are invalidated. + // Overloads are listed below. + // + // std::pair<iterator,bool> insert(const value_type& value): + // + // Inserts a value into the `btree_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair<iterator,bool> insert(value_type&& value): + // + // Inserts a moveable value into the `btree_map`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list<init_type> ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_map::insert_or_assign() + // + // Inserts an element of the specified value into the `btree_map` provided + // that a value with the given key does not already exist, or replaces the + // corresponding mapped type with the forwarded `obj` argument if a key for + // that value already exists, returning an iterator pointing to the newly + // inserted element. Overloads are listed below. + // + // pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj): + // pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `btree_map`. If the returned bool is true, insertion took place, and if + // it's false, assignment took place. + // + // iterator insert_or_assign(const_iterator hint, + // const key_type& k, M&& obj): + // iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `btree_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // btree_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. 
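[Editor's note: the insertion overloads documented above mirror their `std::map` counterparts. A short illustrative sketch of how they differ in effect, assuming only the API as documented in this header:]

```cpp
#include <string>

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<std::string, int> counts;
  auto result = counts.insert({"a", 1});  // result.second == true: inserted
  result = counts.insert({"a", 2});       // result.second == false: key exists,
                                          // mapped value stays 1
  counts.insert_or_assign("a", 3);        // assignment path: counts["a"] == 3
  counts.try_emplace("b", 4);             // constructs in place only if absent
  return counts.at("a") == 3 && counts.size() == 2 ? 0 : 1;
}
```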
+ using Base::emplace; + + // btree_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace_hint; + + // btree_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + // + // Overloads are listed below. + // + // std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args): + // std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `btree_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `btree_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::try_emplace; + + // btree_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template <typename K> node_type extract(const K& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_map` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_map::merge() + // + // Extracts elements from a given `source` btree_map into this + // `btree_map`. If the destination `btree_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // btree_map::swap(btree_map& other) + // + // Exchanges the contents of this `btree_map` with those of the `other` + // btree_map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `btree_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. 
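[Editor's note: the `extract()`/`merge()` pair documented above moves elements between maps without copying them. A hedged sketch of the node-handle workflow, illustrative only:]

```cpp
#include <utility>

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, int> src = {{1, 10}, {2, 20}};
  absl::btree_map<int, int> dst = {{2, 99}};

  // merge() moves over elements whose keys are absent from dst;
  // the colliding key 2 stays behind in src.
  dst.merge(src);  // dst: {1,10},{2,99}; src: {2,20}

  // extract() detaches an element as a node handle, which can be
  // re-inserted elsewhere without copying the value.
  auto node = dst.extract(1);
  src.insert(std::move(node));
  return src.size() == 2 && dst.size() == 1 ? 0 : 1;
}
```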
+ using Base::swap; + + // btree_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. + using Base::at; + + // btree_map::contains() + // + // template <typename K> bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_map`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the map is provided a + // compatible heterogeneous comparator. + using Base::contains; + + // btree_map::count() + // + // template <typename K> size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_map`. Note that this function will return either `1` or `0` + // since duplicate elements are not allowed within a `btree_map`. + // + // Supports heterogeneous lookup, provided that the map is provided a + // compatible heterogeneous comparator. + using Base::count; + + // btree_map::equal_range() + // + // Returns a half-open range [first, last), defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `btree_map`. + using Base::equal_range; + + // btree_map::find() + // + // template <typename K> iterator find(const K& key): + // template <typename K> const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_map`. + // + // Supports heterogeneous lookup, provided that the map is provided a + // compatible heterogeneous comparator. + using Base::find; + + // btree_map::operator[]() + // + // Returns a reference to the value mapped to the passed key within the + // `btree_map`, performing an `insert()` if the key does not already + // exist. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. Otherwise iterators are not affected and references are not + // invalidated. Overloads are listed below. + // + // T& operator[](key_type&& key): + // T& operator[](const key_type& key): + // + // Inserts a value_type object constructed in-place if the element with the + // given key does not exist. + using Base::operator[]; + + // btree_map::get_allocator() + // + // Returns the allocator function associated with this `btree_map`. + using Base::get_allocator; + + // btree_map::key_comp(); + // + // Returns the key comparator associated with this `btree_map`. + using Base::key_comp; + + // btree_map::value_comp(); + // + // Returns the value comparator associated with this `btree_map`. + using Base::value_comp; +}; + +// absl::swap(absl::btree_map<>, absl::btree_map<>) +// +// Swaps the contents of two `absl::btree_map` containers. +template <typename K, typename V, typename C, typename A> +void swap(btree_map<K, V, C, A> &x, btree_map<K, V, C, A> &y) { + return x.swap(y); +} + +// absl::erase_if(absl::btree_map<>, Pred) +// +// Erases all elements that satisfy the predicate pred from the container. +template <typename K, typename V, typename C, typename A, typename Pred> +void erase_if(btree_map<K, V, C, A> &map, Pred pred) { + for (auto it = map.begin(); it != map.end();) { + if (pred(*it)) { + it = map.erase(it); + } else { + ++it; + } + } +} + +// absl::btree_multimap +// +// An `absl::btree_multimap<K, V>` is an ordered associative container of +// keys and associated values designed to be a more efficient replacement for +// `std::multimap` (in most cases).
Unlike `absl::btree_map`, a B-tree multimap +// allows multiple elements with equivalent keys. +// +// Keys are sorted using an (optional) comparison function, which defaults to +// `std::less<K>`. +// +// An `absl::btree_multimap<K, V>` uses a default allocator of +// `std::allocator<std::pair<const K, V>>` to allocate (and deallocate) +// nodes, and construct and destruct values within those nodes. You may +// instead specify a custom allocator `A` (which in turn requires specifying a +// custom comparator `C`) as in `absl::btree_multimap<K, V, C, A>`. +// +template <typename Key, typename Value, typename Compare = std::less<Key>, + typename Alloc = std::allocator<std::pair<const Key, Value>>> +class btree_multimap + : public container_internal::btree_multimap_container< + container_internal::btree<container_internal::map_params< + Key, Value, Compare, Alloc, /*TargetNodeSize=*/256, + /*Multi=*/true>>> { + using Base = typename btree_multimap::btree_multimap_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_multimap` supports the same overload set as `std::multimap` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_multimap<int, std::string> map1; + // + // * Initializer List constructor + // + // absl::btree_multimap<int, std::string> map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::btree_multimap<int, std::string> map3(map2); + // + // * Copy assignment operator + // + // absl::btree_multimap<int, std::string> map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_multimap<int, std::string> map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_multimap<int, std::string> map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "b"}}; + // absl::btree_multimap<int, std::string> map7(v.begin(), v.end()); + btree_multimap() {} + using Base::Base; + + // btree_multimap::begin() + // + // Returns an iterator to the beginning of the `btree_multimap`. + using Base::begin; + + // btree_multimap::cbegin() + // + // Returns a const iterator to the beginning of the `btree_multimap`. + using Base::cbegin; + + // btree_multimap::end() + // + // Returns an iterator to the end of the `btree_multimap`. + using Base::end; + + // btree_multimap::cend() + // + // Returns a const iterator to the end of the `btree_multimap`. + using Base::cend; + + // btree_multimap::empty() + // + // Returns whether or not the `btree_multimap` is empty. + using Base::empty; + + // btree_multimap::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_multimap` under current memory constraints. This value can be + // thought of as the largest value of `std::distance(begin(), end())` for a + // `btree_multimap<Key, T>`. + using Base::max_size; + + // btree_multimap::size() + // + // Returns the number of elements currently within the `btree_multimap`. + using Base::size; + + // btree_multimap::clear() + // + // Removes all elements from the `btree_multimap`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_multimap::erase() + // + // Erases elements within the `btree_multimap`. If an erase occurs, any + // references, pointers, or iterators are invalidated. 
+ // Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_multimap`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template <typename K> size_type erase(const K& key): + // + // Erases the elements matching the key, if any exist, returning the + // number of elements erased. + using Base::erase; + + // btree_multimap::insert() + // + // Inserts an element of the specified value into the `btree_multimap`, + // returning an iterator pointing to the newly inserted element. + // Any references, pointers, or iterators are invalidated. Overloads are + // listed below. + // + // iterator insert(const value_type& value): + // + // Inserts a value into the `btree_multimap`, returning an iterator to the + // inserted element. + // + // iterator insert(value_type&& value): + // + // Inserts a moveable value into the `btree_multimap`, returning an iterator + // to the inserted element. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list<init_type> ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_multimap::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multimap`. Any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_multimap::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multimap`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search. + // + // Any references, pointers, or iterators are invalidated. + using Base::emplace_hint; + + // btree_multimap::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template <typename K> node_type extract(const K& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_multimap` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. 
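[Editor's note: since a `btree_multimap` keeps duplicate keys, lookups typically go through `equal_range()`. A small illustrative sketch of the behavior described above, assuming only the documented API:]

```cpp
#include <string>

#include "absl/container/btree_map.h"

int main() {
  absl::btree_multimap<std::string, int> mm;
  mm.insert({"k", 1});
  mm.insert({"k", 2});  // unlike btree_map, the duplicate key is kept

  // equal_range() bounds every element with the given key.
  int sum = 0;
  auto range = mm.equal_range("k");
  for (auto it = range.first; it != range.second; ++it) sum += it->second;
  return sum == 3 && mm.count("k") == 2 ? 0 : 1;
}
```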
+  using Base::extract;
+
+  // btree_multimap::merge()
+  //
+  // Extracts all elements from a given `source` btree_multimap into this
+  // `btree_multimap`. Because a `btree_multimap` permits duplicate keys,
+  // every element of `source` is extracted, even when this `btree_multimap`
+  // already contains elements with equivalent keys.
+  using Base::merge;
+
+  // btree_multimap::swap(btree_multimap& other)
+  //
+  // Exchanges the contents of this `btree_multimap` with those of the `other`
+  // btree_multimap, avoiding invocation of any move, copy, or swap operations
+  // on individual elements.
+  //
+  // All iterators and references on the `btree_multimap` remain valid,
+  // except for the past-the-end iterator, which is invalidated.
+  using Base::swap;
+
+  // btree_multimap::contains()
+  //
+  // template <typename K> bool contains(const K& key) const:
+  //
+  // Determines whether an element comparing equal to the given `key` exists
+  // within the `btree_multimap`, returning `true` if so or `false` otherwise.
+  //
+  // Supports heterogeneous lookup, provided that the map is provided a
+  // compatible heterogeneous comparator.
+  using Base::contains;
+
+  // btree_multimap::count()
+  //
+  // template <typename K> size_type count(const K& key) const:
+  //
+  // Returns the number of elements comparing equal to the given `key` within
+  // the `btree_multimap`.
+  //
+  // Supports heterogeneous lookup, provided that the map is provided a
+  // compatible heterogeneous comparator.
+  using Base::count;
+
+  // btree_multimap::equal_range()
+  //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `btree_multimap`.
+  using Base::equal_range;
+
+  // btree_multimap::find()
+  //
+  // template <typename K> iterator find(const K& key):
+  // template <typename K> const_iterator find(const K& key) const:
+  //
+  // Finds an element with the passed `key` within the `btree_multimap`.
+  //
+  // Supports heterogeneous lookup, provided that the map is provided a
+  // compatible heterogeneous comparator.
+  using Base::find;
+
+  // btree_multimap::get_allocator()
+  //
+  // Returns the allocator associated with this `btree_multimap`.
+  using Base::get_allocator;
+
+  // btree_multimap::key_comp()
+  //
+  // Returns the key comparator associated with this `btree_multimap`.
+  using Base::key_comp;
+
+  // btree_multimap::value_comp()
+  //
+  // Returns the value comparator associated with this `btree_multimap`.
+  using Base::value_comp;
+};
+
+// absl::swap(absl::btree_multimap<>, absl::btree_multimap<>)
+//
+// Swaps the contents of two `absl::btree_multimap` containers.
+template <typename K, typename V, typename C, typename A>
+void swap(btree_multimap<K, V, C, A> &x, btree_multimap<K, V, C, A> &y) {
+  return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_multimap<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
+template <typename K, typename V, typename C, typename A, typename Pred>
+void erase_if(btree_multimap<K, V, C, A> &map, Pred pred) {
+  for (auto it = map.begin(); it != map.end();) {
+    if (pred(*it)) {
+      it = map.erase(it);
+    } else {
+      ++it;
+    }
+  }
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_BTREE_MAP_H_
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
new file mode 100644
index 00000000..127fb940
--- /dev/null
+++ b/absl/container/btree_set.h
@@ -0,0 +1,683 @@
+// Copyright 2018 The Abseil Authors.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: btree_set.h +// ----------------------------------------------------------------------------- +// +// This header file defines B-tree sets: sorted associative containers of +// values. +// +// * `absl::btree_set<>` +// * `absl::btree_multiset<>` +// +// These B-tree types are similar to the corresponding types in the STL +// (`std::set` and `std::multiset`) and generally conform to the STL interfaces +// of those types. However, because they are implemented using B-trees, they +// are more efficient in most situations. +// +// Unlike `std::set` and `std::multiset`, which are commonly implemented using +// red-black tree nodes, B-tree sets use more generic B-tree nodes able to hold +// multiple values per node. Holding multiple values per node often makes +// B-tree sets perform better than their `std::set` counterparts, because +// multiple entries can be checked within the same cache hit. +// +// However, these types should not be considered drop-in replacements for +// `std::set` and `std::multiset` as there are some API differences, which are +// noted in this header file. +// +// Importantly, insertions and deletions may invalidate outstanding iterators, +// pointers, and references to elements. Such invalidations are typically only +// an issue if insertion and deletion operations are interleaved with the use of +// more than one iterator, pointer, or reference simultaneously. For this +// reason, `insert()` and `erase()` return a valid iterator at the current +// position. + +#ifndef ABSL_CONTAINER_BTREE_SET_H_ +#define ABSL_CONTAINER_BTREE_SET_H_ + +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/btree_container.h" // IWYU pragma: export + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// absl::btree_set<> +// +// An `absl::btree_set<K>` is an ordered associative container of unique key +// values designed to be a more efficient replacement for `std::set` (in most +// cases). +// +// Keys are sorted using an (optional) comparison function, which defaults to +// `std::less<K>`. +// +// An `absl::btree_set<K>` uses a default allocator of `std::allocator<K>` to +// allocate (and deallocate) nodes, and construct and destruct values within +// those nodes. You may instead specify a custom allocator `A` (which in turn +// requires specifying a custom comparator `C`) as in +// `absl::btree_set<K, C, A>`. 
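+// For example (illustrative), a custom comparator yields descending order:
+//
+//   absl::btree_set<int, std::greater<int>> s = {1, 2, 3};
+//   // *s.begin() == 3; iteration yields 3, 2, 1.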
+//
+template <typename Key, typename Compare = std::less<Key>,
+          typename Alloc = std::allocator<Key>>
+class btree_set
+    : public container_internal::btree_set_container<
+          container_internal::btree<container_internal::set_params<
+              Key, Compare, Alloc, /*TargetNodeSize=*/256,
+              /*Multi=*/false>>> {
+  using Base = typename btree_set::btree_set_container;
+
+ public:
+  // Constructors and Assignment Operators
+  //
+  // A `btree_set` supports the same overload set as `std::set`
+  // for construction and assignment:
+  //
+  // * Default constructor
+  //
+  //   absl::btree_set<std::string> set1;
+  //
+  // * Initializer List constructor
+  //
+  //   absl::btree_set<std::string> set2 =
+  //       {{"huey"}, {"dewey"}, {"louie"},};
+  //
+  // * Copy constructor
+  //
+  //   absl::btree_set<std::string> set3(set2);
+  //
+  // * Copy assignment operator
+  //
+  //   absl::btree_set<std::string> set4;
+  //   set4 = set3;
+  //
+  // * Move constructor
+  //
+  //   // Move is guaranteed efficient
+  //   absl::btree_set<std::string> set5(std::move(set4));
+  //
+  // * Move assignment operator
+  //
+  //   // May be efficient if allocators are compatible
+  //   absl::btree_set<std::string> set6;
+  //   set6 = std::move(set5);
+  //
+  // * Range constructor
+  //
+  //   std::vector<std::string> v = {"a", "b"};
+  //   absl::btree_set<std::string> set7(v.begin(), v.end());
+  btree_set() {}
+  using Base::Base;
+
+  // btree_set::begin()
+  //
+  // Returns an iterator to the beginning of the `btree_set`.
+  using Base::begin;
+
+  // btree_set::cbegin()
+  //
+  // Returns a const iterator to the beginning of the `btree_set`.
+  using Base::cbegin;
+
+  // btree_set::end()
+  //
+  // Returns an iterator to the end of the `btree_set`.
+  using Base::end;
+
+  // btree_set::cend()
+  //
+  // Returns a const iterator to the end of the `btree_set`.
+  using Base::cend;
+
+  // btree_set::empty()
+  //
+  // Returns whether or not the `btree_set` is empty.
+  using Base::empty;
+
+  // btree_set::max_size()
+  //
+  // Returns the largest theoretically possible number of elements within a
+  // `btree_set` under current memory constraints. This value can be thought
+  // of as the largest value of `std::distance(begin(), end())` for a
+  // `btree_set<Key>`.
+  using Base::max_size;
+
+  // btree_set::size()
+  //
+  // Returns the number of elements currently within the `btree_set`.
+  using Base::size;
+
+  // btree_set::clear()
+  //
+  // Removes all elements from the `btree_set`. Invalidates any references,
+  // pointers, or iterators referring to contained elements.
+  using Base::clear;
+
+  // btree_set::erase()
+  //
+  // Erases elements within the `btree_set`. Overloads are listed below.
+  //
+  // iterator erase(iterator position):
+  // iterator erase(const_iterator position):
+  //
+  //   Erases the element at `position` of the `btree_set`, returning
+  //   the iterator pointing to the element after the one that was erased
+  //   (or end() if none exists).
+  //
+  // iterator erase(const_iterator first, const_iterator last):
+  //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning the iterator pointing to the element after the interval that
+  //   was erased (or end() if none exists).
+  //
+  // template <typename K> size_type erase(const K& key):
+  //
+  //   Erases the element with the matching key, if it exists, returning the
+  //   number of elements erased.
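+  // For example (sketch): keys are unique in a `btree_set`, so `erase(key)`
+  // returns either 0 or 1:
+  //
+  //   absl::btree_set<int> s = {1, 2, 3};
+  //   assert(s.erase(2) == 1);  // removed
+  //   assert(s.erase(9) == 0);  // no such key
+  //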
+ using Base::erase; + + // btree_set::insert() + // + // Inserts an element of the specified value into the `btree_set`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If an insertion + // occurs, any references, pointers, or iterators are invalidated. + // Overloads are listed below. + // + // std::pair<iterator,bool> insert(const value_type& value): + // + // Inserts a value into the `btree_set`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair<iterator,bool> insert(value_type&& value): + // + // Inserts a moveable value into the `btree_set`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list<init_type> ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_set::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_set`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_set::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_set`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace_hint; + + // btree_set::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template <typename K> node_type extract(const K& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_set` + // does not contain an element with a matching key, this function returns an + // empty node handle. 
+  //
+  // NOTE: In this context, `node_type` refers to the C++17 concept of a
+  // move-only type that owns and provides access to the elements in
+  // associative containers (https://en.cppreference.com/w/cpp/container/node_handle).
+  // It does NOT refer to the data layout of the underlying btree.
+  using Base::extract;
+
+  // btree_set::merge()
+  //
+  // Extracts elements from a given `source` btree_set into this
+  // `btree_set`. If the destination `btree_set` already contains an
+  // element with an equivalent key, that element is not extracted.
+  using Base::merge;
+
+  // btree_set::swap(btree_set& other)
+  //
+  // Exchanges the contents of this `btree_set` with those of the `other`
+  // btree_set, avoiding invocation of any move, copy, or swap operations on
+  // individual elements.
+  //
+  // All iterators and references on the `btree_set` remain valid, except
+  // for the past-the-end iterator, which is invalidated.
+  using Base::swap;
+
+  // btree_set::contains()
+  //
+  // template <typename K> bool contains(const K& key) const:
+  //
+  // Determines whether an element comparing equal to the given `key` exists
+  // within the `btree_set`, returning `true` if so or `false` otherwise.
+  //
+  // Supports heterogeneous lookup, provided that the set is provided a
+  // compatible heterogeneous comparator.
+  using Base::contains;
+
+  // btree_set::count()
+  //
+  // template <typename K> size_type count(const K& key) const:
+  //
+  // Returns the number of elements comparing equal to the given `key` within
+  // the `btree_set`. Note that this function will return either `1` or `0`
+  // since duplicate elements are not allowed within a `btree_set`.
+  //
+  // Supports heterogeneous lookup, provided that the set is provided a
+  // compatible heterogeneous comparator.
+  using Base::count;
+
+  // btree_set::equal_range()
+  //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `btree_set`.
+  using Base::equal_range;
+
+  // btree_set::find()
+  //
+  // template <typename K> iterator find(const K& key):
+  // template <typename K> const_iterator find(const K& key) const:
+  //
+  // Finds an element with the passed `key` within the `btree_set`.
+  //
+  // Supports heterogeneous lookup, provided that the set is provided a
+  // compatible heterogeneous comparator.
+  using Base::find;
+
+  // btree_set::get_allocator()
+  //
+  // Returns the allocator associated with this `btree_set`.
+  using Base::get_allocator;
+
+  // btree_set::key_comp()
+  //
+  // Returns the key comparator associated with this `btree_set`.
+  using Base::key_comp;
+
+  // btree_set::value_comp()
+  //
+  // Returns the value comparator associated with this `btree_set`. The keys
+  // to sort the elements are the values themselves, therefore `value_comp`
+  // and its sibling member function `key_comp` are equivalent.
+  using Base::value_comp;
+};
+
+// absl::swap(absl::btree_set<>, absl::btree_set<>)
+//
+// Swaps the contents of two `absl::btree_set` containers.
+template <typename K, typename C, typename A>
+void swap(btree_set<K, C, A> &x, btree_set<K, C, A> &y) {
+  return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_set<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
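+// For example (sketch):
+//
+//   absl::btree_set<int> s = {1, 2, 3, 4};
+//   absl::erase_if(s, [](int k) { return k % 2 == 0; });  // s == {1, 3}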
+template <typename K, typename C, typename A, typename Pred>
+void erase_if(btree_set<K, C, A> &set, Pred pred) {
+  for (auto it = set.begin(); it != set.end();) {
+    if (pred(*it)) {
+      it = set.erase(it);
+    } else {
+      ++it;
+    }
+  }
+}
+
+// absl::btree_multiset<>
+//
+// An `absl::btree_multiset<K>` is an ordered associative container of
+// possibly-duplicate key values designed to be a more efficient replacement
+// for `std::multiset` (in most cases). Unlike `absl::btree_set`, a B-tree
+// multiset allows equivalent elements.
+//
+// Keys are sorted using an (optional) comparison function, which defaults to
+// `std::less<K>`.
+//
+// An `absl::btree_multiset<K>` uses a default allocator of `std::allocator<K>`
+// to allocate (and deallocate) nodes, and construct and destruct values within
+// those nodes. You may instead specify a custom allocator `A` (which in turn
+// requires specifying a custom comparator `C`) as in
+// `absl::btree_multiset<K, C, A>`.
+//
+template <typename Key, typename Compare = std::less<Key>,
+          typename Alloc = std::allocator<Key>>
+class btree_multiset
+    : public container_internal::btree_multiset_container<
+          container_internal::btree<container_internal::set_params<
+              Key, Compare, Alloc, /*TargetNodeSize=*/256,
+              /*Multi=*/true>>> {
+  using Base = typename btree_multiset::btree_multiset_container;
+
+ public:
+  // Constructors and Assignment Operators
+  //
+  // A `btree_multiset` supports the same overload set as `std::multiset`
+  // for construction and assignment:
+  //
+  // * Default constructor
+  //
+  //   absl::btree_multiset<std::string> set1;
+  //
+  // * Initializer List constructor
+  //
+  //   absl::btree_multiset<std::string> set2 =
+  //       {{"huey"}, {"dewey"}, {"louie"},};
+  //
+  // * Copy constructor
+  //
+  //   absl::btree_multiset<std::string> set3(set2);
+  //
+  // * Copy assignment operator
+  //
+  //   absl::btree_multiset<std::string> set4;
+  //   set4 = set3;
+  //
+  // * Move constructor
+  //
+  //   // Move is guaranteed efficient
+  //   absl::btree_multiset<std::string> set5(std::move(set4));
+  //
+  // * Move assignment operator
+  //
+  //   // May be efficient if allocators are compatible
+  //   absl::btree_multiset<std::string> set6;
+  //   set6 = std::move(set5);
+  //
+  // * Range constructor
+  //
+  //   std::vector<std::string> v = {"a", "b"};
+  //   absl::btree_multiset<std::string> set7(v.begin(), v.end());
+  btree_multiset() {}
+  using Base::Base;
+
+  // btree_multiset::begin()
+  //
+  // Returns an iterator to the beginning of the `btree_multiset`.
+  using Base::begin;
+
+  // btree_multiset::cbegin()
+  //
+  // Returns a const iterator to the beginning of the `btree_multiset`.
+  using Base::cbegin;
+
+  // btree_multiset::end()
+  //
+  // Returns an iterator to the end of the `btree_multiset`.
+  using Base::end;
+
+  // btree_multiset::cend()
+  //
+  // Returns a const iterator to the end of the `btree_multiset`.
+  using Base::cend;
+
+  // btree_multiset::empty()
+  //
+  // Returns whether or not the `btree_multiset` is empty.
+  using Base::empty;
+
+  // btree_multiset::max_size()
+  //
+  // Returns the largest theoretically possible number of elements within a
+  // `btree_multiset` under current memory constraints. This value can be
+  // thought of as the largest value of `std::distance(begin(), end())` for a
+  // `btree_multiset<Key>`.
+  using Base::max_size;
+
+  // btree_multiset::size()
+  //
+  // Returns the number of elements currently within the `btree_multiset`.
+  using Base::size;
+
+  // btree_multiset::clear()
+  //
+  // Removes all elements from the `btree_multiset`.
Invalidates any references,
+  // pointers, or iterators referring to contained elements.
+  using Base::clear;
+
+  // btree_multiset::erase()
+  //
+  // Erases elements within the `btree_multiset`. Overloads are listed below.
+  //
+  // iterator erase(iterator position):
+  // iterator erase(const_iterator position):
+  //
+  //   Erases the element at `position` of the `btree_multiset`, returning
+  //   the iterator pointing to the element after the one that was erased
+  //   (or end() if none exists).
+  //
+  // iterator erase(const_iterator first, const_iterator last):
+  //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning the iterator pointing to the element after the interval that
+  //   was erased (or end() if none exists).
+  //
+  // template <typename K> size_type erase(const K& key):
+  //
+  //   Erases the elements matching the key, if any exist, returning the
+  //   number of elements erased.
+  using Base::erase;
+
+  // btree_multiset::insert()
+  //
+  // Inserts an element of the specified value into the `btree_multiset`,
+  // returning an iterator pointing to the newly inserted element.
+  // Any references, pointers, or iterators are invalidated. Overloads are
+  // listed below.
+  //
+  // iterator insert(const value_type& value):
+  //
+  //   Inserts a value into the `btree_multiset`, returning an iterator to
+  //   the inserted element.
+  //
+  // iterator insert(value_type&& value):
+  //
+  //   Inserts a moveable value into the `btree_multiset`, returning an
+  //   iterator to the inserted element.
+  //
+  // iterator insert(const_iterator hint, const value_type& value):
+  // iterator insert(const_iterator hint, value_type&& value):
+  //
+  //   Inserts a value, using the position of `hint` as a non-binding
+  //   suggestion for where to begin the insertion search. Returns an
+  //   iterator to the inserted element.
+  //
+  // void insert(InputIterator first, InputIterator last):
+  //
+  //   Inserts a range of values [`first`, `last`).
+  //
+  // void insert(std::initializer_list<init_type> ilist):
+  //
+  //   Inserts the elements within the initializer list `ilist`.
+  using Base::insert;
+
+  // btree_multiset::emplace()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `btree_multiset`. Any references, pointers, or iterators are
+  // invalidated.
+  using Base::emplace;
+
+  // btree_multiset::emplace_hint()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `btree_multiset`, using the position of `hint` as a
+  // non-binding suggestion for where to begin the insertion search.
+  //
+  // Any references, pointers, or iterators are invalidated.
+  using Base::emplace_hint;
+
+  // btree_multiset::extract()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns
+  // it as a C++17-compatible node handle. Overloads are listed below.
+  //
+  // node_type extract(const_iterator position):
+  //
+  //   Extracts the element at the indicated position and returns a node
+  //   handle owning that extracted data.
+  //
+  // template <typename K> node_type extract(const K& x):
+  //
+  //   Extracts the element with the key matching the passed key value and
+  //   returns a node handle owning that extracted data. If the
+  //   `btree_multiset` does not contain an element with a matching key, this
+  //   function returns an empty node handle.
+  //
+  // NOTE: In this context, `node_type` refers to the C++17 concept of a
+  // move-only type that owns and provides access to the elements in
+  // associative containers (https://en.cppreference.com/w/cpp/container/node_handle).
+  // It does NOT refer to the data layout of the underlying btree.
+  using Base::extract;
+
+  // btree_multiset::merge()
+  //
+  // Extracts all elements from a given `source` btree_multiset into this
+  // `btree_multiset`. Because a `btree_multiset` permits duplicate keys,
+  // every element of `source` is extracted, even when this `btree_multiset`
+  // already contains elements with equivalent keys.
+  using Base::merge;
+
+  // btree_multiset::swap(btree_multiset& other)
+  //
+  // Exchanges the contents of this `btree_multiset` with those of the `other`
+  // btree_multiset, avoiding invocation of any move, copy, or swap operations
+  // on individual elements.
+  //
+  // All iterators and references on the `btree_multiset` remain valid,
+  // except for the past-the-end iterator, which is invalidated.
+  using Base::swap;
+
+  // btree_multiset::contains()
+  //
+  // template <typename K> bool contains(const K& key) const:
+  //
+  // Determines whether an element comparing equal to the given `key` exists
+  // within the `btree_multiset`, returning `true` if so or `false` otherwise.
+  //
+  // Supports heterogeneous lookup, provided that the set is provided a
+  // compatible heterogeneous comparator.
+  using Base::contains;
+
+  // btree_multiset::count()
+  //
+  // template <typename K> size_type count(const K& key) const:
+  //
+  // Returns the number of elements comparing equal to the given `key` within
+  // the `btree_multiset`.
+  //
+  // Supports heterogeneous lookup, provided that the set is provided a
+  // compatible heterogeneous comparator.
+  using Base::count;
+
+  // btree_multiset::equal_range()
+  //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `btree_multiset`.
+  using Base::equal_range;
+
+  // btree_multiset::find()
+  //
+  // template <typename K> iterator find(const K& key):
+  // template <typename K> const_iterator find(const K& key) const:
+  //
+  // Finds an element with the passed `key` within the `btree_multiset`.
+  //
+  // Supports heterogeneous lookup, provided that the set is provided a
+  // compatible heterogeneous comparator.
+  using Base::find;
+
+  // btree_multiset::get_allocator()
+  //
+  // Returns the allocator associated with this `btree_multiset`.
+  using Base::get_allocator;
+
+  // btree_multiset::key_comp()
+  //
+  // Returns the key comparator associated with this `btree_multiset`.
+  using Base::key_comp;
+
+  // btree_multiset::value_comp()
+  //
+  // Returns the value comparator associated with this `btree_multiset`. The
+  // keys to sort the elements are the values themselves, therefore `value_comp`
+  // and its sibling member function `key_comp` are equivalent.
+  using Base::value_comp;
+};
+
+// absl::swap(absl::btree_multiset<>, absl::btree_multiset<>)
+//
+// Swaps the contents of two `absl::btree_multiset` containers.
+template <typename K, typename C, typename A>
+void swap(btree_multiset<K, C, A> &x, btree_multiset<K, C, A> &y) {
+  return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_multiset<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
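+// For example (sketch): with duplicate keys, every matching element goes:
+//
+//   absl::btree_multiset<int> ms = {1, 1, 2, 3};
+//   absl::erase_if(ms, [](int k) { return k == 1; });  // ms == {2, 3}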
+template <typename K, typename C, typename A, typename Pred> +void erase_if(btree_multiset<K, C, A> &set, Pred pred) { + for (auto it = set.begin(); it != set.end();) { + if (pred(*it)) { + it = set.erase(it); + } else { + ++it; + } + } +} + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_BTREE_SET_H_ diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc new file mode 100644 index 00000000..9edf38f9 --- /dev/null +++ b/absl/container/btree_test.cc @@ -0,0 +1,2404 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/btree_test.h" + +#include <cstdint> +#include <map> +#include <memory> +#include <stdexcept> +#include <string> +#include <type_traits> +#include <utility> + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/macros.h" +#include "absl/container/btree_map.h" +#include "absl/container/btree_set.h" +#include "absl/container/internal/counting_allocator.h" +#include "absl/container/internal/test_instance_tracker.h" +#include "absl/flags/flag.h" +#include "absl/hash/hash_testing.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_split.h" +#include "absl/strings/string_view.h" +#include "absl/types/compare.h" + +ABSL_FLAG(int, test_values, 10000, "The number of values to use for tests"); + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::absl::test_internal::CopyableMovableInstance; +using ::absl::test_internal::InstanceTracker; +using ::absl::test_internal::MovableOnlyInstance; +using ::testing::ElementsAre; +using ::testing::ElementsAreArray; +using ::testing::IsEmpty; +using ::testing::Pair; + +template <typename T, typename U> +void CheckPairEquals(const T &x, const U &y) { + ABSL_INTERNAL_CHECK(x == y, "Values are unequal."); +} + +template <typename T, typename U, typename V, typename W> +void CheckPairEquals(const std::pair<T, U> &x, const std::pair<V, W> &y) { + CheckPairEquals(x.first, y.first); + CheckPairEquals(x.second, y.second); +} +} // namespace + +// The base class for a sorted associative container checker. TreeType is the +// container type to check and CheckerType is the container type to check +// against. TreeType is expected to be btree_{set,map,multiset,multimap} and +// CheckerType is expected to be {set,map,multiset,multimap}. 
+template <typename TreeType, typename CheckerType> +class base_checker { + public: + using key_type = typename TreeType::key_type; + using value_type = typename TreeType::value_type; + using key_compare = typename TreeType::key_compare; + using pointer = typename TreeType::pointer; + using const_pointer = typename TreeType::const_pointer; + using reference = typename TreeType::reference; + using const_reference = typename TreeType::const_reference; + using size_type = typename TreeType::size_type; + using difference_type = typename TreeType::difference_type; + using iterator = typename TreeType::iterator; + using const_iterator = typename TreeType::const_iterator; + using reverse_iterator = typename TreeType::reverse_iterator; + using const_reverse_iterator = typename TreeType::const_reverse_iterator; + + public: + base_checker() : const_tree_(tree_) {} + base_checker(const base_checker &x) + : tree_(x.tree_), const_tree_(tree_), checker_(x.checker_) {} + template <typename InputIterator> + base_checker(InputIterator b, InputIterator e) + : tree_(b, e), const_tree_(tree_), checker_(b, e) {} + + iterator begin() { return tree_.begin(); } + const_iterator begin() const { return tree_.begin(); } + iterator end() { return tree_.end(); } + const_iterator end() const { return tree_.end(); } + reverse_iterator rbegin() { return tree_.rbegin(); } + const_reverse_iterator rbegin() const { return tree_.rbegin(); } + reverse_iterator rend() { return tree_.rend(); } + const_reverse_iterator rend() const { return tree_.rend(); } + + template <typename IterType, typename CheckerIterType> + IterType iter_check(IterType tree_iter, CheckerIterType checker_iter) const { + if (tree_iter == tree_.end()) { + ABSL_INTERNAL_CHECK(checker_iter == checker_.end(), + "Checker iterator not at end."); + } else { + CheckPairEquals(*tree_iter, *checker_iter); + } + return tree_iter; + } + template <typename IterType, typename CheckerIterType> + IterType riter_check(IterType tree_iter, CheckerIterType checker_iter) const { + if (tree_iter == tree_.rend()) { + ABSL_INTERNAL_CHECK(checker_iter == checker_.rend(), + "Checker iterator not at rend."); + } else { + CheckPairEquals(*tree_iter, *checker_iter); + } + return tree_iter; + } + void value_check(const value_type &x) { + typename KeyOfValue<typename TreeType::key_type, + typename TreeType::value_type>::type key_of_value; + const key_type &key = key_of_value(x); + CheckPairEquals(*find(key), x); + lower_bound(key); + upper_bound(key); + equal_range(key); + contains(key); + count(key); + } + void erase_check(const key_type &key) { + EXPECT_FALSE(tree_.contains(key)); + EXPECT_EQ(tree_.find(key), const_tree_.end()); + EXPECT_FALSE(const_tree_.contains(key)); + EXPECT_EQ(const_tree_.find(key), tree_.end()); + EXPECT_EQ(tree_.equal_range(key).first, + const_tree_.equal_range(key).second); + } + + iterator lower_bound(const key_type &key) { + return iter_check(tree_.lower_bound(key), checker_.lower_bound(key)); + } + const_iterator lower_bound(const key_type &key) const { + return iter_check(tree_.lower_bound(key), checker_.lower_bound(key)); + } + iterator upper_bound(const key_type &key) { + return iter_check(tree_.upper_bound(key), checker_.upper_bound(key)); + } + const_iterator upper_bound(const key_type &key) const { + return iter_check(tree_.upper_bound(key), checker_.upper_bound(key)); + } + std::pair<iterator, iterator> equal_range(const key_type &key) { + std::pair<typename CheckerType::iterator, typename CheckerType::iterator> + checker_res = 
checker_.equal_range(key); + std::pair<iterator, iterator> tree_res = tree_.equal_range(key); + iter_check(tree_res.first, checker_res.first); + iter_check(tree_res.second, checker_res.second); + return tree_res; + } + std::pair<const_iterator, const_iterator> equal_range( + const key_type &key) const { + std::pair<typename CheckerType::const_iterator, + typename CheckerType::const_iterator> + checker_res = checker_.equal_range(key); + std::pair<const_iterator, const_iterator> tree_res = tree_.equal_range(key); + iter_check(tree_res.first, checker_res.first); + iter_check(tree_res.second, checker_res.second); + return tree_res; + } + iterator find(const key_type &key) { + return iter_check(tree_.find(key), checker_.find(key)); + } + const_iterator find(const key_type &key) const { + return iter_check(tree_.find(key), checker_.find(key)); + } + bool contains(const key_type &key) const { return find(key) != end(); } + size_type count(const key_type &key) const { + size_type res = checker_.count(key); + EXPECT_EQ(res, tree_.count(key)); + return res; + } + + base_checker &operator=(const base_checker &x) { + tree_ = x.tree_; + checker_ = x.checker_; + return *this; + } + + int erase(const key_type &key) { + int size = tree_.size(); + int res = checker_.erase(key); + EXPECT_EQ(res, tree_.count(key)); + EXPECT_EQ(res, tree_.erase(key)); + EXPECT_EQ(tree_.count(key), 0); + EXPECT_EQ(tree_.size(), size - res); + erase_check(key); + return res; + } + iterator erase(iterator iter) { + key_type key = iter.key(); + int size = tree_.size(); + int count = tree_.count(key); + auto checker_iter = checker_.lower_bound(key); + for (iterator tmp(tree_.lower_bound(key)); tmp != iter; ++tmp) { + ++checker_iter; + } + auto checker_next = checker_iter; + ++checker_next; + checker_.erase(checker_iter); + iter = tree_.erase(iter); + EXPECT_EQ(tree_.size(), checker_.size()); + EXPECT_EQ(tree_.size(), size - 1); + EXPECT_EQ(tree_.count(key), count - 1); + if (count == 1) { + erase_check(key); + } + return iter_check(iter, checker_next); + } + + void erase(iterator begin, iterator end) { + int size = tree_.size(); + int count = std::distance(begin, end); + auto checker_begin = checker_.lower_bound(begin.key()); + for (iterator tmp(tree_.lower_bound(begin.key())); tmp != begin; ++tmp) { + ++checker_begin; + } + auto checker_end = + end == tree_.end() ? checker_.end() : checker_.lower_bound(end.key()); + if (end != tree_.end()) { + for (iterator tmp(tree_.lower_bound(end.key())); tmp != end; ++tmp) { + ++checker_end; + } + } + const auto checker_ret = checker_.erase(checker_begin, checker_end); + const auto tree_ret = tree_.erase(begin, end); + EXPECT_EQ(std::distance(checker_.begin(), checker_ret), + std::distance(tree_.begin(), tree_ret)); + EXPECT_EQ(tree_.size(), checker_.size()); + EXPECT_EQ(tree_.size(), size - count); + } + + void clear() { + tree_.clear(); + checker_.clear(); + } + void swap(base_checker &x) { + tree_.swap(x.tree_); + checker_.swap(x.checker_); + } + + void verify() const { + tree_.verify(); + EXPECT_EQ(tree_.size(), checker_.size()); + + // Move through the forward iterators using increment. + auto checker_iter = checker_.begin(); + const_iterator tree_iter(tree_.begin()); + for (; tree_iter != tree_.end(); ++tree_iter, ++checker_iter) { + CheckPairEquals(*tree_iter, *checker_iter); + } + + // Move through the forward iterators using decrement. 
+ for (int n = tree_.size() - 1; n >= 0; --n) { + iter_check(tree_iter, checker_iter); + --tree_iter; + --checker_iter; + } + EXPECT_EQ(tree_iter, tree_.begin()); + EXPECT_EQ(checker_iter, checker_.begin()); + + // Move through the reverse iterators using increment. + auto checker_riter = checker_.rbegin(); + const_reverse_iterator tree_riter(tree_.rbegin()); + for (; tree_riter != tree_.rend(); ++tree_riter, ++checker_riter) { + CheckPairEquals(*tree_riter, *checker_riter); + } + + // Move through the reverse iterators using decrement. + for (int n = tree_.size() - 1; n >= 0; --n) { + riter_check(tree_riter, checker_riter); + --tree_riter; + --checker_riter; + } + EXPECT_EQ(tree_riter, tree_.rbegin()); + EXPECT_EQ(checker_riter, checker_.rbegin()); + } + + const TreeType &tree() const { return tree_; } + + size_type size() const { + EXPECT_EQ(tree_.size(), checker_.size()); + return tree_.size(); + } + size_type max_size() const { return tree_.max_size(); } + bool empty() const { + EXPECT_EQ(tree_.empty(), checker_.empty()); + return tree_.empty(); + } + + protected: + TreeType tree_; + const TreeType &const_tree_; + CheckerType checker_; +}; + +namespace { +// A checker for unique sorted associative containers. TreeType is expected to +// be btree_{set,map} and CheckerType is expected to be {set,map}. +template <typename TreeType, typename CheckerType> +class unique_checker : public base_checker<TreeType, CheckerType> { + using super_type = base_checker<TreeType, CheckerType>; + + public: + using iterator = typename super_type::iterator; + using value_type = typename super_type::value_type; + + public: + unique_checker() : super_type() {} + unique_checker(const unique_checker &x) : super_type(x) {} + template <class InputIterator> + unique_checker(InputIterator b, InputIterator e) : super_type(b, e) {} + unique_checker &operator=(const unique_checker &) = default; + + // Insertion routines. + std::pair<iterator, bool> insert(const value_type &x) { + int size = this->tree_.size(); + std::pair<typename CheckerType::iterator, bool> checker_res = + this->checker_.insert(x); + std::pair<iterator, bool> tree_res = this->tree_.insert(x); + CheckPairEquals(*tree_res.first, *checker_res.first); + EXPECT_EQ(tree_res.second, checker_res.second); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + tree_res.second); + return tree_res; + } + iterator insert(iterator position, const value_type &x) { + int size = this->tree_.size(); + std::pair<typename CheckerType::iterator, bool> checker_res = + this->checker_.insert(x); + iterator tree_res = this->tree_.insert(position, x); + CheckPairEquals(*tree_res, *checker_res.first); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + checker_res.second); + return tree_res; + } + template <typename InputIterator> + void insert(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert(*b); + } + } +}; + +// A checker for multiple sorted associative containers. TreeType is expected +// to be btree_{multiset,multimap} and CheckerType is expected to be +// {multiset,multimap}. 
+template <typename TreeType, typename CheckerType> +class multi_checker : public base_checker<TreeType, CheckerType> { + using super_type = base_checker<TreeType, CheckerType>; + + public: + using iterator = typename super_type::iterator; + using value_type = typename super_type::value_type; + + public: + multi_checker() : super_type() {} + multi_checker(const multi_checker &x) : super_type(x) {} + template <class InputIterator> + multi_checker(InputIterator b, InputIterator e) : super_type(b, e) {} + multi_checker &operator=(const multi_checker &) = default; + + // Insertion routines. + iterator insert(const value_type &x) { + int size = this->tree_.size(); + auto checker_res = this->checker_.insert(x); + iterator tree_res = this->tree_.insert(x); + CheckPairEquals(*tree_res, *checker_res); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + 1); + return tree_res; + } + iterator insert(iterator position, const value_type &x) { + int size = this->tree_.size(); + auto checker_res = this->checker_.insert(x); + iterator tree_res = this->tree_.insert(position, x); + CheckPairEquals(*tree_res, *checker_res); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + 1); + return tree_res; + } + template <typename InputIterator> + void insert(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert(*b); + } + } +}; + +template <typename T, typename V> +void DoTest(const char *name, T *b, const std::vector<V> &values) { + typename KeyOfValue<typename T::key_type, V>::type key_of_value; + + T &mutable_b = *b; + const T &const_b = *b; + + // Test insert. + for (int i = 0; i < values.size(); ++i) { + mutable_b.insert(values[i]); + mutable_b.value_check(values[i]); + } + ASSERT_EQ(mutable_b.size(), values.size()); + + const_b.verify(); + + // Test copy constructor. + T b_copy(const_b); + EXPECT_EQ(b_copy.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_copy.find(key_of_value(values[i])), values[i]); + } + + // Test range constructor. + T b_range(const_b.begin(), const_b.end()); + EXPECT_EQ(b_range.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + + // Test range insertion for values that already exist. + b_range.insert(b_copy.begin(), b_copy.end()); + b_range.verify(); + + // Test range insertion for new values. + b_range.clear(); + b_range.insert(b_copy.begin(), b_copy.end()); + EXPECT_EQ(b_range.size(), b_copy.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + + // Test assignment to self. Nothing should change. + b_range.operator=(b_range); + EXPECT_EQ(b_range.size(), b_copy.size()); + + // Test assignment of new values. + b_range.clear(); + b_range = b_copy; + EXPECT_EQ(b_range.size(), b_copy.size()); + + // Test swap. + b_range.clear(); + b_range.swap(b_copy); + EXPECT_EQ(b_copy.size(), 0); + EXPECT_EQ(b_range.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + b_range.swap(b_copy); + + // Test non-member function swap. + swap(b_range, b_copy); + EXPECT_EQ(b_copy.size(), 0); + EXPECT_EQ(b_range.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + swap(b_range, b_copy); + + // Test erase via values. 
+ for (int i = 0; i < values.size(); ++i) { + mutable_b.erase(key_of_value(values[i])); + // Erasing a non-existent key should have no effect. + ASSERT_EQ(mutable_b.erase(key_of_value(values[i])), 0); + } + + const_b.verify(); + EXPECT_EQ(const_b.size(), 0); + + // Test erase via iterators. + mutable_b = b_copy; + for (int i = 0; i < values.size(); ++i) { + mutable_b.erase(mutable_b.find(key_of_value(values[i]))); + } + + const_b.verify(); + EXPECT_EQ(const_b.size(), 0); + + // Test insert with hint. + for (int i = 0; i < values.size(); i++) { + mutable_b.insert(mutable_b.upper_bound(key_of_value(values[i])), values[i]); + } + + const_b.verify(); + + // Test range erase. + mutable_b.erase(mutable_b.begin(), mutable_b.end()); + EXPECT_EQ(mutable_b.size(), 0); + const_b.verify(); + + // First half. + mutable_b = b_copy; + typename T::iterator mutable_iter_end = mutable_b.begin(); + for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_end; + mutable_b.erase(mutable_b.begin(), mutable_iter_end); + EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 2); + const_b.verify(); + + // Second half. + mutable_b = b_copy; + typename T::iterator mutable_iter_begin = mutable_b.begin(); + for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_begin; + mutable_b.erase(mutable_iter_begin, mutable_b.end()); + EXPECT_EQ(mutable_b.size(), values.size() / 2); + const_b.verify(); + + // Second quarter. + mutable_b = b_copy; + mutable_iter_begin = mutable_b.begin(); + for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_begin; + mutable_iter_end = mutable_iter_begin; + for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_end; + mutable_b.erase(mutable_iter_begin, mutable_iter_end); + EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 4); + const_b.verify(); + + mutable_b.clear(); +} + +template <typename T> +void ConstTest() { + using value_type = typename T::value_type; + typename KeyOfValue<typename T::key_type, value_type>::type key_of_value; + + T mutable_b; + const T &const_b = mutable_b; + + // Insert a single value into the container and test looking it up. + value_type value = Generator<value_type>(2)(2); + mutable_b.insert(value); + EXPECT_TRUE(mutable_b.contains(key_of_value(value))); + EXPECT_NE(mutable_b.find(key_of_value(value)), const_b.end()); + EXPECT_TRUE(const_b.contains(key_of_value(value))); + EXPECT_NE(const_b.find(key_of_value(value)), mutable_b.end()); + EXPECT_EQ(*const_b.lower_bound(key_of_value(value)), value); + EXPECT_EQ(const_b.upper_bound(key_of_value(value)), const_b.end()); + EXPECT_EQ(*const_b.equal_range(key_of_value(value)).first, value); + + // We can only create a non-const iterator from a non-const container. + typename T::iterator mutable_iter(mutable_b.begin()); + EXPECT_EQ(mutable_iter, const_b.begin()); + EXPECT_NE(mutable_iter, const_b.end()); + EXPECT_EQ(const_b.begin(), mutable_iter); + EXPECT_NE(const_b.end(), mutable_iter); + typename T::reverse_iterator mutable_riter(mutable_b.rbegin()); + EXPECT_EQ(mutable_riter, const_b.rbegin()); + EXPECT_NE(mutable_riter, const_b.rend()); + EXPECT_EQ(const_b.rbegin(), mutable_riter); + EXPECT_NE(const_b.rend(), mutable_riter); + + // We can create a const iterator from a non-const iterator. 
+ typename T::const_iterator const_iter(mutable_iter); + EXPECT_EQ(const_iter, mutable_b.begin()); + EXPECT_NE(const_iter, mutable_b.end()); + EXPECT_EQ(mutable_b.begin(), const_iter); + EXPECT_NE(mutable_b.end(), const_iter); + typename T::const_reverse_iterator const_riter(mutable_riter); + EXPECT_EQ(const_riter, mutable_b.rbegin()); + EXPECT_NE(const_riter, mutable_b.rend()); + EXPECT_EQ(mutable_b.rbegin(), const_riter); + EXPECT_NE(mutable_b.rend(), const_riter); + + // Make sure various methods can be invoked on a const container. + const_b.verify(); + ASSERT_TRUE(!const_b.empty()); + EXPECT_EQ(const_b.size(), 1); + EXPECT_GT(const_b.max_size(), 0); + EXPECT_TRUE(const_b.contains(key_of_value(value))); + EXPECT_EQ(const_b.count(key_of_value(value)), 1); +} + +template <typename T, typename C> +void BtreeTest() { + ConstTest<T>(); + + using V = typename remove_pair_const<typename T::value_type>::type; + const std::vector<V> random_values = GenerateValuesWithSeed<V>( + absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values), + testing::GTEST_FLAG(random_seed)); + + unique_checker<T, C> container; + + // Test key insertion/deletion in sorted order. + std::vector<V> sorted_values(random_values); + std::sort(sorted_values.begin(), sorted_values.end()); + DoTest("sorted: ", &container, sorted_values); + + // Test key insertion/deletion in reverse sorted order. + std::reverse(sorted_values.begin(), sorted_values.end()); + DoTest("rsorted: ", &container, sorted_values); + + // Test key insertion/deletion in random order. + DoTest("random: ", &container, random_values); +} + +template <typename T, typename C> +void BtreeMultiTest() { + ConstTest<T>(); + + using V = typename remove_pair_const<typename T::value_type>::type; + const std::vector<V> random_values = GenerateValuesWithSeed<V>( + absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values), + testing::GTEST_FLAG(random_seed)); + + multi_checker<T, C> container; + + // Test keys in sorted order. + std::vector<V> sorted_values(random_values); + std::sort(sorted_values.begin(), sorted_values.end()); + DoTest("sorted: ", &container, sorted_values); + + // Test keys in reverse sorted order. + std::reverse(sorted_values.begin(), sorted_values.end()); + DoTest("rsorted: ", &container, sorted_values); + + // Test keys in random order. + DoTest("random: ", &container, random_values); + + // Test keys in random order w/ duplicates. + std::vector<V> duplicate_values(random_values); + duplicate_values.insert(duplicate_values.end(), random_values.begin(), + random_values.end()); + DoTest("duplicates:", &container, duplicate_values); + + // Test all identical keys. 
+ std::vector<V> identical_values(100); + std::fill(identical_values.begin(), identical_values.end(), + Generator<V>(2)(2)); + DoTest("identical: ", &container, identical_values); +} + +template <typename T> +struct PropagatingCountingAlloc : public CountingAllocator<T> { + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + + using Base = CountingAllocator<T>; + using Base::Base; + + template <typename U> + explicit PropagatingCountingAlloc(const PropagatingCountingAlloc<U> &other) + : Base(other.bytes_used_) {} + + template <typename U> + struct rebind { + using other = PropagatingCountingAlloc<U>; + }; +}; + +template <typename T> +void BtreeAllocatorTest() { + using value_type = typename T::value_type; + + int64_t bytes1 = 0, bytes2 = 0; + PropagatingCountingAlloc<T> allocator1(&bytes1); + PropagatingCountingAlloc<T> allocator2(&bytes2); + Generator<value_type> generator(1000); + + // Test that we allocate properly aligned memory. If we don't, then Layout + // will assert fail. + auto unused1 = allocator1.allocate(1); + auto unused2 = allocator2.allocate(1); + + // Test copy assignment + { + T b1(typename T::key_compare(), allocator1); + T b2(typename T::key_compare(), allocator2); + + int64_t original_bytes1 = bytes1; + b1.insert(generator(0)); + EXPECT_GT(bytes1, original_bytes1); + + // This should propagate the allocator. + b1 = b2; + EXPECT_EQ(b1.size(), 0); + EXPECT_EQ(b2.size(), 0); + EXPECT_EQ(bytes1, original_bytes1); + + for (int i = 1; i < 1000; i++) { + b1.insert(generator(i)); + } + + // We should have allocated out of allocator2. + EXPECT_GT(bytes2, bytes1); + } + + // Test move assignment + { + T b1(typename T::key_compare(), allocator1); + T b2(typename T::key_compare(), allocator2); + + int64_t original_bytes1 = bytes1; + b1.insert(generator(0)); + EXPECT_GT(bytes1, original_bytes1); + + // This should propagate the allocator. + b1 = std::move(b2); + EXPECT_EQ(b1.size(), 0); + EXPECT_EQ(bytes1, original_bytes1); + + for (int i = 1; i < 1000; i++) { + b1.insert(generator(i)); + } + + // We should have allocated out of allocator2. + EXPECT_GT(bytes2, bytes1); + } + + // Test swap + { + T b1(typename T::key_compare(), allocator1); + T b2(typename T::key_compare(), allocator2); + + int64_t original_bytes1 = bytes1; + b1.insert(generator(0)); + EXPECT_GT(bytes1, original_bytes1); + + // This should swap the allocators. + swap(b1, b2); + EXPECT_EQ(b1.size(), 0); + EXPECT_EQ(b2.size(), 1); + EXPECT_GT(bytes1, original_bytes1); + + for (int i = 1; i < 1000; i++) { + b1.insert(generator(i)); + } + + // We should have allocated out of allocator2. + EXPECT_GT(bytes2, bytes1); + } + + allocator1.deallocate(unused1, 1); + allocator2.deallocate(unused2, 1); +} + +template <typename T> +void BtreeMapTest() { + using value_type = typename T::value_type; + using mapped_type = typename T::mapped_type; + + mapped_type m = Generator<mapped_type>(0)(0); + (void)m; + + T b; + + // Verify we can insert using operator[]. + for (int i = 0; i < 1000; i++) { + value_type v = Generator<value_type>(1000)(i); + b[v.first] = v.second; + } + EXPECT_EQ(b.size(), 1000); + + // Test whether we can use the "->" operator on iterators and + // reverse_iterators. This stresses the btree_map_params::pair_pointer + // mechanism. 
+ EXPECT_EQ(b.begin()->first, Generator<value_type>(1000)(0).first); + EXPECT_EQ(b.begin()->second, Generator<value_type>(1000)(0).second); + EXPECT_EQ(b.rbegin()->first, Generator<value_type>(1000)(999).first); + EXPECT_EQ(b.rbegin()->second, Generator<value_type>(1000)(999).second); +} + +template <typename T> +void BtreeMultiMapTest() { + using mapped_type = typename T::mapped_type; + mapped_type m = Generator<mapped_type>(0)(0); + (void)m; +} + +template <typename K, int N = 256> +void SetTest() { + EXPECT_EQ( + sizeof(absl::btree_set<K>), + 2 * sizeof(void *) + sizeof(typename absl::btree_set<K>::size_type)); + using BtreeSet = absl::btree_set<K>; + using CountingBtreeSet = + absl::btree_set<K, std::less<K>, PropagatingCountingAlloc<K>>; + BtreeTest<BtreeSet, std::set<K>>(); + BtreeAllocatorTest<CountingBtreeSet>(); +} + +template <typename K, int N = 256> +void MapTest() { + EXPECT_EQ( + sizeof(absl::btree_map<K, K>), + 2 * sizeof(void *) + sizeof(typename absl::btree_map<K, K>::size_type)); + using BtreeMap = absl::btree_map<K, K>; + using CountingBtreeMap = + absl::btree_map<K, K, std::less<K>, + PropagatingCountingAlloc<std::pair<const K, K>>>; + BtreeTest<BtreeMap, std::map<K, K>>(); + BtreeAllocatorTest<CountingBtreeMap>(); + BtreeMapTest<BtreeMap>(); +} + +TEST(Btree, set_int32) { SetTest<int32_t>(); } +TEST(Btree, set_int64) { SetTest<int64_t>(); } +TEST(Btree, set_string) { SetTest<std::string>(); } +TEST(Btree, set_pair) { SetTest<std::pair<int, int>>(); } +TEST(Btree, map_int32) { MapTest<int32_t>(); } +TEST(Btree, map_int64) { MapTest<int64_t>(); } +TEST(Btree, map_string) { MapTest<std::string>(); } +TEST(Btree, map_pair) { MapTest<std::pair<int, int>>(); } + +template <typename K, int N = 256> +void MultiSetTest() { + EXPECT_EQ( + sizeof(absl::btree_multiset<K>), + 2 * sizeof(void *) + sizeof(typename absl::btree_multiset<K>::size_type)); + using BtreeMSet = absl::btree_multiset<K>; + using CountingBtreeMSet = + absl::btree_multiset<K, std::less<K>, PropagatingCountingAlloc<K>>; + BtreeMultiTest<BtreeMSet, std::multiset<K>>(); + BtreeAllocatorTest<CountingBtreeMSet>(); +} + +template <typename K, int N = 256> +void MultiMapTest() { + EXPECT_EQ(sizeof(absl::btree_multimap<K, K>), + 2 * sizeof(void *) + + sizeof(typename absl::btree_multimap<K, K>::size_type)); + using BtreeMMap = absl::btree_multimap<K, K>; + using CountingBtreeMMap = + absl::btree_multimap<K, K, std::less<K>, + PropagatingCountingAlloc<std::pair<const K, K>>>; + BtreeMultiTest<BtreeMMap, std::multimap<K, K>>(); + BtreeMultiMapTest<BtreeMMap>(); + BtreeAllocatorTest<CountingBtreeMMap>(); +} + +TEST(Btree, multiset_int32) { MultiSetTest<int32_t>(); } +TEST(Btree, multiset_int64) { MultiSetTest<int64_t>(); } +TEST(Btree, multiset_string) { MultiSetTest<std::string>(); } +TEST(Btree, multiset_pair) { MultiSetTest<std::pair<int, int>>(); } +TEST(Btree, multimap_int32) { MultiMapTest<int32_t>(); } +TEST(Btree, multimap_int64) { MultiMapTest<int64_t>(); } +TEST(Btree, multimap_string) { MultiMapTest<std::string>(); } +TEST(Btree, multimap_pair) { MultiMapTest<std::pair<int, int>>(); } + +struct CompareIntToString { + bool operator()(const std::string &a, const std::string &b) const { + return a < b; + } + bool operator()(const std::string &a, int b) const { + return a < absl::StrCat(b); + } + bool operator()(int a, const std::string &b) const { + return absl::StrCat(a) < b; + } + using is_transparent = void; +}; + +struct NonTransparentCompare { + template <typename T, typename U> + bool operator()(const T &t, 
const U &u) const { + // Treating all comparators as transparent can cause inefficiencies (see + // N3657 C++ proposal). Test that for comparators without 'is_transparent' + // alias (like this one), we do not attempt heterogeneous lookup. + EXPECT_TRUE((std::is_same<T, U>())); + return t < u; + } +}; + +template <typename T> +bool CanEraseWithEmptyBrace(T t, decltype(t.erase({})) *) { + return true; +} + +template <typename T> +bool CanEraseWithEmptyBrace(T, ...) { + return false; +} + +template <typename T> +void TestHeterogeneous(T table) { + auto lb = table.lower_bound("3"); + EXPECT_EQ(lb, table.lower_bound(3)); + EXPECT_NE(lb, table.lower_bound(4)); + EXPECT_EQ(lb, table.lower_bound({"3"})); + EXPECT_NE(lb, table.lower_bound({})); + + auto ub = table.upper_bound("3"); + EXPECT_EQ(ub, table.upper_bound(3)); + EXPECT_NE(ub, table.upper_bound(5)); + EXPECT_EQ(ub, table.upper_bound({"3"})); + EXPECT_NE(ub, table.upper_bound({})); + + auto er = table.equal_range("3"); + EXPECT_EQ(er, table.equal_range(3)); + EXPECT_NE(er, table.equal_range(4)); + EXPECT_EQ(er, table.equal_range({"3"})); + EXPECT_NE(er, table.equal_range({})); + + auto it = table.find("3"); + EXPECT_EQ(it, table.find(3)); + EXPECT_NE(it, table.find(4)); + EXPECT_EQ(it, table.find({"3"})); + EXPECT_NE(it, table.find({})); + + EXPECT_TRUE(table.contains(3)); + EXPECT_FALSE(table.contains(4)); + EXPECT_TRUE(table.count({"3"})); + EXPECT_FALSE(table.contains({})); + + EXPECT_EQ(1, table.count(3)); + EXPECT_EQ(0, table.count(4)); + EXPECT_EQ(1, table.count({"3"})); + EXPECT_EQ(0, table.count({})); + + auto copy = table; + copy.erase(3); + EXPECT_EQ(table.size() - 1, copy.size()); + copy.erase(4); + EXPECT_EQ(table.size() - 1, copy.size()); + copy.erase({"5"}); + EXPECT_EQ(table.size() - 2, copy.size()); + EXPECT_FALSE(CanEraseWithEmptyBrace(table, nullptr)); + + // Also run it with const T&. 
+ if (std::is_class<T>()) TestHeterogeneous<const T &>(table); +} + +TEST(Btree, HeterogeneousLookup) { + TestHeterogeneous(btree_set<std::string, CompareIntToString>{"1", "3", "5"}); + TestHeterogeneous(btree_map<std::string, int, CompareIntToString>{ + {"1", 1}, {"3", 3}, {"5", 5}}); + TestHeterogeneous( + btree_multiset<std::string, CompareIntToString>{"1", "3", "5"}); + TestHeterogeneous(btree_multimap<std::string, int, CompareIntToString>{ + {"1", 1}, {"3", 3}, {"5", 5}}); + + // Only maps have .at() + btree_map<std::string, int, CompareIntToString> map{ + {"", -1}, {"1", 1}, {"3", 3}, {"5", 5}}; + EXPECT_EQ(1, map.at(1)); + EXPECT_EQ(3, map.at({"3"})); + EXPECT_EQ(-1, map.at({})); + const auto &cmap = map; + EXPECT_EQ(1, cmap.at(1)); + EXPECT_EQ(3, cmap.at({"3"})); + EXPECT_EQ(-1, cmap.at({})); +} + +TEST(Btree, NoHeterogeneousLookupWithoutAlias) { + using StringSet = absl::btree_set<std::string, NonTransparentCompare>; + StringSet s; + ASSERT_TRUE(s.insert("hello").second); + ASSERT_TRUE(s.insert("world").second); + EXPECT_TRUE(s.end() == s.find("blah")); + EXPECT_TRUE(s.begin() == s.lower_bound("hello")); + EXPECT_EQ(1, s.count("world")); + EXPECT_TRUE(s.contains("hello")); + EXPECT_TRUE(s.contains("world")); + EXPECT_FALSE(s.contains("blah")); + + using StringMultiSet = + absl::btree_multiset<std::string, NonTransparentCompare>; + StringMultiSet ms; + ms.insert("hello"); + ms.insert("world"); + ms.insert("world"); + EXPECT_TRUE(ms.end() == ms.find("blah")); + EXPECT_TRUE(ms.begin() == ms.lower_bound("hello")); + EXPECT_EQ(2, ms.count("world")); + EXPECT_TRUE(ms.contains("hello")); + EXPECT_TRUE(ms.contains("world")); + EXPECT_FALSE(ms.contains("blah")); +} + +TEST(Btree, DefaultTransparent) { + { + // `int` does not have a default transparent comparator. + // The input value is converted to key_type. + btree_set<int> s = {1}; + double d = 1.1; + EXPECT_EQ(s.begin(), s.find(d)); + EXPECT_TRUE(s.contains(d)); + } + + { + // `std::string` has heterogeneous support. 
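+    // The default comparator for std::string keys is transparent, so the
+    // string_view lookups below do not construct a temporary std::string.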
+    btree_set<std::string> s = {"A"};
+    EXPECT_EQ(s.begin(), s.find(absl::string_view("A")));
+    EXPECT_TRUE(s.contains(absl::string_view("A")));
+  }
+}
+
+class StringLike {
+ public:
+  StringLike() = default;
+
+  StringLike(const char *s) : s_(s) {  // NOLINT
+    ++constructor_calls_;
+  }
+
+  bool operator<(const StringLike &a) const { return s_ < a.s_; }
+
+  static void clear_constructor_call_count() { constructor_calls_ = 0; }
+
+  static int constructor_calls() { return constructor_calls_; }
+
+ private:
+  static int constructor_calls_;
+  std::string s_;
+};
+
+int StringLike::constructor_calls_ = 0;
+
+TEST(Btree, HeterogeneousLookupDoesntDegradePerformance) {
+  using StringSet = absl::btree_set<StringLike>;
+  StringSet s;
+  for (int i = 0; i < 100; ++i) {
+    ASSERT_TRUE(s.insert(absl::StrCat(i).c_str()).second);
+  }
+  StringLike::clear_constructor_call_count();
+  s.find("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+
+  StringLike::clear_constructor_call_count();
+  s.contains("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+
+  StringLike::clear_constructor_call_count();
+  s.count("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+
+  StringLike::clear_constructor_call_count();
+  s.lower_bound("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+
+  StringLike::clear_constructor_call_count();
+  s.upper_bound("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+
+  StringLike::clear_constructor_call_count();
+  s.equal_range("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+
+  StringLike::clear_constructor_call_count();
+  s.erase("50");
+  ASSERT_EQ(1, StringLike::constructor_calls());
+}
+
+// Verify that swapping btrees swaps the key comparison functors and that we
+// can use non-default-constructible comparators.
+struct SubstringLess {
+  SubstringLess() = delete;
+  explicit SubstringLess(int length) : n(length) {}
+  bool operator()(const std::string &a, const std::string &b) const {
+    return absl::string_view(a).substr(0, n) <
+           absl::string_view(b).substr(0, n);
+  }
+  int n;
+};
+
+TEST(Btree, SwapKeyCompare) {
+  using SubstringSet = absl::btree_set<std::string, SubstringLess>;
+  SubstringSet s1(SubstringLess(1), SubstringSet::allocator_type());
+  SubstringSet s2(SubstringLess(2), SubstringSet::allocator_type());
+
+  ASSERT_TRUE(s1.insert("a").second);
+  ASSERT_FALSE(s1.insert("aa").second);
+
+  ASSERT_TRUE(s2.insert("a").second);
+  ASSERT_TRUE(s2.insert("aa").second);
+  ASSERT_FALSE(s2.insert("aaa").second);
+
+  swap(s1, s2);
+
+  ASSERT_TRUE(s1.insert("b").second);
+  ASSERT_TRUE(s1.insert("bb").second);
+  ASSERT_FALSE(s1.insert("bbb").second);
+
+  ASSERT_TRUE(s2.insert("b").second);
+  ASSERT_FALSE(s2.insert("bb").second);
+}
+
+TEST(Btree, UpperBoundRegression) {
+  // Regression test for a bug where upper_bound would default-construct a new
+  // key_compare instead of copying the existing one.
+  using SubstringSet = absl::btree_set<std::string, SubstringLess>;
+  SubstringSet my_set(SubstringLess(3));
+  my_set.insert("aab");
+  my_set.insert("abb");
+  // We call upper_bound("aaa"). If this correctly uses the length 3
+  // comparator, aaa < aab < abb, so we should get aab as the result.
+  // If it instead uses the default-constructed length 2 comparator,
+  // aa == aa < ab, so we'll get abb as our result.
+ SubstringSet::iterator it = my_set.upper_bound("aaa"); + ASSERT_TRUE(it != my_set.end()); + EXPECT_EQ("aab", *it); +} + +TEST(Btree, Comparison) { + const int kSetSize = 1201; + absl::btree_set<int64_t> my_set; + for (int i = 0; i < kSetSize; ++i) { + my_set.insert(i); + } + absl::btree_set<int64_t> my_set_copy(my_set); + EXPECT_TRUE(my_set_copy == my_set); + EXPECT_TRUE(my_set == my_set_copy); + EXPECT_FALSE(my_set_copy != my_set); + EXPECT_FALSE(my_set != my_set_copy); + + my_set.insert(kSetSize); + EXPECT_FALSE(my_set_copy == my_set); + EXPECT_FALSE(my_set == my_set_copy); + EXPECT_TRUE(my_set_copy != my_set); + EXPECT_TRUE(my_set != my_set_copy); + + my_set.erase(kSetSize - 1); + EXPECT_FALSE(my_set_copy == my_set); + EXPECT_FALSE(my_set == my_set_copy); + EXPECT_TRUE(my_set_copy != my_set); + EXPECT_TRUE(my_set != my_set_copy); + + absl::btree_map<std::string, int64_t> my_map; + for (int i = 0; i < kSetSize; ++i) { + my_map[std::string(i, 'a')] = i; + } + absl::btree_map<std::string, int64_t> my_map_copy(my_map); + EXPECT_TRUE(my_map_copy == my_map); + EXPECT_TRUE(my_map == my_map_copy); + EXPECT_FALSE(my_map_copy != my_map); + EXPECT_FALSE(my_map != my_map_copy); + + ++my_map_copy[std::string(7, 'a')]; + EXPECT_FALSE(my_map_copy == my_map); + EXPECT_FALSE(my_map == my_map_copy); + EXPECT_TRUE(my_map_copy != my_map); + EXPECT_TRUE(my_map != my_map_copy); + + my_map_copy = my_map; + my_map["hello"] = kSetSize; + EXPECT_FALSE(my_map_copy == my_map); + EXPECT_FALSE(my_map == my_map_copy); + EXPECT_TRUE(my_map_copy != my_map); + EXPECT_TRUE(my_map != my_map_copy); + + my_map.erase(std::string(kSetSize - 1, 'a')); + EXPECT_FALSE(my_map_copy == my_map); + EXPECT_FALSE(my_map == my_map_copy); + EXPECT_TRUE(my_map_copy != my_map); + EXPECT_TRUE(my_map != my_map_copy); +} + +TEST(Btree, RangeCtorSanity) { + std::vector<int> ivec; + ivec.push_back(1); + std::map<int, int> imap; + imap.insert(std::make_pair(1, 2)); + absl::btree_multiset<int> tmset(ivec.begin(), ivec.end()); + absl::btree_multimap<int, int> tmmap(imap.begin(), imap.end()); + absl::btree_set<int> tset(ivec.begin(), ivec.end()); + absl::btree_map<int, int> tmap(imap.begin(), imap.end()); + EXPECT_EQ(1, tmset.size()); + EXPECT_EQ(1, tmmap.size()); + EXPECT_EQ(1, tset.size()); + EXPECT_EQ(1, tmap.size()); +} + +TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) { + absl::btree_map<std::string, std::unique_ptr<std::string>> m; + + std::unique_ptr<std::string> &v = m["A"]; + EXPECT_TRUE(v == nullptr); + v.reset(new std::string("X")); + + auto iter = m.find("A"); + EXPECT_EQ("X", *iter->second); +} + +TEST(Btree, InitializerListConstructor) { + absl::btree_set<std::string> set({"a", "b"}); + EXPECT_EQ(set.count("a"), 1); + EXPECT_EQ(set.count("b"), 1); + + absl::btree_multiset<int> mset({1, 1, 4}); + EXPECT_EQ(mset.count(1), 2); + EXPECT_EQ(mset.count(4), 1); + + absl::btree_map<int, int> map({{1, 5}, {2, 10}}); + EXPECT_EQ(map[1], 5); + EXPECT_EQ(map[2], 10); + + absl::btree_multimap<int, int> mmap({{1, 5}, {1, 10}}); + auto range = mmap.equal_range(1); + auto it = range.first; + ASSERT_NE(it, range.second); + EXPECT_EQ(it->second, 5); + ASSERT_NE(++it, range.second); + EXPECT_EQ(it->second, 10); + EXPECT_EQ(++it, range.second); +} + +TEST(Btree, InitializerListInsert) { + absl::btree_set<std::string> set; + set.insert({"a", "b"}); + EXPECT_EQ(set.count("a"), 1); + EXPECT_EQ(set.count("b"), 1); + + absl::btree_multiset<int> mset; + mset.insert({1, 1, 4}); + EXPECT_EQ(mset.count(1), 2); + EXPECT_EQ(mset.count(4), 1); + + absl::btree_map<int, 
int> map; + map.insert({{1, 5}, {2, 10}}); + // Test that inserting one element using an initializer list also works. + map.insert({3, 15}); + EXPECT_EQ(map[1], 5); + EXPECT_EQ(map[2], 10); + EXPECT_EQ(map[3], 15); + + absl::btree_multimap<int, int> mmap; + mmap.insert({{1, 5}, {1, 10}}); + auto range = mmap.equal_range(1); + auto it = range.first; + ASSERT_NE(it, range.second); + EXPECT_EQ(it->second, 5); + ASSERT_NE(++it, range.second); + EXPECT_EQ(it->second, 10); + EXPECT_EQ(++it, range.second); +} + +template <typename Compare, typename K> +void AssertKeyCompareToAdapted() { + using Adapted = typename key_compare_to_adapter<Compare>::type; + static_assert(!std::is_same<Adapted, Compare>::value, + "key_compare_to_adapter should have adapted this comparator."); + static_assert( + std::is_same<absl::weak_ordering, + absl::result_of_t<Adapted(const K &, const K &)>>::value, + "Adapted comparator should be a key-compare-to comparator."); +} +template <typename Compare, typename K> +void AssertKeyCompareToNotAdapted() { + using Unadapted = typename key_compare_to_adapter<Compare>::type; + static_assert( + std::is_same<Unadapted, Compare>::value, + "key_compare_to_adapter shouldn't have adapted this comparator."); + static_assert( + std::is_same<bool, + absl::result_of_t<Unadapted(const K &, const K &)>>::value, + "Un-adapted comparator should return bool."); +} + +TEST(Btree, KeyCompareToAdapter) { + AssertKeyCompareToAdapted<std::less<std::string>, std::string>(); + AssertKeyCompareToAdapted<std::greater<std::string>, std::string>(); + AssertKeyCompareToAdapted<std::less<absl::string_view>, absl::string_view>(); + AssertKeyCompareToAdapted<std::greater<absl::string_view>, + absl::string_view>(); + AssertKeyCompareToNotAdapted<std::less<int>, int>(); + AssertKeyCompareToNotAdapted<std::greater<int>, int>(); +} + +TEST(Btree, RValueInsert) { + InstanceTracker tracker; + + absl::btree_set<MovableOnlyInstance> set; + set.insert(MovableOnlyInstance(1)); + set.insert(MovableOnlyInstance(3)); + MovableOnlyInstance two(2); + set.insert(set.find(MovableOnlyInstance(3)), std::move(two)); + auto it = set.find(MovableOnlyInstance(2)); + ASSERT_NE(it, set.end()); + ASSERT_NE(++it, set.end()); + EXPECT_EQ(it->value(), 3); + + absl::btree_multiset<MovableOnlyInstance> mset; + MovableOnlyInstance zero(0); + MovableOnlyInstance zero2(0); + mset.insert(std::move(zero)); + mset.insert(mset.find(MovableOnlyInstance(0)), std::move(zero2)); + EXPECT_EQ(mset.count(MovableOnlyInstance(0)), 2); + + absl::btree_map<int, MovableOnlyInstance> map; + std::pair<const int, MovableOnlyInstance> p1 = {1, MovableOnlyInstance(5)}; + std::pair<const int, MovableOnlyInstance> p2 = {2, MovableOnlyInstance(10)}; + std::pair<const int, MovableOnlyInstance> p3 = {3, MovableOnlyInstance(15)}; + map.insert(std::move(p1)); + map.insert(std::move(p3)); + map.insert(map.find(3), std::move(p2)); + ASSERT_NE(map.find(2), map.end()); + EXPECT_EQ(map.find(2)->second.value(), 10); + + absl::btree_multimap<int, MovableOnlyInstance> mmap; + std::pair<const int, MovableOnlyInstance> p4 = {1, MovableOnlyInstance(5)}; + std::pair<const int, MovableOnlyInstance> p5 = {1, MovableOnlyInstance(10)}; + mmap.insert(std::move(p4)); + mmap.insert(mmap.find(1), std::move(p5)); + auto range = mmap.equal_range(1); + auto it1 = range.first; + ASSERT_NE(it1, range.second); + EXPECT_EQ(it1->second.value(), 10); + ASSERT_NE(++it1, range.second); + EXPECT_EQ(it1->second.value(), 5); + EXPECT_EQ(++it1, range.second); + + EXPECT_EQ(tracker.copies(), 0); + 
EXPECT_EQ(tracker.swaps(), 0);
+}
+
+}  // namespace
+
+class BtreeNodePeer {
+ public:
+  // Yields the size of a leaf node with a specific number of values.
+  template <typename ValueType>
+  constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
+    return btree_node<
+        set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
+                   /*TargetNodeSize=*/256,  // This parameter isn't used here.
+                   /*Multi=*/false>>::SizeWithNValues(target_values_per_node);
+  }
+
+  // Yields the number of values in a (non-root) leaf node for this set.
+  template <typename Set>
+  constexpr static size_t GetNumValuesPerNode() {
+    return btree_node<typename Set::params_type>::kNodeValues;
+  }
+};
+
+namespace {
+
+// A btree set with a specific number of values per node.
+template <typename Key, int TargetValuesPerNode, typename Cmp = std::less<Key>>
+class SizedBtreeSet
+    : public btree_set_container<btree<
+          set_params<Key, Cmp, std::allocator<Key>,
+                     BtreeNodePeer::GetTargetNodeSize<Key>(TargetValuesPerNode),
+                     /*Multi=*/false>>> {
+  using Base = typename SizedBtreeSet::btree_set_container;
+
+ public:
+  SizedBtreeSet() {}
+  using Base::Base;
+};
+
+template <typename Set>
+void ExpectOperationCounts(const int expected_moves,
+                           const int expected_comparisons,
+                           const std::vector<int> &values,
+                           InstanceTracker *tracker, Set *set) {
+  for (const int v : values) set->insert(MovableOnlyInstance(v));
+  set->clear();
+  EXPECT_EQ(tracker->moves(), expected_moves);
+  EXPECT_EQ(tracker->comparisons(), expected_comparisons);
+  EXPECT_EQ(tracker->copies(), 0);
+  EXPECT_EQ(tracker->swaps(), 0);
+  tracker->ResetCopiesMovesSwaps();
+}
+
+// Note: changes to the values in this test are expected to impact performance.
+TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
+  InstanceTracker tracker;
+  // Note: this is the minimum number of values per node.
+  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/3> set3;
+  // Note: this is the default number of values per node for a set of int32s
+  // (with 64-bit pointers).
+  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61> set61;
+  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/100> set100;
+
+  // Don't depend on flags for random values because then the expectations will
+  // fail if the flags change.
+  std::vector<int> values =
+      GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23);
+
+  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set3)>(), 3);
+  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>(), 61);
+  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set100)>(), 100);
+  if (sizeof(void *) == 8) {
+    EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<absl::btree_set<int32_t>>(),
+              BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>());
+  }
+
+  // Test key insertion/deletion in random order.
+  ExpectOperationCounts(45281, 132551, values, &tracker, &set3);
+  ExpectOperationCounts(386718, 129807, values, &tracker, &set61);
+  ExpectOperationCounts(586761, 130310, values, &tracker, &set100);
+
+  // Test key insertion/deletion in sorted order.
+  std::sort(values.begin(), values.end());
+  ExpectOperationCounts(26638, 92134, values, &tracker, &set3);
+  ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
+  ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
+
+  // Test key insertion/deletion in reverse sorted order.
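+  // Reverse order is the expensive case: every insert lands at the leftmost
+  // position, so node splits shift many values (note the much higher move
+  // counts below).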
+  std::reverse(values.begin(), values.end());
+  ExpectOperationCounts(49951, 119325, values, &tracker, &set3);
+  ExpectOperationCounts(338813, 118266, values, &tracker, &set61);
+  ExpectOperationCounts(534529, 125279, values, &tracker, &set100);
+}
+
+struct MovableOnlyInstanceThreeWayCompare {
+  absl::weak_ordering operator()(const MovableOnlyInstance &a,
+                                 const MovableOnlyInstance &b) const {
+    return a.compare(b);
+  }
+};
+
+// Note: changes to the values in this test are expected to impact performance.
+TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
+  InstanceTracker tracker;
+  // Note: this is the minimum number of values per node.
+  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/3,
+                MovableOnlyInstanceThreeWayCompare>
+      set3;
+  // Note: this is the default number of values per node for a set of int32s
+  // (with 64-bit pointers).
+  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61,
+                MovableOnlyInstanceThreeWayCompare>
+      set61;
+  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/100,
+                MovableOnlyInstanceThreeWayCompare>
+      set100;
+
+  // Don't depend on flags for random values because then the expectations will
+  // fail if the flags change.
+  std::vector<int> values =
+      GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23);
+
+  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set3)>(), 3);
+  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>(), 61);
+  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set100)>(), 100);
+  if (sizeof(void *) == 8) {
+    EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<absl::btree_set<int32_t>>(),
+              BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>());
+  }
+
+  // Test key insertion/deletion in random order.
+  ExpectOperationCounts(45281, 122560, values, &tracker, &set3);
+  ExpectOperationCounts(386718, 119816, values, &tracker, &set61);
+  ExpectOperationCounts(586761, 120319, values, &tracker, &set100);
+
+  // Test key insertion/deletion in sorted order.
+  std::sort(values.begin(), values.end());
+  ExpectOperationCounts(26638, 92134, values, &tracker, &set3);
+  ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
+  ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
+
+  // Test key insertion/deletion in reverse sorted order.
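+  // (Same workload as the two-way test above; the random- and reverse-order
+  // comparison counts come out roughly one comparison per value lower because
+  // a three-way comparator resolves equality in a single call.)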
+ std::reverse(values.begin(), values.end()); + ExpectOperationCounts(49951, 109326, values, &tracker, &set3); + ExpectOperationCounts(338813, 108267, values, &tracker, &set61); + ExpectOperationCounts(534529, 115280, values, &tracker, &set100); +} + +struct NoDefaultCtor { + int num; + explicit NoDefaultCtor(int i) : num(i) {} + + friend bool operator<(const NoDefaultCtor &a, const NoDefaultCtor &b) { + return a.num < b.num; + } +}; + +TEST(Btree, BtreeMapCanHoldNoDefaultCtorTypes) { + absl::btree_map<NoDefaultCtor, NoDefaultCtor> m; + + for (int i = 1; i <= 99; ++i) { + SCOPED_TRACE(i); + EXPECT_TRUE(m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i)).second); + } + EXPECT_FALSE(m.emplace(NoDefaultCtor(78), NoDefaultCtor(0)).second); + + auto iter99 = m.find(NoDefaultCtor(99)); + ASSERT_NE(iter99, m.end()); + EXPECT_EQ(iter99->second.num, 1); + + auto iter1 = m.find(NoDefaultCtor(1)); + ASSERT_NE(iter1, m.end()); + EXPECT_EQ(iter1->second.num, 99); + + auto iter50 = m.find(NoDefaultCtor(50)); + ASSERT_NE(iter50, m.end()); + EXPECT_EQ(iter50->second.num, 50); + + auto iter25 = m.find(NoDefaultCtor(25)); + ASSERT_NE(iter25, m.end()); + EXPECT_EQ(iter25->second.num, 75); +} + +TEST(Btree, BtreeMultimapCanHoldNoDefaultCtorTypes) { + absl::btree_multimap<NoDefaultCtor, NoDefaultCtor> m; + + for (int i = 1; i <= 99; ++i) { + SCOPED_TRACE(i); + m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i)); + } + + auto iter99 = m.find(NoDefaultCtor(99)); + ASSERT_NE(iter99, m.end()); + EXPECT_EQ(iter99->second.num, 1); + + auto iter1 = m.find(NoDefaultCtor(1)); + ASSERT_NE(iter1, m.end()); + EXPECT_EQ(iter1->second.num, 99); + + auto iter50 = m.find(NoDefaultCtor(50)); + ASSERT_NE(iter50, m.end()); + EXPECT_EQ(iter50->second.num, 50); + + auto iter25 = m.find(NoDefaultCtor(25)); + ASSERT_NE(iter25, m.end()); + EXPECT_EQ(iter25->second.num, 75); +} + +TEST(Btree, MapAt) { + absl::btree_map<int, int> map = {{1, 2}, {2, 4}}; + EXPECT_EQ(map.at(1), 2); + EXPECT_EQ(map.at(2), 4); + map.at(2) = 8; + const absl::btree_map<int, int> &const_map = map; + EXPECT_EQ(const_map.at(1), 2); + EXPECT_EQ(const_map.at(2), 8); +#ifdef ABSL_HAVE_EXCEPTIONS + EXPECT_THROW(map.at(3), std::out_of_range); +#else + EXPECT_DEATH(map.at(3), "absl::btree_map::at"); +#endif +} + +TEST(Btree, BtreeMultisetEmplace) { + const int value_to_insert = 123456; + absl::btree_multiset<int> s; + auto iter = s.emplace(value_to_insert); + ASSERT_NE(iter, s.end()); + EXPECT_EQ(*iter, value_to_insert); + auto iter2 = s.emplace(value_to_insert); + EXPECT_NE(iter2, iter); + ASSERT_NE(iter2, s.end()); + EXPECT_EQ(*iter2, value_to_insert); + auto result = s.equal_range(value_to_insert); + EXPECT_EQ(std::distance(result.first, result.second), 2); +} + +TEST(Btree, BtreeMultisetEmplaceHint) { + const int value_to_insert = 123456; + absl::btree_multiset<int> s; + auto iter = s.emplace(value_to_insert); + ASSERT_NE(iter, s.end()); + EXPECT_EQ(*iter, value_to_insert); + auto emplace_iter = s.emplace_hint(iter, value_to_insert); + EXPECT_NE(emplace_iter, iter); + ASSERT_NE(emplace_iter, s.end()); + EXPECT_EQ(*emplace_iter, value_to_insert); +} + +TEST(Btree, BtreeMultimapEmplace) { + const int key_to_insert = 123456; + const char value0[] = "a"; + absl::btree_multimap<int, std::string> s; + auto iter = s.emplace(key_to_insert, value0); + ASSERT_NE(iter, s.end()); + EXPECT_EQ(iter->first, key_to_insert); + EXPECT_EQ(iter->second, value0); + const char value1[] = "b"; + auto iter2 = s.emplace(key_to_insert, value1); + EXPECT_NE(iter2, iter); + ASSERT_NE(iter2, 
s.end()); + EXPECT_EQ(iter2->first, key_to_insert); + EXPECT_EQ(iter2->second, value1); + auto result = s.equal_range(key_to_insert); + EXPECT_EQ(std::distance(result.first, result.second), 2); +} + +TEST(Btree, BtreeMultimapEmplaceHint) { + const int key_to_insert = 123456; + const char value0[] = "a"; + absl::btree_multimap<int, std::string> s; + auto iter = s.emplace(key_to_insert, value0); + ASSERT_NE(iter, s.end()); + EXPECT_EQ(iter->first, key_to_insert); + EXPECT_EQ(iter->second, value0); + const char value1[] = "b"; + auto emplace_iter = s.emplace_hint(iter, key_to_insert, value1); + EXPECT_NE(emplace_iter, iter); + ASSERT_NE(emplace_iter, s.end()); + EXPECT_EQ(emplace_iter->first, key_to_insert); + EXPECT_EQ(emplace_iter->second, value1); +} + +TEST(Btree, ConstIteratorAccessors) { + absl::btree_set<int> set; + for (int i = 0; i < 100; ++i) { + set.insert(i); + } + + auto it = set.cbegin(); + auto r_it = set.crbegin(); + for (int i = 0; i < 100; ++i, ++it, ++r_it) { + ASSERT_EQ(*it, i); + ASSERT_EQ(*r_it, 99 - i); + } + EXPECT_EQ(it, set.cend()); + EXPECT_EQ(r_it, set.crend()); +} + +TEST(Btree, StrSplitCompatible) { + const absl::btree_set<std::string> split_set = absl::StrSplit("a,b,c", ','); + const absl::btree_set<std::string> expected_set = {"a", "b", "c"}; + + EXPECT_EQ(split_set, expected_set); +} + +// We can't use EXPECT_EQ/etc. to compare absl::weak_ordering because they +// convert literal 0 to int and absl::weak_ordering can only be compared with +// literal 0. Defining this function allows for avoiding ClangTidy warnings. +bool Identity(const bool b) { return b; } + +TEST(Btree, ValueComp) { + absl::btree_set<int> s; + EXPECT_TRUE(s.value_comp()(1, 2)); + EXPECT_FALSE(s.value_comp()(2, 2)); + EXPECT_FALSE(s.value_comp()(2, 1)); + + absl::btree_map<int, int> m1; + EXPECT_TRUE(m1.value_comp()(std::make_pair(1, 0), std::make_pair(2, 0))); + EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(2, 0))); + EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(1, 0))); + + absl::btree_map<std::string, int> m2; + EXPECT_TRUE(Identity( + m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0)) < 0)); + EXPECT_TRUE(Identity( + m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0)) == 0)); + EXPECT_TRUE(Identity( + m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0)) > 0)); +} + +TEST(Btree, DefaultConstruction) { + absl::btree_set<int> s; + absl::btree_map<int, int> m; + absl::btree_multiset<int> ms; + absl::btree_multimap<int, int> mm; + + EXPECT_TRUE(s.empty()); + EXPECT_TRUE(m.empty()); + EXPECT_TRUE(ms.empty()); + EXPECT_TRUE(mm.empty()); +} + +TEST(Btree, SwissTableHashable) { + static constexpr int kValues = 10000; + std::vector<int> values(kValues); + std::iota(values.begin(), values.end(), 0); + std::vector<std::pair<int, int>> map_values; + for (int v : values) map_values.emplace_back(v, -v); + + using set = absl::btree_set<int>; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + set{}, + set{1}, + set{2}, + set{1, 2}, + set{2, 1}, + set(values.begin(), values.end()), + set(values.rbegin(), values.rend()), + })); + + using mset = absl::btree_multiset<int>; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + mset{}, + mset{1}, + mset{1, 1}, + mset{2}, + mset{2, 2}, + mset{1, 2}, + mset{1, 1, 2}, + mset{1, 2, 2}, + mset{1, 1, 2, 2}, + mset(values.begin(), values.end()), + mset(values.rbegin(), values.rend()), + })); + + using map = absl::btree_map<int, int>; + 
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
+      map{},
+      map{{1, 0}},
+      map{{1, 1}},
+      map{{2, 0}},
+      map{{2, 2}},
+      map{{1, 0}, {2, 1}},
+      map(map_values.begin(), map_values.end()),
+      map(map_values.rbegin(), map_values.rend()),
+  }));
+
+  using mmap = absl::btree_multimap<int, int>;
+  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
+      mmap{},
+      mmap{{1, 0}},
+      mmap{{1, 1}},
+      mmap{{1, 0}, {1, 1}},
+      mmap{{1, 1}, {1, 0}},
+      mmap{{2, 0}},
+      mmap{{2, 2}},
+      mmap{{1, 0}, {2, 1}},
+      mmap(map_values.begin(), map_values.end()),
+      mmap(map_values.rbegin(), map_values.rend()),
+  }));
+}
+
+TEST(Btree, ComparableSet) {
+  absl::btree_set<int> s1 = {1, 2};
+  absl::btree_set<int> s2 = {2, 3};
+  EXPECT_LT(s1, s2);
+  EXPECT_LE(s1, s2);
+  EXPECT_LE(s1, s1);
+  EXPECT_GT(s2, s1);
+  EXPECT_GE(s2, s1);
+  EXPECT_GE(s1, s1);
+}
+
+TEST(Btree, ComparableSetsDifferentLength) {
+  absl::btree_set<int> s1 = {1, 2};
+  absl::btree_set<int> s2 = {1, 2, 3};
+  EXPECT_LT(s1, s2);
+  EXPECT_LE(s1, s2);
+  EXPECT_GT(s2, s1);
+  EXPECT_GE(s2, s1);
+}
+
+TEST(Btree, ComparableMultiset) {
+  absl::btree_multiset<int> s1 = {1, 2};
+  absl::btree_multiset<int> s2 = {2, 3};
+  EXPECT_LT(s1, s2);
+  EXPECT_LE(s1, s2);
+  EXPECT_LE(s1, s1);
+  EXPECT_GT(s2, s1);
+  EXPECT_GE(s2, s1);
+  EXPECT_GE(s1, s1);
+}
+
+TEST(Btree, ComparableMap) {
+  absl::btree_map<int, int> s1 = {{1, 2}};
+  absl::btree_map<int, int> s2 = {{2, 3}};
+  EXPECT_LT(s1, s2);
+  EXPECT_LE(s1, s2);
+  EXPECT_LE(s1, s1);
+  EXPECT_GT(s2, s1);
+  EXPECT_GE(s2, s1);
+  EXPECT_GE(s1, s1);
+}
+
+TEST(Btree, ComparableMultimap) {
+  absl::btree_multimap<int, int> s1 = {{1, 2}};
+  absl::btree_multimap<int, int> s2 = {{2, 3}};
+  EXPECT_LT(s1, s2);
+  EXPECT_LE(s1, s2);
+  EXPECT_LE(s1, s1);
+  EXPECT_GT(s2, s1);
+  EXPECT_GE(s2, s1);
+  EXPECT_GE(s1, s1);
+}
+
+TEST(Btree, ComparableSetWithCustomComparator) {
+  // As specified by
+  // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2012/n3337.pdf section
+  // [container.requirements.general].12, comparison of associative containers
+  // always uses the elements' default `<` operator, even when the container
+  // itself uses a custom comparison functor.
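+  // So s1 below iterates as {2, 1} and s2 as {3, 2}, yet s1 < s2 holds
+  // because the lexicographic comparison of elements uses operator< directly.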
+ absl::btree_set<int, std::greater<int>> s1 = {1, 2}; + absl::btree_set<int, std::greater<int>> s2 = {2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, EraseReturnsIterator) { + absl::btree_set<int> set = {1, 2, 3, 4, 5}; + auto result_it = set.erase(set.begin(), set.find(3)); + EXPECT_EQ(result_it, set.find(3)); + result_it = set.erase(set.find(5)); + EXPECT_EQ(result_it, set.end()); +} + +TEST(Btree, ExtractAndInsertNodeHandleSet) { + absl::btree_set<int> src1 = {1, 2, 3, 4, 5}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(1, 2, 4, 5)); + absl::btree_set<int> other; + absl::btree_set<int>::insert_return_type res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3)); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_TRUE(res.inserted); + EXPECT_TRUE(res.node.empty()); + + absl::btree_set<int> src2 = {3, 4}; + nh = src2.extract(src2.find(3)); + EXPECT_THAT(src2, ElementsAre(4)); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3)); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_FALSE(res.inserted); + ASSERT_FALSE(res.node.empty()); + EXPECT_EQ(res.node.value(), 3); +} + +template <typename Set> +void TestExtractWithTrackingForSet() { + InstanceTracker tracker; + { + Set s; + // Add enough elements to make sure we test internal nodes too. + const size_t kSize = 1000; + while (s.size() < kSize) { + s.insert(MovableOnlyInstance(s.size())); + } + for (int i = 0; i < kSize; ++i) { + // Extract with key + auto nh = s.extract(MovableOnlyInstance(i)); + EXPECT_EQ(s.size(), kSize - 1); + EXPECT_EQ(nh.value().value(), i); + // Insert with node + s.insert(std::move(nh)); + EXPECT_EQ(s.size(), kSize); + + // Extract with iterator + auto it = s.find(MovableOnlyInstance(i)); + nh = s.extract(it); + EXPECT_EQ(s.size(), kSize - 1); + EXPECT_EQ(nh.value().value(), i); + // Insert with node and hint + s.insert(s.begin(), std::move(nh)); + EXPECT_EQ(s.size(), kSize); + } + } + EXPECT_EQ(0, tracker.instances()); +} + +template <typename Map> +void TestExtractWithTrackingForMap() { + InstanceTracker tracker; + { + Map m; + // Add enough elements to make sure we test internal nodes too. 
+ const size_t kSize = 1000; + while (m.size() < kSize) { + m.insert( + {CopyableMovableInstance(m.size()), MovableOnlyInstance(m.size())}); + } + for (int i = 0; i < kSize; ++i) { + // Extract with key + auto nh = m.extract(CopyableMovableInstance(i)); + EXPECT_EQ(m.size(), kSize - 1); + EXPECT_EQ(nh.key().value(), i); + EXPECT_EQ(nh.mapped().value(), i); + // Insert with node + m.insert(std::move(nh)); + EXPECT_EQ(m.size(), kSize); + + // Extract with iterator + auto it = m.find(CopyableMovableInstance(i)); + nh = m.extract(it); + EXPECT_EQ(m.size(), kSize - 1); + EXPECT_EQ(nh.key().value(), i); + EXPECT_EQ(nh.mapped().value(), i); + // Insert with node and hint + m.insert(m.begin(), std::move(nh)); + EXPECT_EQ(m.size(), kSize); + } + } + EXPECT_EQ(0, tracker.instances()); +} + +TEST(Btree, ExtractTracking) { + TestExtractWithTrackingForSet<absl::btree_set<MovableOnlyInstance>>(); + TestExtractWithTrackingForSet<absl::btree_multiset<MovableOnlyInstance>>(); + TestExtractWithTrackingForMap< + absl::btree_map<CopyableMovableInstance, MovableOnlyInstance>>(); + TestExtractWithTrackingForMap< + absl::btree_multimap<CopyableMovableInstance, MovableOnlyInstance>>(); +} + +TEST(Btree, ExtractAndInsertNodeHandleMultiSet) { + absl::btree_multiset<int> src1 = {1, 2, 3, 3, 4, 5}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(1, 2, 3, 4, 5)); + absl::btree_multiset<int> other; + auto res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3)); + EXPECT_EQ(res, other.find(3)); + + absl::btree_multiset<int> src2 = {3, 4}; + nh = src2.extract(src2.find(3)); + EXPECT_THAT(src2, ElementsAre(4)); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3, 3)); + EXPECT_EQ(res, ++other.find(3)); +} + +TEST(Btree, ExtractAndInsertNodeHandleMap) { + absl::btree_map<int, int> src1 = {{1, 2}, {3, 4}, {5, 6}}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6))); + absl::btree_map<int, int> other; + absl::btree_map<int, int>::insert_return_type res = + other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4))); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_TRUE(res.inserted); + EXPECT_TRUE(res.node.empty()); + + absl::btree_map<int, int> src2 = {{3, 6}}; + nh = src2.extract(src2.find(3)); + EXPECT_TRUE(src2.empty()); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4))); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_FALSE(res.inserted); + ASSERT_FALSE(res.node.empty()); + EXPECT_EQ(res.node.key(), 3); + EXPECT_EQ(res.node.mapped(), 6); +} + +TEST(Btree, ExtractAndInsertNodeHandleMultiMap) { + absl::btree_multimap<int, int> src1 = {{1, 2}, {3, 4}, {5, 6}}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6))); + absl::btree_multimap<int, int> other; + auto res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4))); + EXPECT_EQ(res, other.find(3)); + + absl::btree_multimap<int, int> src2 = {{3, 6}}; + nh = src2.extract(src2.find(3)); + EXPECT_TRUE(src2.empty()); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4), Pair(3, 6))); + EXPECT_EQ(res, ++other.begin()); +} + +// For multisets, insert with hint also affects correctness because we need to +// insert immediately before the hint if possible. 
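+// The not_key field plays no part in the comparison, which lets the test
+// observe exactly where an equivalent key was inserted.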
+struct InsertMultiHintData { + int key; + int not_key; + bool operator==(const InsertMultiHintData other) const { + return key == other.key && not_key == other.not_key; + } +}; + +struct InsertMultiHintDataKeyCompare { + using is_transparent = void; + bool operator()(const InsertMultiHintData a, + const InsertMultiHintData b) const { + return a.key < b.key; + } + bool operator()(const int a, const InsertMultiHintData b) const { + return a < b.key; + } + bool operator()(const InsertMultiHintData a, const int b) const { + return a.key < b; + } +}; + +TEST(Btree, InsertHintNodeHandle) { + // For unique sets, insert with hint is just a performance optimization. + // Test that insert works correctly when the hint is right or wrong. + { + absl::btree_set<int> src = {1, 2, 3, 4, 5}; + auto nh = src.extract(src.find(3)); + EXPECT_THAT(src, ElementsAre(1, 2, 4, 5)); + absl::btree_set<int> other = {0, 100}; + // Test a correct hint. + auto it = other.insert(other.lower_bound(3), std::move(nh)); + EXPECT_THAT(other, ElementsAre(0, 3, 100)); + EXPECT_EQ(it, other.find(3)); + + nh = src.extract(src.find(5)); + // Test an incorrect hint. + it = other.insert(other.end(), std::move(nh)); + EXPECT_THAT(other, ElementsAre(0, 3, 5, 100)); + EXPECT_EQ(it, other.find(5)); + } + + absl::btree_multiset<InsertMultiHintData, InsertMultiHintDataKeyCompare> src = + {{1, 2}, {3, 4}, {3, 5}}; + auto nh = src.extract(src.lower_bound(3)); + EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 4})); + absl::btree_multiset<InsertMultiHintData, InsertMultiHintDataKeyCompare> + other = {{3, 1}, {3, 2}, {3, 3}}; + auto it = other.insert(--other.end(), std::move(nh)); + EXPECT_THAT( + other, ElementsAre(InsertMultiHintData{3, 1}, InsertMultiHintData{3, 2}, + InsertMultiHintData{3, 4}, InsertMultiHintData{3, 3})); + EXPECT_EQ(it, --(--other.end())); + + nh = src.extract(src.find(3)); + EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 5})); + it = other.insert(other.begin(), std::move(nh)); + EXPECT_THAT(other, + ElementsAre(InsertMultiHintData{3, 5}, InsertMultiHintData{3, 1}, + InsertMultiHintData{3, 2}, InsertMultiHintData{3, 4}, + InsertMultiHintData{3, 3})); + EXPECT_EQ(it, other.begin()); +} + +struct IntCompareToCmp { + absl::weak_ordering operator()(int a, int b) const { + if (a < b) return absl::weak_ordering::less; + if (a > b) return absl::weak_ordering::greater; + return absl::weak_ordering::equivalent; + } +}; + +TEST(Btree, MergeIntoUniqueContainers) { + absl::btree_set<int, IntCompareToCmp> src1 = {1, 2, 3}; + absl::btree_multiset<int> src2 = {3, 4, 4, 5}; + absl::btree_set<int> dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_THAT(src2, ElementsAre(3, 4)); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5)); +} + +TEST(Btree, MergeIntoUniqueContainersWithCompareTo) { + absl::btree_set<int, IntCompareToCmp> src1 = {1, 2, 3}; + absl::btree_multiset<int> src2 = {3, 4, 4, 5}; + absl::btree_set<int, IntCompareToCmp> dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_THAT(src2, ElementsAre(3, 4)); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5)); +} + +TEST(Btree, MergeIntoMultiContainers) { + absl::btree_set<int, IntCompareToCmp> src1 = {1, 2, 3}; + absl::btree_multiset<int> src2 = {3, 4, 4, 5}; + absl::btree_multiset<int> dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_TRUE(src2.empty()); + EXPECT_THAT(dst, 
ElementsAre(1, 2, 3, 3, 4, 4, 5)); +} + +TEST(Btree, MergeIntoMultiContainersWithCompareTo) { + absl::btree_set<int, IntCompareToCmp> src1 = {1, 2, 3}; + absl::btree_multiset<int> src2 = {3, 4, 4, 5}; + absl::btree_multiset<int, IntCompareToCmp> dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_TRUE(src2.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5)); +} + +TEST(Btree, MergeIntoMultiMapsWithDifferentComparators) { + absl::btree_map<int, int, IntCompareToCmp> src1 = {{1, 1}, {2, 2}, {3, 3}}; + absl::btree_multimap<int, int, std::greater<int>> src2 = { + {5, 5}, {4, 1}, {4, 4}, {3, 2}}; + absl::btree_multimap<int, int> dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3))); + dst.merge(src2); + EXPECT_TRUE(src2.empty()); + EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(3, 2), + Pair(4, 1), Pair(4, 4), Pair(5, 5))); +} + +struct KeyCompareToWeakOrdering { + template <typename T> + absl::weak_ordering operator()(const T &a, const T &b) const { + return a < b ? absl::weak_ordering::less + : a == b ? absl::weak_ordering::equivalent + : absl::weak_ordering::greater; + } +}; + +struct KeyCompareToStrongOrdering { + template <typename T> + absl::strong_ordering operator()(const T &a, const T &b) const { + return a < b ? absl::strong_ordering::less + : a == b ? absl::strong_ordering::equal + : absl::strong_ordering::greater; + } +}; + +TEST(Btree, UserProvidedKeyCompareToComparators) { + absl::btree_set<int, KeyCompareToWeakOrdering> weak_set = {1, 2, 3}; + EXPECT_TRUE(weak_set.contains(2)); + EXPECT_FALSE(weak_set.contains(4)); + + absl::btree_set<int, KeyCompareToStrongOrdering> strong_set = {1, 2, 3}; + EXPECT_TRUE(strong_set.contains(2)); + EXPECT_FALSE(strong_set.contains(4)); +} + +TEST(Btree, TryEmplaceBasicTest) { + absl::btree_map<int, std::string> m; + + // Should construct a std::string from the literal. + m.try_emplace(1, "one"); + EXPECT_EQ(1, m.size()); + + // Try other std::string constructors and const lvalue key. + const int key(42); + m.try_emplace(key, 3, 'a'); + m.try_emplace(2, std::string("two")); + + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); + EXPECT_THAT(m, ElementsAreArray(std::vector<std::pair<int, std::string>>{ + {1, "one"}, {2, "two"}, {42, "aaa"}})); +} + +TEST(Btree, TryEmplaceWithHintWorks) { + // Use a counting comparator here to verify that hint is used. 
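+  // A correct hint keeps the insert to O(1) comparisons (<= 2 below), while a
+  // missing or bad hint pays the usual O(log n) descent (>= 4 at this size).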
+ int calls = 0; + auto cmp = [&calls](int x, int y) { + ++calls; + return x < y; + }; + using Cmp = decltype(cmp); + + absl::btree_map<int, int, Cmp> m(cmp); + for (int i = 0; i < 128; ++i) { + m.emplace(i, i); + } + + // Sanity check for the comparator + calls = 0; + m.emplace(127, 127); + EXPECT_GE(calls, 4); + + // Try with begin hint: + calls = 0; + auto it = m.try_emplace(m.begin(), -1, -1); + EXPECT_EQ(129, m.size()); + EXPECT_EQ(it, m.begin()); + EXPECT_LE(calls, 2); + + // Try with end hint: + calls = 0; + std::pair<int, int> pair1024 = {1024, 1024}; + it = m.try_emplace(m.end(), pair1024.first, pair1024.second); + EXPECT_EQ(130, m.size()); + EXPECT_EQ(it, --m.end()); + EXPECT_LE(calls, 2); + + // Try value already present, bad hint; ensure no duplicate added: + calls = 0; + it = m.try_emplace(m.end(), 16, 17); + EXPECT_EQ(130, m.size()); + EXPECT_GE(calls, 4); + EXPECT_EQ(it, m.find(16)); + + // Try value already present, hint points directly to it: + calls = 0; + it = m.try_emplace(it, 16, 17); + EXPECT_EQ(130, m.size()); + EXPECT_LE(calls, 2); + EXPECT_EQ(it, m.find(16)); + + m.erase(2); + EXPECT_EQ(129, m.size()); + auto hint = m.find(3); + // Try emplace in the middle of two other elements. + calls = 0; + m.try_emplace(hint, 2, 2); + EXPECT_EQ(130, m.size()); + EXPECT_LE(calls, 2); + + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); +} + +TEST(Btree, TryEmplaceWithBadHint) { + absl::btree_map<int, int> m = {{1, 1}, {9, 9}}; + + // Bad hint (too small), should still emplace: + auto it = m.try_emplace(m.begin(), 2, 2); + EXPECT_EQ(it, ++m.begin()); + EXPECT_THAT(m, ElementsAreArray( + std::vector<std::pair<int, int>>{{1, 1}, {2, 2}, {9, 9}})); + + // Bad hint, too large this time: + it = m.try_emplace(++(++m.begin()), 0, 0); + EXPECT_EQ(it, m.begin()); + EXPECT_THAT(m, ElementsAreArray(std::vector<std::pair<int, int>>{ + {0, 0}, {1, 1}, {2, 2}, {9, 9}})); +} + +TEST(Btree, TryEmplaceMaintainsSortedOrder) { + absl::btree_map<int, std::string> m; + std::pair<int, std::string> pair5 = {5, "five"}; + + // Test both lvalue & rvalue emplace. + m.try_emplace(10, "ten"); + m.try_emplace(pair5.first, pair5.second); + EXPECT_EQ(2, m.size()); + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); + + int int100{100}; + m.try_emplace(int100, "hundred"); + m.try_emplace(1, "one"); + EXPECT_EQ(4, m.size()); + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); +} + +TEST(Btree, TryEmplaceWithHintAndNoValueArgsWorks) { + absl::btree_map<int, int> m; + m.try_emplace(m.end(), 1); + EXPECT_EQ(0, m[1]); +} + +TEST(Btree, TryEmplaceWithHintAndMultipleValueArgsWorks) { + absl::btree_map<int, std::string> m; + m.try_emplace(m.end(), 1, 10, 'a'); + EXPECT_EQ(std::string(10, 'a'), m[1]); +} + +TEST(Btree, MoveAssignmentAllocatorPropagation) { + InstanceTracker tracker; + + int64_t bytes1 = 0, bytes2 = 0; + PropagatingCountingAlloc<MovableOnlyInstance> allocator1(&bytes1); + PropagatingCountingAlloc<MovableOnlyInstance> allocator2(&bytes2); + std::less<MovableOnlyInstance> cmp; + + // Test propagating allocator_type. + { + absl::btree_set<MovableOnlyInstance, std::less<MovableOnlyInstance>, + PropagatingCountingAlloc<MovableOnlyInstance>> + set1(cmp, allocator1), set2(cmp, allocator2); + + for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i)); + + tracker.ResetCopiesMovesSwaps(); + set2 = std::move(set1); + EXPECT_EQ(tracker.moves(), 0); + } + // Test non-propagating allocator_type with equal allocators. 
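+  // When the allocators compare equal, move assignment can still steal the
+  // whole tree even without propagation, so no element moves are expected.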
+  {
+    absl::btree_set<MovableOnlyInstance, std::less<MovableOnlyInstance>,
+                    CountingAllocator<MovableOnlyInstance>>
+        set1(cmp, allocator1), set2(cmp, allocator1);
+
+    for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i));
+
+    tracker.ResetCopiesMovesSwaps();
+    set2 = std::move(set1);
+    EXPECT_EQ(tracker.moves(), 0);
+  }
+  // Test non-propagating allocator_type with different allocators.
+  {
+    absl::btree_set<MovableOnlyInstance, std::less<MovableOnlyInstance>,
+                    CountingAllocator<MovableOnlyInstance>>
+        set1(cmp, allocator1), set2(cmp, allocator2);
+
+    for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i));
+
+    tracker.ResetCopiesMovesSwaps();
+    set2 = std::move(set1);
+    EXPECT_GE(tracker.moves(), 100);
+  }
+}
+
+TEST(Btree, EmptyTree) {
+  absl::btree_set<int> s;
+  EXPECT_TRUE(s.empty());
+  EXPECT_EQ(s.size(), 0);
+  EXPECT_GT(s.max_size(), 0);
+}
+
+bool IsEven(int k) { return k % 2 == 0; }
+
+TEST(Btree, EraseIf) {
+  // Test that erase_if works with all the container types and supports
+  // lambdas.
+  {
+    absl::btree_set<int> s = {1, 3, 5, 6, 100};
+    erase_if(s, [](int k) { return k > 3; });
+    EXPECT_THAT(s, ElementsAre(1, 3));
+  }
+  {
+    absl::btree_multiset<int> s = {1, 3, 3, 5, 6, 6, 100};
+    erase_if(s, [](int k) { return k <= 3; });
+    EXPECT_THAT(s, ElementsAre(5, 6, 6, 100));
+  }
+  {
+    absl::btree_map<int, int> m = {{1, 1}, {3, 3}, {6, 6}, {100, 100}};
+    erase_if(m, [](std::pair<const int, int> kv) { return kv.first > 3; });
+    EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3)));
+  }
+  {
+    absl::btree_multimap<int, int> m = {{1, 1}, {3, 3}, {3, 6},
+                                        {6, 6}, {6, 7}, {100, 6}};
+    erase_if(m, [](std::pair<const int, int> kv) { return kv.second == 6; });
+    EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3), Pair(6, 7)));
+  }
+  // Test that erasing all elements from a large set works, and test support
+  // for function pointers.
+  {
+    absl::btree_set<int> s;
+    for (int i = 0; i < 1000; ++i) s.insert(2 * i);
+    erase_if(s, IsEven);
+    EXPECT_THAT(s, IsEmpty());
+  }
+  // Test that erase_if also supports passing a function pointer by address.
+  {
+    absl::btree_set<int> s = {1, 3, 5, 6, 100};
+    erase_if(s, &IsEven);
+    EXPECT_THAT(s, ElementsAre(1, 3, 5));
+  }
+}
+
+TEST(Btree, InsertOrAssign) {
+  absl::btree_map<int, int> m = {{1, 1}, {3, 3}};
+  using value_type = typename decltype(m)::value_type;
+
+  auto ret = m.insert_or_assign(4, 4);
+  EXPECT_EQ(*ret.first, value_type(4, 4));
+  EXPECT_TRUE(ret.second);
+  ret = m.insert_or_assign(3, 100);
+  EXPECT_EQ(*ret.first, value_type(3, 100));
+  EXPECT_FALSE(ret.second);
+
+  auto hint_ret = m.insert_or_assign(ret.first, 3, 200);
+  EXPECT_EQ(*hint_ret, value_type(3, 200));
+  hint_ret = m.insert_or_assign(m.find(1), 0, 1);
+  EXPECT_EQ(*hint_ret, value_type(0, 1));
+  // Test with bad hint.
+ hint_ret = m.insert_or_assign(m.end(), -1, 1); + EXPECT_EQ(*hint_ret, value_type(-1, 1)); + + EXPECT_THAT(m, ElementsAre(Pair(-1, 1), Pair(0, 1), Pair(1, 1), Pair(3, 200), + Pair(4, 4))); +} + +TEST(Btree, InsertOrAssignMovableOnly) { + absl::btree_map<int, MovableOnlyInstance> m; + using value_type = typename decltype(m)::value_type; + + auto ret = m.insert_or_assign(4, MovableOnlyInstance(4)); + EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(4))); + EXPECT_TRUE(ret.second); + ret = m.insert_or_assign(4, MovableOnlyInstance(100)); + EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(100))); + EXPECT_FALSE(ret.second); + + auto hint_ret = m.insert_or_assign(ret.first, 3, MovableOnlyInstance(200)); + EXPECT_EQ(*hint_ret, value_type(3, MovableOnlyInstance(200))); + + EXPECT_EQ(m.size(), 2); +} + +TEST(Btree, BitfieldArgument) { + union { + int n : 1; + }; + n = 0; + absl::btree_map<int, int> m; + m.erase(n); + m.count(n); + m.find(n); + m.contains(n); + m.equal_range(n); + m.insert_or_assign(n, n); + m.insert_or_assign(m.end(), n, n); + m.try_emplace(n); + m.try_emplace(m.end(), n); + m.at(n); + m[n]; +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/absl/container/btree_test.h b/absl/container/btree_test.h new file mode 100644 index 00000000..218ba41d --- /dev/null +++ b/absl/container/btree_test.h @@ -0,0 +1,155 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_BTREE_TEST_H_ +#define ABSL_CONTAINER_BTREE_TEST_H_ + +#include <algorithm> +#include <cassert> +#include <random> +#include <string> +#include <utility> +#include <vector> + +#include "absl/container/btree_map.h" +#include "absl/container/btree_set.h" +#include "absl/container/flat_hash_set.h" +#include "absl/time/time.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// Like remove_const but propagates the removal through std::pair. +template <typename T> +struct remove_pair_const { + using type = typename std::remove_const<T>::type; +}; +template <typename T, typename U> +struct remove_pair_const<std::pair<T, U> > { + using type = std::pair<typename remove_pair_const<T>::type, + typename remove_pair_const<U>::type>; +}; + +// Utility class to provide an accessor for a key given a value. The default +// behavior is to treat the value as a pair and return the first element. +template <typename K, typename V> +struct KeyOfValue { + struct type { + const K& operator()(const V& p) const { return p.first; } + }; +}; + +// Partial specialization of KeyOfValue class for when the key and value are +// the same type such as in set<> and btree_set<>. +template <typename K> +struct KeyOfValue<K, K> { + struct type { + const K& operator()(const K& k) const { return k; } + }; +}; + +inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval) { + assert(val <= maxval); + constexpr unsigned kBase = 64; // avoid integer division. 
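+  // Digits are written right-to-left into buf, padded (with ' ', the zero
+  // digit) to the width of maxval so that the generated strings sort in the
+  // same order as their numeric values.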
+ unsigned p = 15; + buf[p--] = 0; + while (maxval > 0) { + buf[p--] = ' ' + (val % kBase); + val /= kBase; + maxval /= kBase; + } + return buf + p + 1; +} + +template <typename K> +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + K operator()(int i) const { + assert(i <= maxval); + return K(i); + } +}; + +template <> +struct Generator<absl::Time> { + int maxval; + explicit Generator(int m) : maxval(m) {} + absl::Time operator()(int i) const { return absl::FromUnixMillis(i); } +}; + +template <> +struct Generator<std::string> { + int maxval; + explicit Generator(int m) : maxval(m) {} + std::string operator()(int i) const { + char buf[16]; + return GenerateDigits(buf, i, maxval); + } +}; + +template <typename T, typename U> +struct Generator<std::pair<T, U> > { + Generator<typename remove_pair_const<T>::type> tgen; + Generator<typename remove_pair_const<U>::type> ugen; + + explicit Generator(int m) : tgen(m), ugen(m) {} + std::pair<T, U> operator()(int i) const { + return std::make_pair(tgen(i), ugen(i)); + } +}; + +// Generate n values for our tests and benchmarks. Value range is [0, maxval]. +inline std::vector<int> GenerateNumbersWithSeed(int n, int maxval, int seed) { + // NOTE: Some tests rely on generated numbers not changing between test runs. + // We use std::minstd_rand0 because it is well-defined, but don't use + // std::uniform_int_distribution because platforms use different algorithms. + std::minstd_rand0 rng(seed); + + std::vector<int> values; + absl::flat_hash_set<int> unique_values; + if (values.size() < n) { + for (int i = values.size(); i < n; i++) { + int value; + do { + value = static_cast<int>(rng()) % (maxval + 1); + } while (!unique_values.insert(value).second); + + values.push_back(value); + } + } + return values; +} + +// Generates n values in the range [0, maxval]. +template <typename V> +std::vector<V> GenerateValuesWithSeed(int n, int maxval, int seed) { + const std::vector<int> nums = GenerateNumbersWithSeed(n, maxval, seed); + Generator<V> gen(maxval); + std::vector<V> vec; + + vec.reserve(n); + for (int i = 0; i < n; i++) { + vec.push_back(gen(nums[i])); + } + + return vec; +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_BTREE_TEST_H_ diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h index 1e0da5eb..a9ce99ba 100644 --- a/absl/container/fixed_array.h +++ b/absl/container/fixed_array.h @@ -31,7 +31,6 @@ #define ABSL_CONTAINER_FIXED_ARRAY_H_ #include <algorithm> -#include <array> #include <cassert> #include <cstddef> #include <initializer_list> @@ -51,7 +50,7 @@ #include "absl/memory/memory.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1); @@ -387,8 +386,7 @@ class FixedArray { // error: call to int __builtin___sprintf_chk(etc...) 
// will always overflow destination buffer [-Werror] // - template <typename OuterT = value_type, - typename InnerT = absl::remove_extent_t<OuterT>, + template <typename OuterT, typename InnerT = absl::remove_extent_t<OuterT>, size_t InnerN = std::extent<OuterT>::value> struct StorageElementWrapper { InnerT array[InnerN]; @@ -397,8 +395,6 @@ class FixedArray { using StorageElement = absl::conditional_t<std::is_array<value_type>::value, StorageElementWrapper<value_type>, value_type>; - using StorageElementBuffer = - absl::aligned_storage_t<sizeof(StorageElement), alignof(StorageElement)>; static pointer AsValueType(pointer ptr) { return ptr; } static pointer AsValueType(StorageElementWrapper<value_type>* ptr) { @@ -408,25 +404,25 @@ class FixedArray { static_assert(sizeof(StorageElement) == sizeof(value_type), ""); static_assert(alignof(StorageElement) == alignof(value_type), ""); - struct NonEmptyInlinedStorage { - StorageElement* data() { - return reinterpret_cast<StorageElement*>(inlined_storage_.data()); - } + class NonEmptyInlinedStorage { + public: + StorageElement* data() { return reinterpret_cast<StorageElement*>(buff_); } + void AnnotateConstruct(size_type n); + void AnnotateDestruct(size_type n); #ifdef ADDRESS_SANITIZER void* RedzoneBegin() { return &redzone_begin_; } void* RedzoneEnd() { return &redzone_end_ + 1; } #endif // ADDRESS_SANITIZER - void AnnotateConstruct(size_type); - void AnnotateDestruct(size_type); - + private: ADDRESS_SANITIZER_REDZONE(redzone_begin_); - std::array<StorageElementBuffer, inline_elements> inlined_storage_; + alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])]; ADDRESS_SANITIZER_REDZONE(redzone_end_); }; - struct EmptyInlinedStorage { + class EmptyInlinedStorage { + public: StorageElement* data() { return nullptr; } void AnnotateConstruct(size_type) {} void AnnotateDestruct(size_type) {} @@ -460,9 +456,7 @@ class FixedArray { size_type size() const { return size_alloc_.template get<0>(); } StorageElement* begin() const { return data_; } StorageElement* end() const { return begin() + size(); } - allocator_type& alloc() { - return size_alloc_.template get<1>(); - } + allocator_type& alloc() { return size_alloc_.template get<1>(); } private: static bool UsingInlinedStorage(size_type n) { @@ -515,7 +509,7 @@ void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct( #endif // ADDRESS_SANITIZER static_cast<void>(n); // Mark used when not in asan mode } -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_FIXED_ARRAY_H_ diff --git a/absl/container/fixed_array_exception_safety_test.cc b/absl/container/fixed_array_exception_safety_test.cc index 4a67bb46..a5bb009d 100644 --- a/absl/container/fixed_array_exception_safety_test.cc +++ b/absl/container/fixed_array_exception_safety_test.cc @@ -12,14 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
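+// Note: config.h and fixed_array.h are included before the
+// ABSL_HAVE_EXCEPTIONS check below so that the macro is visible when deciding
+// whether to compile these tests at all.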
+#include "absl/base/config.h" +#include "absl/container/fixed_array.h" + +#ifdef ABSL_HAVE_EXCEPTIONS + #include <initializer_list> #include "gtest/gtest.h" #include "absl/base/internal/exception_safety_testing.h" -#include "absl/container/fixed_array.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace { @@ -33,10 +37,19 @@ constexpr int kUpdatedValue = 10; using ::testing::TestThrowingCtor; using Thrower = testing::ThrowingValue<testing::TypeSpec::kEverythingThrows>; +using ThrowAlloc = + testing::ThrowingAllocator<Thrower, testing::AllocSpec::kEverythingThrows>; +using MoveThrower = testing::ThrowingValue<testing::TypeSpec::kNoThrowMove>; +using MoveThrowAlloc = + testing::ThrowingAllocator<MoveThrower, + testing::AllocSpec::kEverythingThrows>; + using FixedArr = absl::FixedArray<Thrower, kInlined>; +using FixedArrWithAlloc = absl::FixedArray<Thrower, kInlined, ThrowAlloc>; -using MoveThrower = testing::ThrowingValue<testing::TypeSpec::kNoThrowMove>; using MoveFixedArr = absl::FixedArray<MoveThrower, kInlined>; +using MoveFixedArrWithAlloc = + absl::FixedArray<MoveThrower, kInlined, MoveThrowAlloc>; TEST(FixedArrayExceptionSafety, CopyConstructor) { auto small = FixedArr(kSmallSize); @@ -46,6 +59,14 @@ TEST(FixedArrayExceptionSafety, CopyConstructor) { TestThrowingCtor<FixedArr>(large); } +TEST(FixedArrayExceptionSafety, CopyConstructorWithAlloc) { + auto small = FixedArrWithAlloc(kSmallSize); + TestThrowingCtor<FixedArrWithAlloc>(small); + + auto large = FixedArrWithAlloc(kLargeSize); + TestThrowingCtor<FixedArrWithAlloc>(large); +} + TEST(FixedArrayExceptionSafety, MoveConstructor) { TestThrowingCtor<FixedArr>(FixedArr(kSmallSize)); TestThrowingCtor<FixedArr>(FixedArr(kLargeSize)); @@ -55,16 +76,35 @@ TEST(FixedArrayExceptionSafety, MoveConstructor) { TestThrowingCtor<MoveFixedArr>(MoveFixedArr(kLargeSize)); } +TEST(FixedArrayExceptionSafety, MoveConstructorWithAlloc) { + TestThrowingCtor<FixedArrWithAlloc>(FixedArrWithAlloc(kSmallSize)); + TestThrowingCtor<FixedArrWithAlloc>(FixedArrWithAlloc(kLargeSize)); + + // TypeSpec::kNoThrowMove + TestThrowingCtor<MoveFixedArrWithAlloc>(MoveFixedArrWithAlloc(kSmallSize)); + TestThrowingCtor<MoveFixedArrWithAlloc>(MoveFixedArrWithAlloc(kLargeSize)); +} + TEST(FixedArrayExceptionSafety, SizeConstructor) { TestThrowingCtor<FixedArr>(kSmallSize); TestThrowingCtor<FixedArr>(kLargeSize); } +TEST(FixedArrayExceptionSafety, SizeConstructorWithAlloc) { + TestThrowingCtor<FixedArrWithAlloc>(kSmallSize); + TestThrowingCtor<FixedArrWithAlloc>(kLargeSize); +} + TEST(FixedArrayExceptionSafety, SizeValueConstructor) { TestThrowingCtor<FixedArr>(kSmallSize, Thrower()); TestThrowingCtor<FixedArr>(kLargeSize, Thrower()); } +TEST(FixedArrayExceptionSafety, SizeValueConstructorWithAlloc) { + TestThrowingCtor<FixedArrWithAlloc>(kSmallSize, Thrower()); + TestThrowingCtor<FixedArrWithAlloc>(kLargeSize, Thrower()); +} + TEST(FixedArrayExceptionSafety, IteratorConstructor) { auto small = FixedArr(kSmallSize); TestThrowingCtor<FixedArr>(small.begin(), small.end()); @@ -73,6 +113,14 @@ TEST(FixedArrayExceptionSafety, IteratorConstructor) { TestThrowingCtor<FixedArr>(large.begin(), large.end()); } +TEST(FixedArrayExceptionSafety, IteratorConstructorWithAlloc) { + auto small = FixedArrWithAlloc(kSmallSize); + TestThrowingCtor<FixedArrWithAlloc>(small.begin(), small.end()); + + auto large = FixedArrWithAlloc(kLargeSize); + TestThrowingCtor<FixedArrWithAlloc>(large.begin(), large.end()); +} + TEST(FixedArrayExceptionSafety, 
InitListConstructor) { constexpr int small_inlined = 3; using SmallFixedArr = absl::FixedArray<Thrower, small_inlined>; @@ -86,7 +134,22 @@ TEST(FixedArrayExceptionSafety, InitListConstructor) { Thrower{}, Thrower{}, Thrower{}, Thrower{}, Thrower{}}); } -testing::AssertionResult ReadMemory(FixedArr* fixed_arr) { +TEST(FixedArrayExceptionSafety, InitListConstructorWithAlloc) { + constexpr int small_inlined = 3; + using SmallFixedArrWithAlloc = + absl::FixedArray<Thrower, small_inlined, ThrowAlloc>; + + TestThrowingCtor<SmallFixedArrWithAlloc>(std::initializer_list<Thrower>{}); + // Test inlined allocation + TestThrowingCtor<SmallFixedArrWithAlloc>( + std::initializer_list<Thrower>{Thrower{}, Thrower{}}); + // Test out of line allocation + TestThrowingCtor<SmallFixedArrWithAlloc>(std::initializer_list<Thrower>{ + Thrower{}, Thrower{}, Thrower{}, Thrower{}, Thrower{}}); +} + +template <typename FixedArrT> +testing::AssertionResult ReadMemory(FixedArrT* fixed_arr) { // Marked volatile to prevent optimization. Used for running asan tests. volatile int sum = 0; for (const auto& thrower : *fixed_arr) { @@ -97,7 +160,7 @@ testing::AssertionResult ReadMemory(FixedArr* fixed_arr) { TEST(FixedArrayExceptionSafety, Fill) { auto test_fill = testing::MakeExceptionSafetyTester() - .WithContracts(ReadMemory) + .WithContracts(ReadMemory<FixedArr>) .WithOperation([&](FixedArr* fixed_arr_ptr) { auto thrower = Thrower(kUpdatedValue, testing::nothrow_ctor); @@ -112,7 +175,28 @@ TEST(FixedArrayExceptionSafety, Fill) { .Test()); } +TEST(FixedArrayExceptionSafety, FillWithAlloc) { + auto test_fill = testing::MakeExceptionSafetyTester() + .WithContracts(ReadMemory<FixedArrWithAlloc>) + .WithOperation([&](FixedArrWithAlloc* fixed_arr_ptr) { + auto thrower = + Thrower(kUpdatedValue, testing::nothrow_ctor); + fixed_arr_ptr->fill(thrower); + }); + + EXPECT_TRUE(test_fill + .WithInitialValue( + FixedArrWithAlloc(kSmallSize, Thrower(kInitialValue))) + .Test()); + EXPECT_TRUE(test_fill + .WithInitialValue( + FixedArrWithAlloc(kLargeSize, Thrower(kInitialValue))) + .Test()); +} + } // namespace -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl + +#endif // ABSL_HAVE_EXCEPTIONS diff --git a/absl/container/fixed_array_test.cc b/absl/container/fixed_array_test.cc index 2b1cf47e..c960fe51 100644 --- a/absl/container/fixed_array_test.cc +++ b/absl/container/fixed_array_test.cc @@ -604,19 +604,16 @@ TEST(FixedArrayTest, Fill) { empty.fill(fill_val); } -// TODO(johnsoncj): Investigate InlinedStorage default initialization in GCC 4.x #ifndef __GNUC__ TEST(FixedArrayTest, DefaultCtorDoesNotValueInit) { using T = char; constexpr auto capacity = 10; using FixedArrType = absl::FixedArray<T, capacity>; - using FixedArrBuffType = - absl::aligned_storage_t<sizeof(FixedArrType), alignof(FixedArrType)>; constexpr auto scrubbed_bits = 0x95; constexpr auto length = capacity / 2; - FixedArrBuffType buff; - std::memset(std::addressof(buff), scrubbed_bits, sizeof(FixedArrBuffType)); + alignas(FixedArrType) unsigned char buff[sizeof(FixedArrType)]; + std::memset(std::addressof(buff), scrubbed_bits, sizeof(FixedArrType)); FixedArrType* arr = ::new (static_cast<void*>(std::addressof(buff))) FixedArrType(length); diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h index a711398e..fcb70d86 100644 --- a/absl/container/flat_hash_map.h +++ b/absl/container/flat_hash_map.h @@ -42,7 +42,7 @@ #include "absl/memory/memory.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN 
namespace container_internal { template <class K, class V> struct FlatHashMapPolicy; @@ -361,6 +361,10 @@ class flat_hash_map : public absl::container_internal::raw_hash_map< // Inserts (via copy or move) the element of the specified key into the // `flat_hash_map` using the position of `hint` as a non-binding suggestion // for where to begin the insertion search. + // + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. using Base::try_emplace; // flat_hash_map::extract() @@ -398,7 +402,7 @@ class flat_hash_map : public absl::container_internal::raw_hash_map< // for the past-the-end iterator, which is invalidated. // // `swap()` requires that the flat hash map's hashing and key equivalence - // functions be Swappable, and are exchaged using unqualified calls to + // functions be Swappable, and are exchanged using unqualified calls to // non-member `swap()`. If the map's allocator has // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value` // set to `true`, the allocators are also exchanged using an unqualified call @@ -528,6 +532,15 @@ class flat_hash_map : public absl::container_internal::raw_hash_map< using Base::key_eq; }; +// erase_if(flat_hash_map<>, Pred) +// +// Erases all elements that satisfy the predicate `pred` from the container `c`. +template <typename K, typename V, typename H, typename E, typename A, + typename Predicate> +void erase_if(flat_hash_map<K, V, H, E, A>& c, Predicate pred) { + container_internal::EraseIf(pred, &c); +} + namespace container_internal { template <class K, class V> @@ -581,7 +594,7 @@ struct IsUnorderedContainer< } // namespace container_algorithm_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_FLAT_HASH_MAP_H_ diff --git a/absl/container/flat_hash_map_test.cc b/absl/container/flat_hash_map_test.cc index 3f11a52c..728b693a 100644 --- a/absl/container/flat_hash_map_test.cc +++ b/absl/container/flat_hash_map_test.cc @@ -14,6 +14,8 @@ #include "absl/container/flat_hash_map.h" +#include <memory> + #include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/unordered_map_constructor_test.h" #include "absl/container/internal/unordered_map_lookup_test.h" @@ -22,12 +24,13 @@ #include "absl/types/any.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using ::absl::container_internal::hash_internal::Enum; using ::absl::container_internal::hash_internal::EnumClass; using ::testing::_; +using ::testing::IsEmpty; using ::testing::Pair; using ::testing::UnorderedElementsAre; @@ -47,6 +50,11 @@ INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, LookupTest, MapTypes); INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, MembersTest, MapTypes); INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ModifiersTest, MapTypes); +using UniquePtrMapTypes = ::testing::Types<Map<int, std::unique_ptr<int>>>; + +INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, UniquePtrModifiersTest, + UniquePtrMapTypes); + TEST(FlatHashMap, StandardLayout) { struct Int { explicit Int(size_t value) : value(value) {} @@ -208,42 +216,44 @@ TEST(FlatHashMap, MergeExtractInsert) { EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 17), Pair(2, 9))); } -#if (defined(ABSL_HAVE_STD_ANY) || !defined(_LIBCPP_VERSION)) && \ - !defined(__EMSCRIPTEN__) -TEST(FlatHashMap, Any) { - 
absl::flat_hash_map<int, absl::any> m; - m.emplace(1, 7); - auto it = m.find(1); - ASSERT_NE(it, m.end()); - EXPECT_EQ(7, absl::any_cast<int>(it->second)); - - m.emplace(std::piecewise_construct, std::make_tuple(2), std::make_tuple(8)); - it = m.find(2); - ASSERT_NE(it, m.end()); - EXPECT_EQ(8, absl::any_cast<int>(it->second)); - - m.emplace(std::piecewise_construct, std::make_tuple(3), - std::make_tuple(absl::any(9))); - it = m.find(3); - ASSERT_NE(it, m.end()); - EXPECT_EQ(9, absl::any_cast<int>(it->second)); - - struct H { - size_t operator()(const absl::any&) const { return 0; } - }; - struct E { - bool operator()(const absl::any&, const absl::any&) const { return true; } - }; - absl::flat_hash_map<absl::any, int, H, E> m2; - m2.emplace(1, 7); - auto it2 = m2.find(1); - ASSERT_NE(it2, m2.end()); - EXPECT_EQ(7, it2->second); +bool FirstIsEven(std::pair<const int, int> p) { return p.first % 2 == 0; } + +TEST(FlatHashMap, EraseIf) { + // Erase all elements. + { + flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; + erase_if(s, [](std::pair<const int, int>) { return true; }); + EXPECT_THAT(s, IsEmpty()); + } + // Erase no elements. + { + flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; + erase_if(s, [](std::pair<const int, int>) { return false; }); + EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), + Pair(4, 4), Pair(5, 5))); + } + // Erase specific elements. + { + flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; + erase_if(s, + [](std::pair<const int, int> kvp) { return kvp.first % 2 == 1; }); + EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4))); + } + // Predicate is function reference. + { + flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; + erase_if(s, FirstIsEven); + EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); + } + // Predicate is function pointer. + { + flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; + erase_if(s, &FirstIsEven); + EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); + } } -#endif // (defined(ABSL_HAVE_STD_ANY) || !defined(_LIBCPP_VERSION)) && - // !defined(__EMSCRIPTEN__) } // namespace } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h index 8adbbcd5..94be6e3d 100644 --- a/absl/container/flat_hash_set.h +++ b/absl/container/flat_hash_set.h @@ -40,7 +40,7 @@ #include "absl/memory/memory.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { template <typename T> struct FlatHashSetPolicy; @@ -439,6 +439,14 @@ class flat_hash_set using Base::key_eq; }; +// erase_if(flat_hash_set<>, Pred) +// +// Erases all elements that satisfy the predicate `pred` from the container `c`. 
+template <typename T, typename H, typename E, typename A, typename Predicate> +void erase_if(flat_hash_set<T, H, E, A>& c, Predicate pred) { + container_internal::EraseIf(pred, &c); +} + namespace container_internal { template <class T> @@ -489,7 +497,7 @@ struct IsUnorderedContainer<absl::flat_hash_set<Key, Hash, KeyEqual, Allocator>> } // namespace container_algorithm_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_FLAT_HASH_SET_H_ diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc index 56140bbe..40d7f85c 100644 --- a/absl/container/flat_hash_set_test.cc +++ b/absl/container/flat_hash_set_test.cc @@ -25,12 +25,13 @@ #include "absl/strings/string_view.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using ::absl::container_internal::hash_internal::Enum; using ::absl::container_internal::hash_internal::EnumClass; +using ::testing::IsEmpty; using ::testing::Pointee; using ::testing::UnorderedElementsAre; using ::testing::UnorderedElementsAreArray; @@ -124,7 +125,42 @@ TEST(FlatHashSet, MergeExtractInsert) { EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23))); } +bool IsEven(int k) { return k % 2 == 0; } + +TEST(FlatHashSet, EraseIf) { + // Erase all elements. + { + flat_hash_set<int> s = {1, 2, 3, 4, 5}; + erase_if(s, [](int) { return true; }); + EXPECT_THAT(s, IsEmpty()); + } + // Erase no elements. + { + flat_hash_set<int> s = {1, 2, 3, 4, 5}; + erase_if(s, [](int) { return false; }); + EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5)); + } + // Erase specific elements. + { + flat_hash_set<int> s = {1, 2, 3, 4, 5}; + erase_if(s, [](int k) { return k % 2 == 1; }); + EXPECT_THAT(s, UnorderedElementsAre(2, 4)); + } + // Predicate is function reference. + { + flat_hash_set<int> s = {1, 2, 3, 4, 5}; + erase_if(s, IsEven); + EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); + } + // Predicate is function pointer. 
+ { + flat_hash_set<int> s = {1, 2, 3, 4, 5}; + erase_if(s, &IsEven); + EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); + } +} + } // namespace } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h index 27186b15..2388d471 100644 --- a/absl/container/inlined_vector.h +++ b/absl/container/inlined_vector.h @@ -54,7 +54,7 @@ #include "absl/memory/memory.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN // ----------------------------------------------------------------------------- // InlinedVector // ----------------------------------------------------------------------------- @@ -70,9 +70,10 @@ class InlinedVector { static_assert(N > 0, "`absl::InlinedVector` requires an inlined capacity."); using Storage = inlined_vector_internal::Storage<T, N, A>; - using rvalue_reference = typename Storage::rvalue_reference; - using MoveIterator = typename Storage::MoveIterator; + using AllocatorTraits = typename Storage::AllocatorTraits; + using RValueReference = typename Storage::RValueReference; + using MoveIterator = typename Storage::MoveIterator; using IsMemcpyOk = typename Storage::IsMemcpyOk; template <typename Iterator> @@ -93,10 +94,10 @@ class InlinedVector { using value_type = typename Storage::value_type; using pointer = typename Storage::pointer; using const_pointer = typename Storage::const_pointer; - using reference = typename Storage::reference; - using const_reference = typename Storage::const_reference; using size_type = typename Storage::size_type; using difference_type = typename Storage::difference_type; + using reference = typename Storage::reference; + using const_reference = typename Storage::const_reference; using iterator = typename Storage::iterator; using const_iterator = typename Storage::const_iterator; using reverse_iterator = typename Storage::reverse_iterator; @@ -534,7 +535,6 @@ class InlinedVector { } erase(data() + i, data() + size()); - std::copy(first, last, std::back_inserter(*this)); } @@ -565,7 +565,7 @@ class InlinedVector { // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using // move semantics, returning an `iterator` to the newly inserted element. - iterator insert(const_iterator pos, rvalue_reference v) { + iterator insert(const_iterator pos, RValueReference v) { return emplace(pos, std::move(v)); } @@ -662,7 +662,7 @@ class InlinedVector { // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()` // using move semantics. 
- void push_back(rvalue_reference v) { + void push_back(RValueReference v) { static_cast<void>(emplace_back(std::move(v))); } @@ -714,6 +714,7 @@ class InlinedVector { inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(), size()); storage_.DeallocateIfAllocated(); + storage_.SetInlinedSize(0); } @@ -841,7 +842,7 @@ H AbslHashValue(H h, const absl::InlinedVector<T, N, A>& a) { return H::combine(H::combine_contiguous(std::move(h), a.data(), size), size); } -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INLINED_VECTOR_H_ diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc index b99bbd62..3f2b4ed2 100644 --- a/absl/container/inlined_vector_benchmark.cc +++ b/absl/container/inlined_vector_benchmark.cc @@ -25,42 +25,45 @@ namespace { void BM_InlinedVectorFill(benchmark::State& state) { + const int len = state.range(0); absl::InlinedVector<int, 8> v; - int val = 10; + v.reserve(len); for (auto _ : state) { + v.resize(0); // Use resize(0) as InlinedVector releases storage on clear(). + for (int i = 0; i < len; ++i) { + v.push_back(i); + } benchmark::DoNotOptimize(v); - v.push_back(val); } } -BENCHMARK(BM_InlinedVectorFill)->Range(0, 1024); +BENCHMARK(BM_InlinedVectorFill)->Range(1, 256); void BM_InlinedVectorFillRange(benchmark::State& state) { const int len = state.range(0); - std::unique_ptr<int[]> ia(new int[len]); - for (int i = 0; i < len; i++) { - ia[i] = i; - } - auto* from = ia.get(); - auto* to = from + len; + const std::vector<int> src(len, len); + absl::InlinedVector<int, 8> v; + v.reserve(len); for (auto _ : state) { - benchmark::DoNotOptimize(from); - benchmark::DoNotOptimize(to); - absl::InlinedVector<int, 8> v(from, to); + benchmark::DoNotOptimize(src); + v.assign(src.begin(), src.end()); benchmark::DoNotOptimize(v); } } -BENCHMARK(BM_InlinedVectorFillRange)->Range(0, 1024); +BENCHMARK(BM_InlinedVectorFillRange)->Range(1, 256); void BM_StdVectorFill(benchmark::State& state) { + const int len = state.range(0); std::vector<int> v; - int val = 10; + v.reserve(len); for (auto _ : state) { + v.clear(); + for (int i = 0; i < len; ++i) { + v.push_back(i); + } benchmark::DoNotOptimize(v); - benchmark::DoNotOptimize(val); - v.push_back(val); } } -BENCHMARK(BM_StdVectorFill)->Range(0, 1024); +BENCHMARK(BM_StdVectorFill)->Range(1, 256); // The purpose of the next two benchmarks is to verify that // absl::InlinedVector is efficient when moving is more efficient than diff --git a/absl/container/inlined_vector_exception_safety_test.cc b/absl/container/inlined_vector_exception_safety_test.cc index ff0da75b..0e6a05b5 100644 --- a/absl/container/inlined_vector_exception_safety_test.cc +++ b/absl/container/inlined_vector_exception_safety_test.cc @@ -12,6 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License.
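The benchmark rewrite above hinges on the new SetInlinedSize(0) call: after this patch, clear() both destroys the elements and releases any heap allocation, returning the vector to its inlined representation, which is why the fill benchmarks use resize(0) to keep their reserved capacity across iterations. A small sketch of the difference, assuming the patched behavior (the printed capacities are implementation details of this change, not API guarantees):

#include <iostream>
#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 8> v;
  for (int i = 0; i < 100; ++i) v.push_back(i);  // spills to heap storage
  std::cout << v.capacity() << "\n";             // >= 100
  v.clear();                                     // frees the heap block
  std::cout << v.capacity() << "\n";             // back to the inline 8
  v.reserve(100);
  v.resize(0);                                   // resize(0) keeps the allocation
  std::cout << v.capacity() << "\n";             // still >= 100
  return 0;
}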
+#include "absl/container/inlined_vector.h" + +#include "absl/base/config.h" + +#if defined(ABSL_HAVE_EXCEPTIONS) + #include <array> #include <initializer_list> #include <iterator> @@ -20,7 +26,6 @@ #include "gtest/gtest.h" #include "absl/base/internal/exception_safety_testing.h" -#include "absl/container/inlined_vector.h" namespace { @@ -57,8 +62,8 @@ using ThrowAllocMovableThrowerVec = \ : std::initializer_list<T>{T(0, testing::nothrow_ctor), \ T(1, testing::nothrow_ctor)}) -static_assert((kLargeSize == 8 || kSmallSize == 2), - "Must update ABSL_INTERNAL_MAKE_INIT_LIST(...)."); +static_assert(kLargeSize == 8, "Must update ABSL_INTERNAL_MAKE_INIT_LIST(...)"); +static_assert(kSmallSize == 2, "Must update ABSL_INTERNAL_MAKE_INIT_LIST(...)"); template <typename TheVecT, size_t... TheSizes> class TestParams { @@ -359,9 +364,11 @@ TYPED_TEST(OneSizeTest, EmplaceBack) { using VecT = typename TypeParam::VecT; constexpr static auto size = TypeParam::GetSizeAt(0); + // For testing calls to `emplace_back(...)` that reallocate. VecT full_vec{size}; full_vec.resize(full_vec.capacity()); + // For testing calls to `emplace_back(...)` that don't reallocate. VecT nonfull_vec{size}; nonfull_vec.reserve(size + 1); @@ -369,12 +376,11 @@ TYPED_TEST(OneSizeTest, EmplaceBack) { InlinedVectorInvariants<VecT>); EXPECT_TRUE(tester.WithInitialValue(nonfull_vec).Test([](VecT* vec) { - vec->emplace_back(); // + vec->emplace_back(); })); - EXPECT_TRUE(tester.WithInitialValue(full_vec).Test([](VecT* vec) { - vec->emplace_back(); // - })); + EXPECT_TRUE(tester.WithInitialValue(full_vec).Test( + [](VecT* vec) { vec->emplace_back(); })); } TYPED_TEST(OneSizeTest, PopBack) { @@ -413,6 +419,19 @@ TYPED_TEST(OneSizeTest, Erase) { EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin(); + vec->erase(it, it); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin() + (vec->size() / 2); + vec->erase(it, it); + })); + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin() + (vec->size() - 1); + vec->erase(it, it); + })); + + EXPECT_TRUE(tester.Test([](VecT* vec) { + auto it = vec->begin(); vec->erase(it, it + 1); })); EXPECT_TRUE(tester.Test([](VecT* vec) { @@ -447,9 +466,7 @@ TYPED_TEST(TwoSizeTest, Reserve) { .WithInitialValue(VecT{from_size}) .WithContracts(InlinedVectorInvariants<VecT>); - EXPECT_TRUE(tester.Test([](VecT* vec) { - vec->reserve(to_capacity); // - })); + EXPECT_TRUE(tester.Test([](VecT* vec) { vec->reserve(to_capacity); })); } TYPED_TEST(OneSizeTest, ShrinkToFit) { @@ -487,3 +504,5 @@ TYPED_TEST(TwoSizeTest, Swap) { } } // namespace + +#endif // defined(ABSL_HAVE_EXCEPTIONS) diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc index bada4fec..2c9b0d0e 100644 --- a/absl/container/inlined_vector_test.cc +++ b/absl/container/inlined_vector_test.cc @@ -1689,7 +1689,11 @@ TEST(AllocatorSupportTest, ScopedAllocatorWorksInlined) { inlined_case.emplace_back(); int64_t absl_responsible_for_count = total_allocated_byte_count; + + // MSVC's allocator preemptively allocates in debug mode +#if !defined(_MSC_VER) EXPECT_EQ(absl_responsible_for_count, 0); +#endif // !defined(_MSC_VER) inlined_case[0].emplace_back(); EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count); @@ -1750,6 +1754,30 @@ TEST(AllocatorSupportTest, SizeAllocConstructor) { } } +TEST(InlinedVectorTest, MinimumAllocatorCompilesUsingTraits) { + using T = int; + using A = std::allocator<T>; + using ATraits = absl::allocator_traits<A>; + + struct MinimumAllocator { + using 
value_type = T; + + value_type* allocate(size_t n) { + A a; + return ATraits::allocate(a, n); + } + + void deallocate(value_type* p, size_t n) { + A a; + ATraits::deallocate(a, p, n); + } + }; + + absl::InlinedVector<T, 1, MinimumAllocator> vec; + vec.emplace_back(); + vec.resize(0); +} + TEST(InlinedVectorTest, AbslHashValueWorks) { using V = absl::InlinedVector<int, 4>; std::vector<V> cases; diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h new file mode 100644 index 00000000..fd5c0e7a --- /dev/null +++ b/absl/container/internal/btree.h @@ -0,0 +1,2614 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A btree implementation of the STL set and map interfaces. A btree is smaller +// and generally also faster than STL set/map (refer to the benchmarks below). +// The red-black tree implementation of STL set/map has an overhead of 3 +// pointers (left, right and parent) plus the node color information for each +// stored value. So a set<int32_t> consumes 40 bytes for each value stored in +// 64-bit mode. This btree implementation stores multiple values on fixed +// size nodes (usually 256 bytes) and doesn't store child pointers for leaf +// nodes. The result is that a btree_set<int32_t> may use much less memory per +// stored value. For the random insertion benchmark in btree_bench.cc, a +// btree_set<int32_t> with node-size of 256 uses 5.1 bytes per stored value. +// +// The packing of multiple values on to each node of a btree has another effect +// besides better space utilization: better cache locality due to fewer cache +// lines being accessed. Better cache locality translates into faster +// operations. +// +// CAVEATS +// +// Insertions and deletions on a btree can cause splitting, merging or +// rebalancing of btree nodes. And even without these operations, insertions +// and deletions on a btree will move values around within a node. In both +// cases, the result is that insertions and deletions can invalidate iterators +// pointing to values other than the one being inserted/deleted. Therefore, this +// container does not provide pointer stability. This is notably different from +// STL set/map which takes care to not invalidate iterators on insert/erase +// except, of course, for iterators pointing to the value being erased. A +// partial workaround when erasing is available: erase() returns an iterator +// pointing to the item just after the one that was erased (or end() if none +// exists). 
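Because of the invalidation caveat above, the only safe way to erase while iterating is to resume from the iterator that erase() returns rather than from a saved one. A minimal sketch of that idiom; std::set is used here purely as a stand-in with the same erase-returns-next signature:

#include <set>

int main() {
  std::set<int> s = {1, 2, 3, 4, 5, 6};
  for (auto it = s.begin(); it != s.end();) {
    if (*it % 2 == 0) {
      it = s.erase(it);  // continue from the element after the erased one
    } else {
      ++it;              // advance manually only when nothing was erased
    }
  }
  return static_cast<int>(s.size());  // 3
}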
+ +#ifndef ABSL_CONTAINER_INTERNAL_BTREE_H_ +#define ABSL_CONTAINER_INTERNAL_BTREE_H_ + +#include <algorithm> +#include <cassert> +#include <cstddef> +#include <cstdint> +#include <cstring> +#include <functional> +#include <iterator> +#include <limits> +#include <new> +#include <string> +#include <type_traits> +#include <utility> + +#include "absl/base/macros.h" +#include "absl/container/internal/common.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/layout.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" +#include "absl/types/compare.h" +#include "absl/utility/utility.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// A helper class that indicates if the Compare parameter is a key-compare-to +// comparator. +template <typename Compare, typename T> +using btree_is_key_compare_to = + std::is_convertible<absl::result_of_t<Compare(const T &, const T &)>, + absl::weak_ordering>; + +struct StringBtreeDefaultLess { + using is_transparent = void; + + StringBtreeDefaultLess() = default; + + // Compatibility constructor. + StringBtreeDefaultLess(std::less<std::string>) {} // NOLINT + StringBtreeDefaultLess(std::less<string_view>) {} // NOLINT + + absl::weak_ordering operator()(absl::string_view lhs, + absl::string_view rhs) const { + return compare_internal::compare_result_as_ordering(lhs.compare(rhs)); + } +}; + +struct StringBtreeDefaultGreater { + using is_transparent = void; + + StringBtreeDefaultGreater() = default; + + StringBtreeDefaultGreater(std::greater<std::string>) {} // NOLINT + StringBtreeDefaultGreater(std::greater<string_view>) {} // NOLINT + + absl::weak_ordering operator()(absl::string_view lhs, + absl::string_view rhs) const { + return compare_internal::compare_result_as_ordering(rhs.compare(lhs)); + } +}; + +// A helper class to convert a boolean comparison into a three-way "compare-to" +// comparison that returns a negative value to indicate less-than, zero to +// indicate equality and a positive value to indicate greater-than. This helper +// class is specialized for less<std::string>, greater<std::string>, +// less<string_view>, and greater<string_view>. +// +// key_compare_to_adapter is provided so that btree users +// automatically get the more efficient compare-to code when using common +// google string types with common comparison functors. +// These string-like specializations also turn on heterogeneous lookup by +// default. +template <typename Compare> +struct key_compare_to_adapter { + using type = Compare; +}; + +template <> +struct key_compare_to_adapter<std::less<std::string>> { + using type = StringBtreeDefaultLess; +}; + +template <> +struct key_compare_to_adapter<std::greater<std::string>> { + using type = StringBtreeDefaultGreater; +}; + +template <> +struct key_compare_to_adapter<std::less<absl::string_view>> { + using type = StringBtreeDefaultLess; +}; + +template <> +struct key_compare_to_adapter<std::greater<absl::string_view>> { + using type = StringBtreeDefaultGreater; +}; + +template <typename Key, typename Compare, typename Alloc, int TargetNodeSize, + bool Multi, typename SlotPolicy> +struct common_params { + // If Compare is a common comparator for a std::string-like type, then we adapt it + // to use heterogeneous lookup and to be a key-compare-to comparator. 
+ using key_compare = typename key_compare_to_adapter<Compare>::type; + // A type which indicates if we have a key-compare-to functor or a plain old + // key-compare functor. + using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>; + + using allocator_type = Alloc; + using key_type = Key; + using size_type = std::make_signed<size_t>::type; + using difference_type = ptrdiff_t; + + // True if this is a multiset or multimap. + using is_multi_container = std::integral_constant<bool, Multi>; + + using slot_policy = SlotPolicy; + using slot_type = typename slot_policy::slot_type; + using value_type = typename slot_policy::value_type; + using init_type = typename slot_policy::mutable_value_type; + using pointer = value_type *; + using const_pointer = const value_type *; + using reference = value_type &; + using const_reference = const value_type &; + + enum { + kTargetNodeSize = TargetNodeSize, + + // Upper bound for the available space for values. This is largest for leaf + // nodes, which have overhead of at least a pointer + 4 bytes (for storing + // 3 field_types and an enum). + kNodeValueSpace = + TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4), + }; + + // This is an integral type large enough to hold as many + // ValueSize-values as will fit a node of TargetNodeSize bytes. + using node_count_type = + absl::conditional_t<(kNodeValueSpace / sizeof(value_type) > + (std::numeric_limits<uint8_t>::max)()), + uint16_t, uint8_t>; // NOLINT + + // The following methods are necessary for passing this struct as PolicyTraits + // for node_handle and/or are used within btree. + static value_type &element(slot_type *slot) { + return slot_policy::element(slot); + } + static const value_type &element(const slot_type *slot) { + return slot_policy::element(slot); + } + template <class... Args> + static void construct(Alloc *alloc, slot_type *slot, Args &&... args) { + slot_policy::construct(alloc, slot, std::forward<Args>(args)...); + } + static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { + slot_policy::construct(alloc, slot, other); + } + static void destroy(Alloc *alloc, slot_type *slot) { + slot_policy::destroy(alloc, slot); + } + static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) { + construct(alloc, new_slot, old_slot); + destroy(alloc, old_slot); + } + static void swap(Alloc *alloc, slot_type *a, slot_type *b) { + slot_policy::swap(alloc, a, b); + } + static void move(Alloc *alloc, slot_type *src, slot_type *dest) { + slot_policy::move(alloc, src, dest); + } + static void move(Alloc *alloc, slot_type *first, slot_type *last, + slot_type *result) { + slot_policy::move(alloc, first, last, result); + } +}; + +// A parameters structure for holding the type parameters for a btree_map. +// Compare and Alloc should be nothrow copy-constructible. +template <typename Key, typename Data, typename Compare, typename Alloc, + int TargetNodeSize, bool Multi> +struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi, + map_slot_policy<Key, Data>> { + using super_type = typename map_params::common_params; + using mapped_type = Data; + // This type allows us to move keys when it is safe to do so. It is safe + // for maps in which value_type and mutable_value_type are layout compatible. 
+ using slot_policy = typename super_type::slot_policy; + using slot_type = typename super_type::slot_type; + using value_type = typename super_type::value_type; + using init_type = typename super_type::init_type; + + using key_compare = typename super_type::key_compare; + // Inherit from key_compare for empty base class optimization. + struct value_compare : private key_compare { + value_compare() = default; + explicit value_compare(const key_compare &cmp) : key_compare(cmp) {} + + template <typename T, typename U> + auto operator()(const T &left, const U &right) const + -> decltype(std::declval<key_compare>()(left.first, right.first)) { + return key_compare::operator()(left.first, right.first); + } + }; + using is_map_container = std::true_type; + + static const Key &key(const value_type &x) { return x.first; } + static const Key &key(const init_type &x) { return x.first; } + static const Key &key(const slot_type *x) { return slot_policy::key(x); } + static mapped_type &value(value_type *value) { return value->second; } +}; + +// This type implements the necessary functions from the +// absl::container_internal::slot_type interface. +template <typename Key> +struct set_slot_policy { + using slot_type = Key; + using value_type = Key; + using mutable_value_type = Key; + + static value_type &element(slot_type *slot) { return *slot; } + static const value_type &element(const slot_type *slot) { return *slot; } + + template <typename Alloc, class... Args> + static void construct(Alloc *alloc, slot_type *slot, Args &&... args) { + absl::allocator_traits<Alloc>::construct(*alloc, slot, + std::forward<Args>(args)...); + } + + template <typename Alloc> + static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { + absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other)); + } + + template <typename Alloc> + static void destroy(Alloc *alloc, slot_type *slot) { + absl::allocator_traits<Alloc>::destroy(*alloc, slot); + } + + template <typename Alloc> + static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) { + using std::swap; + swap(*a, *b); + } + + template <typename Alloc> + static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) { + *dest = std::move(*src); + } + + template <typename Alloc> + static void move(Alloc *alloc, slot_type *first, slot_type *last, + slot_type *result) { + for (slot_type *src = first, *dest = result; src != last; ++src, ++dest) + move(alloc, src, dest); + } +}; + +// A parameters structure for holding the type parameters for a btree_set. +// Compare and Alloc should be nothrow copy-constructible. +template <typename Key, typename Compare, typename Alloc, int TargetNodeSize, + bool Multi> +struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi, + set_slot_policy<Key>> { + using value_type = Key; + using slot_type = typename set_params::common_params::slot_type; + using value_compare = typename set_params::common_params::key_compare; + using is_map_container = std::false_type; + + static const Key &key(const value_type &x) { return x; } + static const Key &key(const slot_type *x) { return *x; } +}; + +// An adapter class that converts a lower-bound compare into an upper-bound +// compare. Note: there is no need to make a version of this adapter specialized +// for key-compare-to functors because the upper-bound (the first value greater +// than the input) is never an exact match. 
+template <typename Compare> +struct upper_bound_adapter { + explicit upper_bound_adapter(const Compare &c) : comp(c) {} + template <typename K, typename LK> + bool operator()(const K &a, const LK &b) const { + // Returns true when a is not greater than b. + return !compare_internal::compare_result_as_less_than(comp(b, a)); + } + + private: + Compare comp; +}; + +enum class MatchKind : uint8_t { kEq, kNe }; + +template <typename V, bool IsCompareTo> +struct SearchResult { + V value; + MatchKind match; + + static constexpr bool HasMatch() { return true; } + bool IsEq() const { return match == MatchKind::kEq; } +}; + +// When we don't use CompareTo, `match` is not present. +// This ensures that callers can't use it accidentally when it provides no +// useful information. +template <typename V> +struct SearchResult<V, false> { + V value; + + static constexpr bool HasMatch() { return false; } + static constexpr bool IsEq() { return false; } +}; + +// A node in the btree. The same node type is used for both internal +// and leaf nodes in the btree, though the nodes are allocated in such a way +// that the children array is only valid in internal nodes. +template <typename Params> +class btree_node { + using is_key_compare_to = typename Params::is_key_compare_to; + using is_multi_container = typename Params::is_multi_container; + using field_type = typename Params::node_count_type; + using allocator_type = typename Params::allocator_type; + using slot_type = typename Params::slot_type; + + public: + using params_type = Params; + using key_type = typename Params::key_type; + using value_type = typename Params::value_type; + using pointer = typename Params::pointer; + using const_pointer = typename Params::const_pointer; + using reference = typename Params::reference; + using const_reference = typename Params::const_reference; + using key_compare = typename Params::key_compare; + using size_type = typename Params::size_type; + using difference_type = typename Params::difference_type; + + // Btree decides whether to use linear node search as follows: + // - If the key is arithmetic and the comparator is std::less or + // std::greater, choose linear. + // - Otherwise, choose binary. + // TODO(ezb): Might make sense to add condition(s) based on node-size. + using use_linear_search = std::integral_constant< + bool, + std::is_arithmetic<key_type>::value && + (std::is_same<std::less<key_type>, key_compare>::value || + std::is_same<std::greater<key_type>, key_compare>::value)>; + + // This class is organized by gtl::Layout as if it had the following + // structure: + // // A pointer to the node's parent. + // btree_node *parent; + + // + // // The position of the node in the node's parent. + // field_type position; + // // The index of the first populated value in `values`. + // // TODO(ezb): right now, `start` is always 0. Update insertion/merge + // // logic to allow for floating storage within nodes. + // field_type start; + // // The index after the last populated value in `values`. Currently, this + // // is the same as the count of values. + // field_type finish; + // // The maximum number of values the node can hold. This is an integer in + // // [1, kNodeValues] for root leaf nodes, kNodeValues for non-root leaf + // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal + // // nodes (even though there are still kNodeValues values in the node). + // // TODO(ezb): make max_count use only 4 bits and record log2(capacity) + // // to free extra bits for is_root, etc.
+ // field_type max_count; + // + // // The array of values. The capacity is `max_count` for leaf nodes and + // // kNodeValues for internal nodes. Only the values in + // // [start, finish) have been initialized and are valid. + // slot_type values[max_count]; + // + // // The array of child pointers. The keys in children[i] are all less + // // than key(i). The keys in children[i + 1] are all greater than key(i). + // // There are 0 children for leaf nodes and kNodeValues + 1 children for + // // internal nodes. + // btree_node *children[kNodeValues + 1]; + // + // This class is only constructed by EmptyNodeType. Normally, pointers to the + // layout above are allocated, cast to btree_node*, and de-allocated within + // the btree implementation. + ~btree_node() = default; + btree_node(btree_node const &) = delete; + btree_node &operator=(btree_node const &) = delete; + + // Public for EmptyNodeType. + constexpr static size_type Alignment() { + static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(), + "Alignment of all nodes must be equal."); + return InternalLayout().Alignment(); + } + + protected: + btree_node() = default; + + private: + using layout_type = absl::container_internal::Layout<btree_node *, field_type, + slot_type, btree_node *>; + constexpr static size_type SizeWithNValues(size_type n) { + return layout_type(/*parent*/ 1, + /*position, start, finish, max_count*/ 4, + /*values*/ n, + /*children*/ 0) + .AllocSize(); + } + // A lower bound for the overhead of fields other than values in a leaf node. + constexpr static size_type MinimumOverhead() { + return SizeWithNValues(1) - sizeof(value_type); + } + + // Compute how many values we can fit onto a leaf node taking into account + // padding. + constexpr static size_type NodeTargetValues(const int begin, const int end) { + return begin == end ? begin + : SizeWithNValues((begin + end) / 2 + 1) > + params_type::kTargetNodeSize + ? NodeTargetValues(begin, (begin + end) / 2) + : NodeTargetValues((begin + end) / 2 + 1, end); + } + + enum { + kTargetNodeSize = params_type::kTargetNodeSize, + kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize), + + // We need a minimum of 3 values per internal node in order to perform + // splitting (1 value for the two nodes involved in the split and 1 value + // propagated to the parent as the delimiter for the split). + kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3, + + // The node is internal (i.e. is not a leaf node) if and only if `max_count` + // has this value. + kInternalNodeMaxCount = 0, + }; + + // Leaves can have less than kNodeValues values. + constexpr static layout_type LeafLayout(const int max_values = kNodeValues) { + return layout_type(/*parent*/ 1, + /*position, start, finish, max_count*/ 4, + /*values*/ max_values, + /*children*/ 0); + } + constexpr static layout_type InternalLayout() { + return layout_type(/*parent*/ 1, + /*position, start, finish, max_count*/ 4, + /*values*/ kNodeValues, + /*children*/ kNodeValues + 1); + } + constexpr static size_type LeafSize(const int max_values = kNodeValues) { + return LeafLayout(max_values).AllocSize(); + } + constexpr static size_type InternalSize() { + return InternalLayout().AllocSize(); + } + + // N is the index of the type in the Layout definition. + // ElementType<N> is the Nth type in the Layout definition. + template <size_type N> + inline typename layout_type::template ElementType<N> *GetField() { + // We assert that we don't read from values that aren't there. 
+ assert(N < 3 || !leaf()); + return InternalLayout().template Pointer<N>(reinterpret_cast<char *>(this)); + } + template <size_type N> + inline const typename layout_type::template ElementType<N> *GetField() const { + assert(N < 3 || !leaf()); + return InternalLayout().template Pointer<N>( + reinterpret_cast<const char *>(this)); + } + void set_parent(btree_node *p) { *GetField<0>() = p; } + field_type &mutable_finish() { return GetField<1>()[2]; } + slot_type *slot(int i) { return &GetField<2>()[i]; } + slot_type *start_slot() { return slot(start()); } + slot_type *finish_slot() { return slot(finish()); } + const slot_type *slot(int i) const { return &GetField<2>()[i]; } + void set_position(field_type v) { GetField<1>()[0] = v; } + void set_start(field_type v) { GetField<1>()[1] = v; } + void set_finish(field_type v) { GetField<1>()[2] = v; } + // This method is only called by the node init methods. + void set_max_count(field_type v) { GetField<1>()[3] = v; } + + public: + // Whether this is a leaf node or not. This value doesn't change after the + // node is created. + bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; } + + // Getter for the position of this node in its parent. + field_type position() const { return GetField<1>()[0]; } + + // Getter for the offset of the first value in the `values` array. + field_type start() const { + // TODO(ezb): when floating storage is implemented, return GetField<1>()[1]; + assert(GetField<1>()[1] == 0); + return 0; + } + + // Getter for the offset after the last value in the `values` array. + field_type finish() const { return GetField<1>()[2]; } + + // Getters for the number of values stored in this node. + field_type count() const { + assert(finish() >= start()); + return finish() - start(); + } + field_type max_count() const { + // Internal nodes have max_count==kInternalNodeMaxCount. + // Leaf nodes have max_count in [1, kNodeValues]. + const field_type max_count = GetField<1>()[3]; + return max_count == field_type{kInternalNodeMaxCount} + ? field_type{kNodeValues} + : max_count; + } + + // Getter for the parent of this node. + btree_node *parent() const { return *GetField<0>(); } + // Getter for whether the node is the root of the tree. The parent of the + // root of the tree is the leftmost node in the tree which is guaranteed to + // be a leaf. + bool is_root() const { return parent()->leaf(); } + void make_root() { + assert(parent()->is_root()); + set_parent(parent()->parent()); + } + + // Getters for the key/value at position i in the node. + const key_type &key(int i) const { return params_type::key(slot(i)); } + reference value(int i) { return params_type::element(slot(i)); } + const_reference value(int i) const { return params_type::element(slot(i)); } + + // Getters/setter for the child at position i in the node. + btree_node *child(int i) const { return GetField<3>()[i]; } + btree_node *start_child() const { return child(start()); } + btree_node *&mutable_child(int i) { return GetField<3>()[i]; } + void clear_child(int i) { + absl::container_internal::SanitizerPoisonObject(&mutable_child(i)); + } + void set_child(int i, btree_node *c) { + absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i)); + mutable_child(i) = c; + c->set_position(i); + } + void init_child(int i, btree_node *c) { + set_child(i, c); + c->set_parent(this); + } + + // Returns the position of the first value whose key is not less than k. 
+ template <typename K> + SearchResult<int, is_key_compare_to::value> lower_bound( + const K &k, const key_compare &comp) const { + return use_linear_search::value ? linear_search(k, comp) + : binary_search(k, comp); + } + // Returns the position of the first value whose key is greater than k. + template <typename K> + int upper_bound(const K &k, const key_compare &comp) const { + auto upper_compare = upper_bound_adapter<key_compare>(comp); + return use_linear_search::value ? linear_search(k, upper_compare).value + : binary_search(k, upper_compare).value; + } + + template <typename K, typename Compare> + SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value> + linear_search(const K &k, const Compare &comp) const { + return linear_search_impl(k, start(), finish(), comp, + btree_is_key_compare_to<Compare, key_type>()); + } + + template <typename K, typename Compare> + SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value> + binary_search(const K &k, const Compare &comp) const { + return binary_search_impl(k, start(), finish(), comp, + btree_is_key_compare_to<Compare, key_type>()); + } + + // Returns the position of the first value whose key is not less than k using + // linear search performed using plain compare. + template <typename K, typename Compare> + SearchResult<int, false> linear_search_impl( + const K &k, int s, const int e, const Compare &comp, + std::false_type /* IsCompareTo */) const { + while (s < e) { + if (!comp(key(s), k)) { + break; + } + ++s; + } + return {s}; + } + + // Returns the position of the first value whose key is not less than k using + // linear search performed using compare-to. + template <typename K, typename Compare> + SearchResult<int, true> linear_search_impl( + const K &k, int s, const int e, const Compare &comp, + std::true_type /* IsCompareTo */) const { + while (s < e) { + const absl::weak_ordering c = comp(key(s), k); + if (c == 0) { + return {s, MatchKind::kEq}; + } else if (c > 0) { + break; + } + ++s; + } + return {s, MatchKind::kNe}; + } + + // Returns the position of the first value whose key is not less than k using + // binary search performed using plain compare. + template <typename K, typename Compare> + SearchResult<int, false> binary_search_impl( + const K &k, int s, int e, const Compare &comp, + std::false_type /* IsCompareTo */) const { + while (s != e) { + const int mid = (s + e) >> 1; + if (comp(key(mid), k)) { + s = mid + 1; + } else { + e = mid; + } + } + return {s}; + } + + // Returns the position of the first value whose key is not less than k using + // binary search performed using compare-to. + template <typename K, typename CompareTo> + SearchResult<int, true> binary_search_impl( + const K &k, int s, int e, const CompareTo &comp, + std::true_type /* IsCompareTo */) const { + if (is_multi_container::value) { + MatchKind exact_match = MatchKind::kNe; + while (s != e) { + const int mid = (s + e) >> 1; + const absl::weak_ordering c = comp(key(mid), k); + if (c < 0) { + s = mid + 1; + } else { + e = mid; + if (c == 0) { + // Need to return the first value whose key is not less than k, + // which requires continuing the binary search if this is a + // multi-container. + exact_match = MatchKind::kEq; + } + } + } + return {s, exact_match}; + } else { // Not a multi-container. 
+ while (s != e) { + const int mid = (s + e) >> 1; + const absl::weak_ordering c = comp(key(mid), k); + if (c < 0) { + s = mid + 1; + } else if (c > 0) { + e = mid; + } else { + return {mid, MatchKind::kEq}; + } + } + return {s, MatchKind::kNe}; + } + } + + // Emplaces a value at position i, shifting all existing values and + // children at positions >= i to the right by 1. + template <typename... Args> + void emplace_value(size_type i, allocator_type *alloc, Args &&... args); + + // Removes the value at position i, shifting all existing values and children + // at positions > i to the left by 1. + void remove_value(int i, allocator_type *alloc); + + // Removes the values at positions [i, i + to_erase), shifting all values + // after that range to the left by to_erase. Does not change children at all. + void remove_values_ignore_children(int i, int to_erase, + allocator_type *alloc); + + // Rebalances a node with its right sibling. + void rebalance_right_to_left(int to_move, btree_node *right, + allocator_type *alloc); + void rebalance_left_to_right(int to_move, btree_node *right, + allocator_type *alloc); + + // Splits a node, moving a portion of the node's values to its right sibling. + void split(int insert_position, btree_node *dest, allocator_type *alloc); + + // Merges a node with its right sibling, moving all of the values and the + // delimiting key in the parent node onto itself. + void merge(btree_node *sibling, allocator_type *alloc); + + // Swap the contents of "this" and "src". + void swap(btree_node *src, allocator_type *alloc); + + // Node allocation/deletion routines. + static btree_node *init_leaf(btree_node *n, btree_node *parent, + int max_count) { + n->set_parent(parent); + n->set_position(0); + n->set_start(0); + n->set_finish(0); + n->set_max_count(max_count); + absl::container_internal::SanitizerPoisonMemoryRegion( + n->start_slot(), max_count * sizeof(slot_type)); + return n; + } + static btree_node *init_internal(btree_node *n, btree_node *parent) { + init_leaf(n, parent, kNodeValues); + // Set `max_count` to a sentinel value to indicate that this node is + // internal. + n->set_max_count(kInternalNodeMaxCount); + absl::container_internal::SanitizerPoisonMemoryRegion( + &n->mutable_child(n->start()), + (kNodeValues + 1) * sizeof(btree_node *)); + return n; + } + void destroy(allocator_type *alloc) { + for (int i = start(); i < finish(); ++i) { + value_destroy(i, alloc); + } + } + + public: + // Exposed only for tests. + static bool testonly_uses_linear_node_search() { + return use_linear_search::value; + } + + private: + template <typename... Args> + void value_init(const size_type i, allocator_type *alloc, Args &&... args) { + absl::container_internal::SanitizerUnpoisonObject(slot(i)); + params_type::construct(alloc, slot(i), std::forward<Args>(args)...); + } + void value_destroy(const size_type i, allocator_type *alloc) { + params_type::destroy(alloc, slot(i)); + absl::container_internal::SanitizerPoisonObject(slot(i)); + } + + // Move n values starting at value i in this node into the values starting at + // value j in node x. + void uninitialized_move_n(const size_type n, const size_type i, + const size_type j, btree_node *x, + allocator_type *alloc) { + absl::container_internal::SanitizerUnpoisonMemoryRegion( + x->slot(j), n * sizeof(slot_type)); + for (slot_type *src = slot(i), *end = src + n, *dest = x->slot(j); + src != end; ++src, ++dest) { + params_type::construct(alloc, dest, src); + } + } + + // Destroys a range of n values, starting at index i. 
+ void value_destroy_n(const size_type i, const size_type n, + allocator_type *alloc) { + for (int j = 0; j < n; ++j) { + value_destroy(i + j, alloc); + } + } + + template <typename P> + friend class btree; + template <typename N, typename R, typename P> + friend struct btree_iterator; + friend class BtreeNodePeer; +}; + +template <typename Node, typename Reference, typename Pointer> +struct btree_iterator { + private: + using key_type = typename Node::key_type; + using size_type = typename Node::size_type; + using params_type = typename Node::params_type; + + using node_type = Node; + using normal_node = typename std::remove_const<Node>::type; + using const_node = const Node; + using normal_pointer = typename params_type::pointer; + using normal_reference = typename params_type::reference; + using const_pointer = typename params_type::const_pointer; + using const_reference = typename params_type::const_reference; + using slot_type = typename params_type::slot_type; + + using iterator = + btree_iterator<normal_node, normal_reference, normal_pointer>; + using const_iterator = + btree_iterator<const_node, const_reference, const_pointer>; + + public: + // These aliases are public for std::iterator_traits. + using difference_type = typename Node::difference_type; + using value_type = typename params_type::value_type; + using pointer = Pointer; + using reference = Reference; + using iterator_category = std::bidirectional_iterator_tag; + + btree_iterator() : node(nullptr), position(-1) {} + explicit btree_iterator(Node *n) : node(n), position(n->start()) {} + btree_iterator(Node *n, int p) : node(n), position(p) {} + + // NOTE: this SFINAE allows for implicit conversions from iterator to + // const_iterator, but it specifically avoids defining copy constructors so + // that btree_iterator can be trivially copyable. This is for performance and + // binary size reasons. + template <typename N, typename R, typename P, + absl::enable_if_t< + std::is_same<btree_iterator<N, R, P>, iterator>::value && + std::is_same<btree_iterator, const_iterator>::value, + int> = 0> + btree_iterator(const btree_iterator<N, R, P> &x) // NOLINT + : node(x.node), position(x.position) {} + + private: + // This SFINAE allows explicit conversions from const_iterator to + // iterator, but also avoids defining a copy constructor. + // NOTE: the const_cast is safe because this constructor is only called by + // non-const methods and the container owns the nodes. + template <typename N, typename R, typename P, + absl::enable_if_t< + std::is_same<btree_iterator<N, R, P>, const_iterator>::value && + std::is_same<btree_iterator, iterator>::value, + int> = 0> + explicit btree_iterator(const btree_iterator<N, R, P> &x) + : node(const_cast<node_type *>(x.node)), position(x.position) {} + + // Increment/decrement the iterator. + void increment() { + if (node->leaf() && ++position < node->finish()) { + return; + } + increment_slow(); + } + void increment_slow(); + + void decrement() { + if (node->leaf() && --position >= node->start()) { + return; + } + decrement_slow(); + } + void decrement_slow(); + + public: + bool operator==(const const_iterator &x) const { + return node == x.node && position == x.position; + } + bool operator!=(const const_iterator &x) const { + return node != x.node || position != x.position; + } + + // Accessors for the key/value the iterator is pointing at. 
reference operator*() const { return node->value(position); } + pointer operator->() const { return &node->value(position); } + + btree_iterator &operator++() { + increment(); + return *this; + } + btree_iterator &operator--() { + decrement(); + return *this; + } + btree_iterator operator++(int) { + btree_iterator tmp = *this; + ++*this; + return tmp; + } + btree_iterator operator--(int) { + btree_iterator tmp = *this; + --*this; + return tmp; + } + + private: + template <typename Params> + friend class btree; + template <typename Tree> + friend class btree_container; + template <typename Tree> + friend class btree_set_container; + template <typename Tree> + friend class btree_map_container; + template <typename Tree> + friend class btree_multiset_container; + template <typename N, typename R, typename P> + friend struct btree_iterator; + template <typename TreeType, typename CheckerType> + friend class base_checker; + + const key_type &key() const { return node->key(position); } + slot_type *slot() { return node->slot(position); } + + // The node in the tree the iterator is pointing at. + Node *node; + // The position within the node of the tree the iterator is pointing at. + // TODO(ezb): make this a field_type + int position; +}; + +template <typename Params> +class btree { + using node_type = btree_node<Params>; + using is_key_compare_to = typename Params::is_key_compare_to; + + // We use a static empty node for the root/leftmost/rightmost of empty btrees + // in order to avoid branching in begin()/end(). + struct alignas(node_type::Alignment()) EmptyNodeType : node_type { + using field_type = typename node_type::field_type; + node_type *parent; + field_type position = 0; + field_type start = 0; + field_type finish = 0; + // max_count must be != kInternalNodeMaxCount (so that this node is regarded + // as a leaf node). max_count() is never called when the tree is empty. + field_type max_count = node_type::kInternalNodeMaxCount + 1; + +#ifdef _MSC_VER + // MSVC has constexpr code generation bugs here. + EmptyNodeType() : parent(this) {} +#else + constexpr EmptyNodeType(node_type *p) : parent(p) {} +#endif + }; + + static node_type *EmptyNode() { +#ifdef _MSC_VER + static EmptyNodeType *empty_node = new EmptyNodeType; + // This assert fails on some other construction methods.
+ assert(empty_node->parent == empty_node); + return empty_node; +#else + static constexpr EmptyNodeType empty_node( + const_cast<EmptyNodeType *>(&empty_node)); + return const_cast<EmptyNodeType *>(&empty_node); +#endif + } + + enum { + kNodeValues = node_type::kNodeValues, + kMinNodeValues = kNodeValues / 2, + }; + + struct node_stats { + using size_type = typename Params::size_type; + + node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {} + + node_stats &operator+=(const node_stats &x) { + leaf_nodes += x.leaf_nodes; + internal_nodes += x.internal_nodes; + return *this; + } + + size_type leaf_nodes; + size_type internal_nodes; + }; + + public: + using key_type = typename Params::key_type; + using value_type = typename Params::value_type; + using size_type = typename Params::size_type; + using difference_type = typename Params::difference_type; + using key_compare = typename Params::key_compare; + using value_compare = typename Params::value_compare; + using allocator_type = typename Params::allocator_type; + using reference = typename Params::reference; + using const_reference = typename Params::const_reference; + using pointer = typename Params::pointer; + using const_pointer = typename Params::const_pointer; + using iterator = btree_iterator<node_type, reference, pointer>; + using const_iterator = typename iterator::const_iterator; + using reverse_iterator = std::reverse_iterator<iterator>; + using const_reverse_iterator = std::reverse_iterator<const_iterator>; + using node_handle_type = node_handle<Params, Params, allocator_type>; + + // Internal types made public for use by btree_container types. + using params_type = Params; + using slot_type = typename Params::slot_type; + + private: + // For use in copy_or_move_values_in_order. + const value_type &maybe_move_from_iterator(const_iterator x) { return *x; } + value_type &&maybe_move_from_iterator(iterator x) { return std::move(*x); } + + // Copies or moves (depending on the template parameter) the values in + // x into this btree in their order in x. This btree must be empty before this + // method is called. This method is used in copy construction, copy + // assignment, and move assignment. + template <typename Btree> + void copy_or_move_values_in_order(Btree *x); + + // Validates that various assumptions/requirements are true at compile time. + constexpr static bool static_assert_validation(); + + public: + btree(const key_compare &comp, const allocator_type &alloc); + + btree(const btree &x); + btree(btree &&x) noexcept + : root_(std::move(x.root_)), + rightmost_(absl::exchange(x.rightmost_, EmptyNode())), + size_(absl::exchange(x.size_, 0)) { + x.mutable_root() = EmptyNode(); + } + + ~btree() { + // Put static_asserts in destructor to avoid triggering them before the type + // is complete. + static_assert(static_assert_validation(), "This call must be elided."); + clear(); + } + + // Assign the contents of x to *this. 
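+  // (Copy assignment clears this tree, copies x's comparator (and its
+  // allocator, when propagate_on_container_copy_assignment is set), and then
+  // re-inserts x's values in order; see the definitions below.)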
+  btree &operator=(const btree &x);
+  btree &operator=(btree &&x) noexcept;
+
+  iterator begin() { return iterator(leftmost()); }
+  const_iterator begin() const { return const_iterator(leftmost()); }
+  iterator end() { return iterator(rightmost_, rightmost_->finish()); }
+  const_iterator end() const {
+    return const_iterator(rightmost_, rightmost_->finish());
+  }
+  reverse_iterator rbegin() { return reverse_iterator(end()); }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(end());
+  }
+  reverse_iterator rend() { return reverse_iterator(begin()); }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(begin());
+  }
+
+  // Finds the first element whose key is not less than key.
+  template <typename K>
+  iterator lower_bound(const K &key) {
+    return internal_end(internal_lower_bound(key));
+  }
+  template <typename K>
+  const_iterator lower_bound(const K &key) const {
+    return internal_end(internal_lower_bound(key));
+  }
+
+  // Finds the first element whose key is greater than key.
+  template <typename K>
+  iterator upper_bound(const K &key) {
+    return internal_end(internal_upper_bound(key));
+  }
+  template <typename K>
+  const_iterator upper_bound(const K &key) const {
+    return internal_end(internal_upper_bound(key));
+  }
+
+  // Finds the range of values which compare equal to key. The first member of
+  // the returned pair is equal to lower_bound(key). The second member of the
+  // pair is equal to upper_bound(key).
+  template <typename K>
+  std::pair<iterator, iterator> equal_range(const K &key) {
+    return {lower_bound(key), upper_bound(key)};
+  }
+  template <typename K>
+  std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
+    return {lower_bound(key), upper_bound(key)};
+  }
+
+  // Inserts a value into the btree only if it does not already exist. The
+  // boolean return value indicates whether insertion succeeded or failed.
+  // Requirement: if `key` already exists in the btree, does not consume `args`.
+  // Requirement: `key` is never referenced after consuming `args`.
+  template <typename... Args>
+  std::pair<iterator, bool> insert_unique(const key_type &key, Args &&... args);
+
+  // Inserts with hint. Checks to see if the value should be placed immediately
+  // before `position` in the tree. If so, then the insertion will take
+  // amortized constant time. If not, the insertion will take amortized
+  // logarithmic time as if a call to insert_unique() were made.
+  // Requirement: if `key` already exists in the btree, does not consume `args`.
+  // Requirement: `key` is never referenced after consuming `args`.
+  template <typename... Args>
+  std::pair<iterator, bool> insert_hint_unique(iterator position,
+                                               const key_type &key,
+                                               Args &&... args);
+
+  // Insert a range of values into the btree.
+  template <typename InputIterator>
+  void insert_iterator_unique(InputIterator b, InputIterator e);
+
+  // Inserts a value into the btree.
+  template <typename ValueType>
+  iterator insert_multi(const key_type &key, ValueType &&v);
+
+  // Inserts a value into the btree.
+  template <typename ValueType>
+  iterator insert_multi(ValueType &&v) {
+    return insert_multi(params_type::key(v), std::forward<ValueType>(v));
+  }
+
+  // Insert with hint. Check to see if the value should be placed immediately
+  // before position in the tree. If so, then the insertion will take
+  // amortized constant time. If not, the insertion will take amortized
+  // logarithmic time as if a call to insert_multi(v) were made.
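+  // For example, through the public wrappers (a usage sketch, assuming the
+  // btree_multiset adapter declared in absl/container/btree_set.h):
+  //
+  //   absl::btree_multiset<int> s;
+  //   for (int v : {1, 2, 3, 4}) s.insert(s.end(), v);  // amortized O(1)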
+ template <typename ValueType> + iterator insert_hint_multi(iterator position, ValueType &&v); + + // Insert a range of values into the btree. + template <typename InputIterator> + void insert_iterator_multi(InputIterator b, InputIterator e); + + // Erase the specified iterator from the btree. The iterator must be valid + // (i.e. not equal to end()). Return an iterator pointing to the node after + // the one that was erased (or end() if none exists). + // Requirement: does not read the value at `*iter`. + iterator erase(iterator iter); + + // Erases range. Returns the number of keys erased and an iterator pointing + // to the element after the last erased element. + std::pair<size_type, iterator> erase_range(iterator begin, iterator end); + + // Erases the specified key from the btree. Returns 1 if an element was + // erased and 0 otherwise. + template <typename K> + size_type erase_unique(const K &key); + + // Erases all of the entries matching the specified key from the + // btree. Returns the number of elements erased. + template <typename K> + size_type erase_multi(const K &key); + + // Finds the iterator corresponding to a key or returns end() if the key is + // not present. + template <typename K> + iterator find(const K &key) { + return internal_end(internal_find(key)); + } + template <typename K> + const_iterator find(const K &key) const { + return internal_end(internal_find(key)); + } + + // Returns a count of the number of times the key appears in the btree. + template <typename K> + size_type count_unique(const K &key) const { + const iterator begin = internal_find(key); + if (begin.node == nullptr) { + // The key doesn't exist in the tree. + return 0; + } + return 1; + } + // Returns a count of the number of times the key appears in the btree. + template <typename K> + size_type count_multi(const K &key) const { + const auto range = equal_range(key); + return std::distance(range.first, range.second); + } + + // Clear the btree, deleting all of the values it contains. + void clear(); + + // Swap the contents of *this and x. + void swap(btree &x); + + const key_compare &key_comp() const noexcept { + return root_.template get<0>(); + } + template <typename K, typename LK> + bool compare_keys(const K &x, const LK &y) const { + return compare_internal::compare_result_as_less_than(key_comp()(x, y)); + } + + value_compare value_comp() const { return value_compare(key_comp()); } + + // Verifies the structure of the btree. + void verify() const; + + // Size routines. + size_type size() const { return size_; } + size_type max_size() const { return (std::numeric_limits<size_type>::max)(); } + bool empty() const { return size_ == 0; } + + // The height of the btree. An empty tree will have height 0. + size_type height() const { + size_type h = 0; + if (!empty()) { + // Count the length of the chain from the leftmost node up to the + // root. We actually count from the root back around to the level below + // the root, but the calculation is the same because of the circularity + // of that traversal. + const node_type *n = root(); + do { + ++h; + n = n->parent(); + } while (n != root()); + } + return h; + } + + // The number of internal, leaf and total nodes used by the btree. 
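+  // (nodes() == leaf_nodes() + internal_nodes(); each is computed by walking
+  // the whole tree via internal_stats(), so these are O(n) routines intended
+  // for metrics and debugging rather than for hot paths.)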
+ size_type leaf_nodes() const { return internal_stats(root()).leaf_nodes; } + size_type internal_nodes() const { + return internal_stats(root()).internal_nodes; + } + size_type nodes() const { + node_stats stats = internal_stats(root()); + return stats.leaf_nodes + stats.internal_nodes; + } + + // The total number of bytes used by the btree. + size_type bytes_used() const { + node_stats stats = internal_stats(root()); + if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) { + return sizeof(*this) + node_type::LeafSize(root()->max_count()); + } else { + return sizeof(*this) + stats.leaf_nodes * node_type::LeafSize() + + stats.internal_nodes * node_type::InternalSize(); + } + } + + // The average number of bytes used per value stored in the btree. + static double average_bytes_per_value() { + // Returns the number of bytes per value on a leaf node that is 75% + // full. Experimentally, this matches up nicely with the computed number of + // bytes per value in trees that had their values inserted in random order. + return node_type::LeafSize() / (kNodeValues * 0.75); + } + + // The fullness of the btree. Computed as the number of elements in the btree + // divided by the maximum number of elements a tree with the current number + // of nodes could hold. A value of 1 indicates perfect space + // utilization. Smaller values indicate space wastage. + // Returns 0 for empty trees. + double fullness() const { + if (empty()) return 0.0; + return static_cast<double>(size()) / (nodes() * kNodeValues); + } + // The overhead of the btree structure in bytes per node. Computed as the + // total number of bytes used by the btree minus the number of bytes used for + // storing elements divided by the number of elements. + // Returns 0 for empty trees. + double overhead() const { + if (empty()) return 0.0; + return (bytes_used() - size() * sizeof(value_type)) / + static_cast<double>(size()); + } + + // The allocator used by the btree. + allocator_type get_allocator() const { return allocator(); } + + private: + // Internal accessor routines. + node_type *root() { return root_.template get<2>(); } + const node_type *root() const { return root_.template get<2>(); } + node_type *&mutable_root() noexcept { return root_.template get<2>(); } + key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); } + + // The leftmost node is stored as the parent of the root node. + node_type *leftmost() { return root()->parent(); } + const node_type *leftmost() const { return root()->parent(); } + + // Allocator routines. + allocator_type *mutable_allocator() noexcept { + return &root_.template get<1>(); + } + const allocator_type &allocator() const noexcept { + return root_.template get<1>(); + } + + // Allocates a correctly aligned node of at least size bytes using the + // allocator. + node_type *allocate(const size_type size) { + return reinterpret_cast<node_type *>( + absl::container_internal::Allocate<node_type::Alignment()>( + mutable_allocator(), size)); + } + + // Node creation/deletion routines. 
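+  // (Each node is allocated as raw, suitably aligned bytes of
+  // InternalSize()/LeafSize() and initialized in place; deletion destroys the
+  // live values before returning the bytes to the allocator.)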
+ node_type *new_internal_node(node_type *parent) { + node_type *p = allocate(node_type::InternalSize()); + return node_type::init_internal(p, parent); + } + node_type *new_leaf_node(node_type *parent) { + node_type *p = allocate(node_type::LeafSize()); + return node_type::init_leaf(p, parent, kNodeValues); + } + node_type *new_leaf_root_node(const int max_count) { + node_type *p = allocate(node_type::LeafSize(max_count)); + return node_type::init_leaf(p, p, max_count); + } + + // Deletion helper routines. + void erase_same_node(iterator begin, iterator end); + iterator erase_from_leaf_node(iterator begin, size_type to_erase); + iterator rebalance_after_delete(iterator iter); + + // Deallocates a node of a certain size in bytes using the allocator. + void deallocate(const size_type size, node_type *node) { + absl::container_internal::Deallocate<node_type::Alignment()>( + mutable_allocator(), node, size); + } + + void delete_internal_node(node_type *node) { + node->destroy(mutable_allocator()); + deallocate(node_type::InternalSize(), node); + } + void delete_leaf_node(node_type *node) { + node->destroy(mutable_allocator()); + deallocate(node_type::LeafSize(node->max_count()), node); + } + + // Rebalances or splits the node iter points to. + void rebalance_or_split(iterator *iter); + + // Merges the values of left, right and the delimiting key on their parent + // onto left, removing the delimiting key and deleting right. + void merge_nodes(node_type *left, node_type *right); + + // Tries to merge node with its left or right sibling, and failing that, + // rebalance with its left or right sibling. Returns true if a merge + // occurred, at which point it is no longer valid to access node. Returns + // false if no merging took place. + bool try_merge_or_rebalance(iterator *iter); + + // Tries to shrink the height of the tree by 1. + void try_shrink(); + + iterator internal_end(iterator iter) { + return iter.node != nullptr ? iter : end(); + } + const_iterator internal_end(const_iterator iter) const { + return iter.node != nullptr ? iter : end(); + } + + // Emplaces a value into the btree immediately before iter. Requires that + // key(v) <= iter.key() and (--iter).key() <= key(v). + template <typename... Args> + iterator internal_emplace(iterator iter, Args &&... args); + + // Returns an iterator pointing to the first value >= the value "iter" is + // pointing at. Note that "iter" might be pointing to an invalid location such + // as iter.position == iter.node->finish(). This routine simply moves iter up + // in the tree to a valid location. + // Requires: iter.node is non-null. + template <typename IterType> + static IterType internal_last(IterType iter); + + // Returns an iterator pointing to the leaf position at which key would + // reside in the tree. We provide 2 versions of internal_locate. The first + // version uses a less-than comparator and is incapable of distinguishing when + // there is an exact match. The second version is for the key-compare-to + // specialization and distinguishes exact matches. The key-compare-to + // specialization allows the caller to avoid a subsequent comparison to + // determine if an exact match was made, which is important for keys with + // expensive comparison, such as strings. 
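+  // For example, a comparator whose call operator returns
+  // absl::weak_ordering opts into the key-compare-to specialization (a
+  // sketch, assuming string_view keys; `ThreeWayCmp` is an illustrative
+  // name, not part of this header):
+  //
+  //   struct ThreeWayCmp {
+  //     absl::weak_ordering operator()(absl::string_view a,
+  //                                    absl::string_view b) const {
+  //       const int c = a.compare(b);
+  //       return c < 0 ? absl::weak_ordering::less
+  //              : c == 0 ? absl::weak_ordering::equivalent
+  //                       : absl::weak_ordering::greater;
+  //     }
+  //   };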
+ template <typename K> + SearchResult<iterator, is_key_compare_to::value> internal_locate( + const K &key) const; + + template <typename K> + SearchResult<iterator, false> internal_locate_impl( + const K &key, std::false_type /* IsCompareTo */) const; + + template <typename K> + SearchResult<iterator, true> internal_locate_impl( + const K &key, std::true_type /* IsCompareTo */) const; + + // Internal routine which implements lower_bound(). + template <typename K> + iterator internal_lower_bound(const K &key) const; + + // Internal routine which implements upper_bound(). + template <typename K> + iterator internal_upper_bound(const K &key) const; + + // Internal routine which implements find(). + template <typename K> + iterator internal_find(const K &key) const; + + // Deletes a node and all of its children. + void internal_clear(node_type *node); + + // Verifies the tree structure of node. + int internal_verify(const node_type *node, const key_type *lo, + const key_type *hi) const; + + node_stats internal_stats(const node_type *node) const { + // The root can be a static empty node. + if (node == nullptr || (node == root() && empty())) { + return node_stats(0, 0); + } + if (node->leaf()) { + return node_stats(1, 0); + } + node_stats res(0, 1); + for (int i = node->start(); i <= node->finish(); ++i) { + res += internal_stats(node->child(i)); + } + return res; + } + + public: + // Exposed only for tests. + static bool testonly_uses_linear_node_search() { + return node_type::testonly_uses_linear_node_search(); + } + + private: + // We use compressed tuple in order to save space because key_compare and + // allocator_type are usually empty. + absl::container_internal::CompressedTuple<key_compare, allocator_type, + node_type *> + root_; + + // A pointer to the rightmost node. Note that the leftmost node is stored as + // the root's parent. + node_type *rightmost_; + + // Number of values. + size_type size_; +}; + +//// +// btree_node methods +template <typename P> +template <typename... Args> +inline void btree_node<P>::emplace_value(const size_type i, + allocator_type *alloc, + Args &&... args) { + assert(i >= start()); + assert(i <= finish()); + // Shift old values to create space for new value and then construct it in + // place. 
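+  // (For example, emplacing at i == 1 in a leaf holding [A, B, C]: C is
+  // move-constructed into the uninitialized slot 3, B is moved from slot 1
+  // to slot 2, the moved-from object in slot 1 is destroyed, and the new
+  // value is constructed there, giving [A, new, B, C].)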
+  if (i < finish()) {
+    value_init(finish(), alloc, slot(finish() - 1));
+    for (size_type j = finish() - 1; j > i; --j)
+      params_type::move(alloc, slot(j - 1), slot(j));
+    value_destroy(i, alloc);
+  }
+  value_init(i, alloc, std::forward<Args>(args)...);
+  set_finish(finish() + 1);
+
+  if (!leaf() && finish() > i + 1) {
+    for (int j = finish(); j > i + 1; --j) {
+      set_child(j, child(j - 1));
+    }
+    clear_child(i + 1);
+  }
+}
+
+template <typename P>
+inline void btree_node<P>::remove_value(const int i, allocator_type *alloc) {
+  if (!leaf() && finish() > i + 1) {
+    assert(child(i + 1)->count() == 0);
+    for (size_type j = i + 1; j < finish(); ++j) {
+      set_child(j, child(j + 1));
+    }
+    clear_child(finish());
+  }
+
+  remove_values_ignore_children(i, /*to_erase=*/1, alloc);
+}
+
+template <typename P>
+inline void btree_node<P>::remove_values_ignore_children(
+    const int i, const int to_erase, allocator_type *alloc) {
+  params_type::move(alloc, slot(i + to_erase), finish_slot(), slot(i));
+  value_destroy_n(finish() - to_erase, to_erase, alloc);
+  set_finish(finish() - to_erase);
+}
+
+template <typename P>
+void btree_node<P>::rebalance_right_to_left(const int to_move,
+                                            btree_node *right,
+                                            allocator_type *alloc) {
+  assert(parent() == right->parent());
+  assert(position() + 1 == right->position());
+  assert(right->count() >= count());
+  assert(to_move >= 1);
+  assert(to_move <= right->count());
+
+  // 1) Move the delimiting value in the parent to the left node.
+  value_init(finish(), alloc, parent()->slot(position()));
+
+  // 2) Move the (to_move - 1) values from the right node to the left node.
+  right->uninitialized_move_n(to_move - 1, right->start(), finish() + 1, this,
+                              alloc);
+
+  // 3) Move the new delimiting value to the parent from the right node.
+  params_type::move(alloc, right->slot(to_move - 1),
+                    parent()->slot(position()));
+
+  // 4) Shift the values in the right node to their correct position.
+  params_type::move(alloc, right->slot(to_move), right->finish_slot(),
+                    right->start_slot());
+
+  // 5) Destroy the now-empty to_move entries in the right node.
+  right->value_destroy_n(right->finish() - to_move, to_move, alloc);
+
+  if (!leaf()) {
+    // Move the child pointers from the right to the left node.
+    for (int i = 0; i < to_move; ++i) {
+      init_child(finish() + i + 1, right->child(i));
+    }
+    for (int i = right->start(); i <= right->finish() - to_move; ++i) {
+      assert(i + to_move <= right->max_count());
+      right->init_child(i, right->child(i + to_move));
+      right->clear_child(i + to_move);
+    }
+  }
+
+  // Fixup `finish` on the left and right nodes.
+  set_finish(finish() + to_move);
+  right->set_finish(right->finish() - to_move);
+}
+
+template <typename P>
+void btree_node<P>::rebalance_left_to_right(const int to_move,
+                                            btree_node *right,
+                                            allocator_type *alloc) {
+  assert(parent() == right->parent());
+  assert(position() + 1 == right->position());
+  assert(count() >= right->count());
+  assert(to_move >= 1);
+  assert(to_move <= count());
+
+  // Values in the right node are shifted to the right to make room for the
+  // new to_move values. Then, the delimiting value in the parent and the
+  // other (to_move - 1) values in the left node are moved into the right node.
+  // Lastly, a new delimiting value is moved from the left node into the
+  // parent, and the remaining empty left node entries are destroyed.
+
+  if (right->count() >= to_move) {
+    // The original locations of the right->count() values are sufficient to
+    // hold the new to_move entries from the parent and left node.
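+    // (For example, with to_move == 2 and right == [X, Y, Z]: Y and Z move
+    // into uninitialized slots, X shifts up to slot 2, the old delimiter
+    // lands in slot 1, and the last value of the left node lands in slot 0,
+    // yielding [left_back, delimiter, X, Y, Z].)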
+ + // 1) Shift existing values in the right node to their correct positions. + right->uninitialized_move_n(to_move, right->finish() - to_move, + right->finish(), right, alloc); + for (slot_type *src = right->slot(right->finish() - to_move - 1), + *dest = right->slot(right->finish() - 1), + *end = right->start_slot(); + src >= end; --src, --dest) { + params_type::move(alloc, src, dest); + } + + // 2) Move the delimiting value in the parent to the right node. + params_type::move(alloc, parent()->slot(position()), + right->slot(to_move - 1)); + + // 3) Move the (to_move - 1) values from the left node to the right node. + params_type::move(alloc, slot(finish() - (to_move - 1)), finish_slot(), + right->start_slot()); + } else { + // The right node does not have enough initialized space to hold the new + // to_move entries, so part of them will move to uninitialized space. + + // 1) Shift existing values in the right node to their correct positions. + right->uninitialized_move_n(right->count(), right->start(), + right->start() + to_move, right, alloc); + + // 2) Move the delimiting value in the parent to the right node. + right->value_init(to_move - 1, alloc, parent()->slot(position())); + + // 3) Move the (to_move - 1) values from the left node to the right node. + const size_type uninitialized_remaining = to_move - right->count() - 1; + uninitialized_move_n(uninitialized_remaining, + finish() - uninitialized_remaining, right->finish(), + right, alloc); + params_type::move(alloc, slot(finish() - (to_move - 1)), + slot(finish() - uninitialized_remaining), + right->start_slot()); + } + + // 4) Move the new delimiting value to the parent from the left node. + params_type::move(alloc, slot(finish() - to_move), + parent()->slot(position())); + + // 5) Destroy the now-empty to_move entries in the left node. + value_destroy_n(finish() - to_move, to_move, alloc); + + if (!leaf()) { + // Move the child pointers from the left to the right node. + for (int i = right->finish(); i >= right->start(); --i) { + right->init_child(i + to_move, right->child(i)); + right->clear_child(i); + } + for (int i = 1; i <= to_move; ++i) { + right->init_child(i - 1, child(finish() - to_move + i)); + clear_child(finish() - to_move + i); + } + } + + // Fixup the counts on the left and right nodes. + set_finish(finish() - to_move); + right->set_finish(right->finish() + to_move); +} + +template <typename P> +void btree_node<P>::split(const int insert_position, btree_node *dest, + allocator_type *alloc) { + assert(dest->count() == 0); + assert(max_count() == kNodeValues); + + // We bias the split based on the position being inserted. If we're + // inserting at the beginning of the left node then bias the split to put + // more values on the right node. If we're inserting at the end of the + // right node then bias the split to put more values on the left node. + if (insert_position == start()) { + dest->set_finish(dest->start() + finish() - 1); + } else if (insert_position == kNodeValues) { + dest->set_finish(dest->start()); + } else { + dest->set_finish(dest->start() + count() / 2); + } + set_finish(finish() - dest->count()); + assert(count() >= 1); + + // Move values from the left sibling to the right sibling. + uninitialized_move_n(dest->count(), finish(), dest->start(), dest, alloc); + + // Destroy the now-empty entries in the left node. + value_destroy_n(finish(), dest->count(), alloc); + + // The split key is the largest value in the left sibling. 
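+  // (finish is decremented first so that this value's slot can be handed to
+  // the parent's emplace_value() and then destroyed; dest is then hooked in
+  // as the new child at position() + 1.)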
+ --mutable_finish(); + parent()->emplace_value(position(), alloc, finish_slot()); + value_destroy(finish(), alloc); + parent()->init_child(position() + 1, dest); + + if (!leaf()) { + for (int i = dest->start(), j = finish() + 1; i <= dest->finish(); + ++i, ++j) { + assert(child(j) != nullptr); + dest->init_child(i, child(j)); + clear_child(j); + } + } +} + +template <typename P> +void btree_node<P>::merge(btree_node *src, allocator_type *alloc) { + assert(parent() == src->parent()); + assert(position() + 1 == src->position()); + + // Move the delimiting value to the left node. + value_init(finish(), alloc, parent()->slot(position())); + + // Move the values from the right to the left node. + src->uninitialized_move_n(src->count(), src->start(), finish() + 1, this, + alloc); + + // Destroy the now-empty entries in the right node. + src->value_destroy_n(src->start(), src->count(), alloc); + + if (!leaf()) { + // Move the child pointers from the right to the left node. + for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) { + init_child(j, src->child(i)); + src->clear_child(i); + } + } + + // Fixup `finish` on the src and dest nodes. + set_finish(start() + 1 + count() + src->count()); + src->set_finish(src->start()); + + // Remove the value on the parent node. + parent()->remove_value(position(), alloc); +} + +template <typename P> +void btree_node<P>::swap(btree_node *x, allocator_type *alloc) { + using std::swap; + assert(leaf() == x->leaf()); + + // Determine which is the smaller/larger node. + btree_node *smaller = this, *larger = x; + if (smaller->count() > larger->count()) { + swap(smaller, larger); + } + + // Swap the values. + for (slot_type *a = smaller->start_slot(), *b = larger->start_slot(), + *end = smaller->finish_slot(); + a != end; ++a, ++b) { + params_type::swap(alloc, a, b); + } + + // Move values that can't be swapped. + const size_type to_move = larger->count() - smaller->count(); + larger->uninitialized_move_n(to_move, smaller->finish(), smaller->finish(), + smaller, alloc); + larger->value_destroy_n(smaller->finish(), to_move, alloc); + + if (!leaf()) { + // Swap the child pointers. + std::swap_ranges(&smaller->mutable_child(smaller->start()), + &smaller->mutable_child(smaller->finish() + 1), + &larger->mutable_child(larger->start())); + // Update swapped children's parent pointers. + int i = smaller->start(); + int j = larger->start(); + for (; i <= smaller->finish(); ++i, ++j) { + smaller->child(i)->set_parent(smaller); + larger->child(j)->set_parent(larger); + } + // Move the child pointers that couldn't be swapped. + for (; j <= larger->finish(); ++i, ++j) { + smaller->init_child(i, larger->child(j)); + larger->clear_child(j); + } + } + + // Swap the `finish`s. + // TODO(ezb): with floating storage, will also need to swap starts. 
+ swap(mutable_finish(), x->mutable_finish()); +} + +//// +// btree_iterator methods +template <typename N, typename R, typename P> +void btree_iterator<N, R, P>::increment_slow() { + if (node->leaf()) { + assert(position >= node->finish()); + btree_iterator save(*this); + while (position == node->finish() && !node->is_root()) { + assert(node->parent()->child(node->position()) == node); + position = node->position(); + node = node->parent(); + } + if (position == node->finish()) { + *this = save; + } + } else { + assert(position < node->finish()); + node = node->child(position + 1); + while (!node->leaf()) { + node = node->start_child(); + } + position = node->start(); + } +} + +template <typename N, typename R, typename P> +void btree_iterator<N, R, P>::decrement_slow() { + if (node->leaf()) { + assert(position <= -1); + btree_iterator save(*this); + while (position < node->start() && !node->is_root()) { + assert(node->parent()->child(node->position()) == node); + position = node->position() - 1; + node = node->parent(); + } + if (position < node->start()) { + *this = save; + } + } else { + assert(position >= node->start()); + node = node->child(position); + while (!node->leaf()) { + node = node->child(node->finish()); + } + position = node->finish() - 1; + } +} + +//// +// btree methods +template <typename P> +template <typename Btree> +void btree<P>::copy_or_move_values_in_order(Btree *x) { + static_assert(std::is_same<btree, Btree>::value || + std::is_same<const btree, Btree>::value, + "Btree type must be same or const."); + assert(empty()); + + // We can avoid key comparisons because we know the order of the + // values is the same order we'll store them in. + auto iter = x->begin(); + if (iter == x->end()) return; + insert_multi(maybe_move_from_iterator(iter)); + ++iter; + for (; iter != x->end(); ++iter) { + // If the btree is not empty, we can just insert the new value at the end + // of the tree. + internal_emplace(end(), maybe_move_from_iterator(iter)); + } +} + +template <typename P> +constexpr bool btree<P>::static_assert_validation() { + static_assert(std::is_nothrow_copy_constructible<key_compare>::value, + "Key comparison must be nothrow copy constructible"); + static_assert(std::is_nothrow_copy_constructible<allocator_type>::value, + "Allocator must be nothrow copy constructible"); + static_assert(type_traits_internal::is_trivially_copyable<iterator>::value, + "iterator not trivially copyable."); + + // Note: We assert that kTargetValues, which is computed from + // Params::kTargetNodeSize, must fit the node_type::field_type. + static_assert( + kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))), + "target node size too large"); + + // Verify that key_compare returns an absl::{weak,strong}_ordering or bool. + using compare_result_type = + absl::result_of_t<key_compare(key_type, key_type)>; + static_assert( + std::is_same<compare_result_type, bool>::value || + std::is_convertible<compare_result_type, absl::weak_ordering>::value, + "key comparison function must return absl::{weak,strong}_ordering or " + "bool."); + + // Test the assumption made in setting kNodeValueSpace. 
+ static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4, + "node space assumption incorrect"); + + return true; +} + +template <typename P> +btree<P>::btree(const key_compare &comp, const allocator_type &alloc) + : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {} + +template <typename P> +btree<P>::btree(const btree &x) : btree(x.key_comp(), x.allocator()) { + copy_or_move_values_in_order(&x); +} + +template <typename P> +template <typename... Args> +auto btree<P>::insert_unique(const key_type &key, Args &&... args) + -> std::pair<iterator, bool> { + if (empty()) { + mutable_root() = rightmost_ = new_leaf_root_node(1); + } + + auto res = internal_locate(key); + iterator &iter = res.value; + + if (res.HasMatch()) { + if (res.IsEq()) { + // The key already exists in the tree, do nothing. + return {iter, false}; + } + } else { + iterator last = internal_last(iter); + if (last.node && !compare_keys(key, last.key())) { + // The key already exists in the tree, do nothing. + return {last, false}; + } + } + return {internal_emplace(iter, std::forward<Args>(args)...), true}; +} + +template <typename P> +template <typename... Args> +inline auto btree<P>::insert_hint_unique(iterator position, const key_type &key, + Args &&... args) + -> std::pair<iterator, bool> { + if (!empty()) { + if (position == end() || compare_keys(key, position.key())) { + if (position == begin() || compare_keys(std::prev(position).key(), key)) { + // prev.key() < key < position.key() + return {internal_emplace(position, std::forward<Args>(args)...), true}; + } + } else if (compare_keys(position.key(), key)) { + ++position; + if (position == end() || compare_keys(key, position.key())) { + // {original `position`}.key() < key < {current `position`}.key() + return {internal_emplace(position, std::forward<Args>(args)...), true}; + } + } else { + // position.key() == key + return {position, false}; + } + } + return insert_unique(key, std::forward<Args>(args)...); +} + +template <typename P> +template <typename InputIterator> +void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert_hint_unique(end(), params_type::key(*b), *b); + } +} + +template <typename P> +template <typename ValueType> +auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator { + if (empty()) { + mutable_root() = rightmost_ = new_leaf_root_node(1); + } + + iterator iter = internal_upper_bound(key); + if (iter.node == nullptr) { + iter = end(); + } + return internal_emplace(iter, std::forward<ValueType>(v)); +} + +template <typename P> +template <typename ValueType> +auto btree<P>::insert_hint_multi(iterator position, ValueType &&v) -> iterator { + if (!empty()) { + const key_type &key = params_type::key(v); + if (position == end() || !compare_keys(position.key(), key)) { + if (position == begin() || + !compare_keys(key, std::prev(position).key())) { + // prev.key() <= key <= position.key() + return internal_emplace(position, std::forward<ValueType>(v)); + } + } else { + ++position; + if (position == end() || !compare_keys(position.key(), key)) { + // {original `position`}.key() < key < {current `position`}.key() + return internal_emplace(position, std::forward<ValueType>(v)); + } + } + } + return insert_multi(std::forward<ValueType>(v)); +} + +template <typename P> +template <typename InputIterator> +void btree<P>::insert_iterator_multi(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert_hint_multi(end(), *b); + } +} + +template <typename P> 
+auto btree<P>::operator=(const btree &x) -> btree & {
+  if (this != &x) {
+    clear();
+
+    *mutable_key_comp() = x.key_comp();
+    if (absl::allocator_traits<
+            allocator_type>::propagate_on_container_copy_assignment::value) {
+      *mutable_allocator() = x.allocator();
+    }
+
+    copy_or_move_values_in_order(&x);
+  }
+  return *this;
+}
+
+template <typename P>
+auto btree<P>::operator=(btree &&x) noexcept -> btree & {
+  if (this != &x) {
+    clear();
+
+    using std::swap;
+    if (absl::allocator_traits<
+            allocator_type>::propagate_on_container_move_assignment::value) {
+      // Note: `root_` also contains the allocator and the key comparator.
+      swap(root_, x.root_);
+      swap(rightmost_, x.rightmost_);
+      swap(size_, x.size_);
+    } else {
+      if (allocator() == x.allocator()) {
+        swap(mutable_root(), x.mutable_root());
+        swap(*mutable_key_comp(), *x.mutable_key_comp());
+        swap(rightmost_, x.rightmost_);
+        swap(size_, x.size_);
+      } else {
+        // We aren't allowed to propagate the allocator and the allocator is
+        // different so we can't take over its memory. We must move each element
+        // individually. We need both `x` and `this` to have `x`'s key comparator
+        // while moving the values so we can't swap the key comparators.
+        *mutable_key_comp() = x.key_comp();
+        copy_or_move_values_in_order(&x);
+      }
+    }
+  }
+  return *this;
+}
+
+template <typename P>
+auto btree<P>::erase(iterator iter) -> iterator {
+  bool internal_delete = false;
+  if (!iter.node->leaf()) {
+    // Deletion of a value on an internal node. First, move the largest value
+    // from our left child here, then delete that position (in remove_value()
+    // below). We can get to the largest value from our left child by
+    // decrementing iter.
+    iterator internal_iter(iter);
+    --iter;
+    assert(iter.node->leaf());
+    params_type::move(mutable_allocator(), iter.node->slot(iter.position),
+                      internal_iter.node->slot(internal_iter.position));
+    internal_delete = true;
+  }
+
+  // Delete the key from the leaf.
+  iter.node->remove_value(iter.position, mutable_allocator());
+  --size_;
+
+  // We want to return the next value after the one we just erased. If we
+  // erased from an internal node (internal_delete == true), then the next
+  // value is ++(++iter). If we erased from a leaf node (internal_delete ==
+  // false) then the next value is ++iter. Note that ++iter may point to an
+  // internal node and the value in the internal node may move to a leaf node
+  // (iter.node) when rebalancing is performed at the leaf level.
+
+  iterator res = rebalance_after_delete(iter);
+
+  // If we erased from an internal node, advance the iterator.
+  if (internal_delete) {
+    ++res;
+  }
+  return res;
+}
+
+template <typename P>
+auto btree<P>::rebalance_after_delete(iterator iter) -> iterator {
+  // Merge/rebalance as we walk back up the tree.
+  iterator res(iter);
+  bool first_iteration = true;
+  for (;;) {
+    if (iter.node == root()) {
+      try_shrink();
+      if (empty()) {
+        return end();
+      }
+      break;
+    }
+    if (iter.node->count() >= kMinNodeValues) {
+      break;
+    }
+    bool merged = try_merge_or_rebalance(&iter);
+    // On the first iteration, we should update `res` with `iter` because `res`
+    // may have been invalidated.
+    if (first_iteration) {
+      res = iter;
+      first_iteration = false;
+    }
+    if (!merged) {
+      break;
+    }
+    iter.position = iter.node->position();
+    iter.node = iter.node->parent();
+  }
+
+  // Adjust our return value. If we're pointing at the end of a node, advance
+  // the iterator.
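+  // (This happens when the erased value was the last one in its node; only
+  // end() may legitimately point at finish(), so step back to the last value
+  // and increment onto the next value in the tree, or onto end().)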
+ if (res.position == res.node->finish()) { + res.position = res.node->finish() - 1; + ++res; + } + + return res; +} + +template <typename P> +auto btree<P>::erase_range(iterator begin, iterator end) + -> std::pair<size_type, iterator> { + difference_type count = std::distance(begin, end); + assert(count >= 0); + + if (count == 0) { + return {0, begin}; + } + + if (count == size_) { + clear(); + return {count, this->end()}; + } + + if (begin.node == end.node) { + erase_same_node(begin, end); + size_ -= count; + return {count, rebalance_after_delete(begin)}; + } + + const size_type target_size = size_ - count; + while (size_ > target_size) { + if (begin.node->leaf()) { + const size_type remaining_to_erase = size_ - target_size; + const size_type remaining_in_node = begin.node->finish() - begin.position; + begin = erase_from_leaf_node( + begin, (std::min)(remaining_to_erase, remaining_in_node)); + } else { + begin = erase(begin); + } + } + return {count, begin}; +} + +template <typename P> +void btree<P>::erase_same_node(iterator begin, iterator end) { + assert(begin.node == end.node); + assert(end.position > begin.position); + + node_type *node = begin.node; + size_type to_erase = end.position - begin.position; + if (!node->leaf()) { + // Delete all children between begin and end. + for (size_type i = 0; i < to_erase; ++i) { + internal_clear(node->child(begin.position + i + 1)); + } + // Rotate children after end into new positions. + for (size_type i = begin.position + to_erase + 1; i <= node->finish(); + ++i) { + node->set_child(i - to_erase, node->child(i)); + node->clear_child(i); + } + } + node->remove_values_ignore_children(begin.position, to_erase, + mutable_allocator()); + + // Do not need to update rightmost_, because + // * either end == this->end(), and therefore node == rightmost_, and still + // exists + // * or end != this->end(), and therefore rightmost_ hasn't been erased, since + // it wasn't covered in [begin, end) +} + +template <typename P> +auto btree<P>::erase_from_leaf_node(iterator begin, size_type to_erase) + -> iterator { + node_type *node = begin.node; + assert(node->leaf()); + assert(node->finish() > begin.position); + assert(begin.position + to_erase <= node->finish()); + + node->remove_values_ignore_children(begin.position, to_erase, + mutable_allocator()); + + size_ -= to_erase; + + return rebalance_after_delete(begin); +} + +template <typename P> +template <typename K> +auto btree<P>::erase_unique(const K &key) -> size_type { + const iterator iter = internal_find(key); + if (iter.node == nullptr) { + // The key doesn't exist in the tree, return nothing done. + return 0; + } + erase(iter); + return 1; +} + +template <typename P> +template <typename K> +auto btree<P>::erase_multi(const K &key) -> size_type { + const iterator begin = internal_lower_bound(key); + if (begin.node == nullptr) { + // The key doesn't exist in the tree, return nothing done. + return 0; + } + // Delete all of the keys between begin and upper_bound(key). + const iterator end = internal_end(internal_upper_bound(key)); + return erase_range(begin, end).first; +} + +template <typename P> +void btree<P>::clear() { + if (!empty()) { + internal_clear(root()); + } + mutable_root() = EmptyNode(); + rightmost_ = EmptyNode(); + size_ = 0; +} + +template <typename P> +void btree<P>::swap(btree &x) { + using std::swap; + if (absl::allocator_traits< + allocator_type>::propagate_on_container_swap::value) { + // Note: `root_` also contains the allocator and the key comparator. 
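+    // (Swapping the whole CompressedTuple exchanges the comparator, the
+    // allocator, and the root pointer in one operation.)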
+ swap(root_, x.root_); + } else { + // It's undefined behavior if the allocators are unequal here. + assert(allocator() == x.allocator()); + swap(mutable_root(), x.mutable_root()); + swap(*mutable_key_comp(), *x.mutable_key_comp()); + } + swap(rightmost_, x.rightmost_); + swap(size_, x.size_); +} + +template <typename P> +void btree<P>::verify() const { + assert(root() != nullptr); + assert(leftmost() != nullptr); + assert(rightmost_ != nullptr); + assert(empty() || size() == internal_verify(root(), nullptr, nullptr)); + assert(leftmost() == (++const_iterator(root(), -1)).node); + assert(rightmost_ == (--const_iterator(root(), root()->finish())).node); + assert(leftmost()->leaf()); + assert(rightmost_->leaf()); +} + +template <typename P> +void btree<P>::rebalance_or_split(iterator *iter) { + node_type *&node = iter->node; + int &insert_position = iter->position; + assert(node->count() == node->max_count()); + assert(kNodeValues == node->max_count()); + + // First try to make room on the node by rebalancing. + node_type *parent = node->parent(); + if (node != root()) { + if (node->position() > parent->start()) { + // Try rebalancing with our left sibling. + node_type *left = parent->child(node->position() - 1); + assert(left->max_count() == kNodeValues); + if (left->count() < kNodeValues) { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the end of the right node then we bias rebalancing to + // fill up the left node. + int to_move = (kNodeValues - left->count()) / + (1 + (insert_position < kNodeValues)); + to_move = (std::max)(1, to_move); + + if (insert_position - to_move >= node->start() || + left->count() + to_move < kNodeValues) { + left->rebalance_right_to_left(to_move, node, mutable_allocator()); + + assert(node->max_count() - node->count() == to_move); + insert_position = insert_position - to_move; + if (insert_position < node->start()) { + insert_position = insert_position + left->count() + 1; + node = left; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + if (node->position() < parent->finish()) { + // Try rebalancing with our right sibling. + node_type *right = parent->child(node->position() + 1); + assert(right->max_count() == kNodeValues); + if (right->count() < kNodeValues) { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the beginning of the left node then we bias rebalancing + // to fill up the right node. + int to_move = (kNodeValues - right->count()) / + (1 + (insert_position > node->start())); + to_move = (std::max)(1, to_move); + + if (insert_position <= node->finish() - to_move || + right->count() + to_move < kNodeValues) { + node->rebalance_left_to_right(to_move, right, mutable_allocator()); + + if (insert_position > node->finish()) { + insert_position = insert_position - node->count() - 1; + node = right; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + // Rebalancing failed, make sure there is room on the parent node for a new + // value. + assert(parent->max_count() == kNodeValues); + if (parent->count() == kNodeValues) { + iterator parent_iter(node->parent(), node->position()); + rebalance_or_split(&parent_iter); + } + } else { + // Rebalancing not possible because this is the root node. + // Create a new root node and set the current root node as the child of the + // new root. 
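+    // (Note: `parent` currently holds the old root's parent pointer, which
+    // by convention stores the leftmost node; the new root must record the
+    // same value, since a root split does not change the leftmost node.)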
+ parent = new_internal_node(parent); + parent->init_child(parent->start(), root()); + mutable_root() = parent; + // If the former root was a leaf node, then it's now the rightmost node. + assert(!parent->start_child()->leaf() || + parent->start_child() == rightmost_); + } + + // Split the node. + node_type *split_node; + if (node->leaf()) { + split_node = new_leaf_node(parent); + node->split(insert_position, split_node, mutable_allocator()); + if (rightmost_ == node) rightmost_ = split_node; + } else { + split_node = new_internal_node(parent); + node->split(insert_position, split_node, mutable_allocator()); + } + + if (insert_position > node->finish()) { + insert_position = insert_position - node->count() - 1; + node = split_node; + } +} + +template <typename P> +void btree<P>::merge_nodes(node_type *left, node_type *right) { + left->merge(right, mutable_allocator()); + if (right->leaf()) { + if (rightmost_ == right) rightmost_ = left; + delete_leaf_node(right); + } else { + delete_internal_node(right); + } +} + +template <typename P> +bool btree<P>::try_merge_or_rebalance(iterator *iter) { + node_type *parent = iter->node->parent(); + if (iter->node->position() > parent->start()) { + // Try merging with our left sibling. + node_type *left = parent->child(iter->node->position() - 1); + assert(left->max_count() == kNodeValues); + if (1 + left->count() + iter->node->count() <= kNodeValues) { + iter->position += 1 + left->count(); + merge_nodes(left, iter->node); + iter->node = left; + return true; + } + } + if (iter->node->position() < parent->finish()) { + // Try merging with our right sibling. + node_type *right = parent->child(iter->node->position() + 1); + assert(right->max_count() == kNodeValues); + if (1 + iter->node->count() + right->count() <= kNodeValues) { + merge_nodes(iter->node, right); + return true; + } + // Try rebalancing with our right sibling. We don't perform rebalancing if + // we deleted the first element from iter->node and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the front of the tree. + if (right->count() > kMinNodeValues && + (iter->node->count() == 0 || iter->position > iter->node->start())) { + int to_move = (right->count() - iter->node->count()) / 2; + to_move = (std::min)(to_move, right->count() - 1); + iter->node->rebalance_right_to_left(to_move, right, mutable_allocator()); + return false; + } + } + if (iter->node->position() > parent->start()) { + // Try rebalancing with our left sibling. We don't perform rebalancing if + // we deleted the last element from iter->node and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the back of the tree. + node_type *left = parent->child(iter->node->position() - 1); + if (left->count() > kMinNodeValues && + (iter->node->count() == 0 || iter->position < iter->node->finish())) { + int to_move = (left->count() - iter->node->count()) / 2; + to_move = (std::min)(to_move, left->count() - 1); + left->rebalance_left_to_right(to_move, iter->node, mutable_allocator()); + iter->position += to_move; + return false; + } + } + return false; +} + +template <typename P> +void btree<P>::try_shrink() { + if (root()->count() > 0) { + return; + } + // Deleted the last item on the root node, shrink the height of the tree. 
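+  // (A leaf root with no values means the tree is now empty, so the shared
+  // EmptyNode takes over; an internal root with no values still has exactly
+  // one child, which becomes the new root.)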
+ if (root()->leaf()) { + assert(size() == 0); + delete_leaf_node(root()); + mutable_root() = EmptyNode(); + rightmost_ = EmptyNode(); + } else { + node_type *child = root()->start_child(); + child->make_root(); + delete_internal_node(root()); + mutable_root() = child; + } +} + +template <typename P> +template <typename IterType> +inline IterType btree<P>::internal_last(IterType iter) { + assert(iter.node != nullptr); + while (iter.position == iter.node->finish()) { + iter.position = iter.node->position(); + iter.node = iter.node->parent(); + if (iter.node->leaf()) { + iter.node = nullptr; + break; + } + } + return iter; +} + +template <typename P> +template <typename... Args> +inline auto btree<P>::internal_emplace(iterator iter, Args &&... args) + -> iterator { + if (!iter.node->leaf()) { + // We can't insert on an internal node. Instead, we'll insert after the + // previous value which is guaranteed to be on a leaf node. + --iter; + ++iter.position; + } + const int max_count = iter.node->max_count(); + if (iter.node->count() == max_count) { + // Make room in the leaf for the new item. + if (max_count < kNodeValues) { + // Insertion into the root where the root is smaller than the full node + // size. Simply grow the size of the root node. + assert(iter.node == root()); + iter.node = + new_leaf_root_node((std::min<int>)(kNodeValues, 2 * max_count)); + iter.node->swap(root(), mutable_allocator()); + delete_leaf_node(root()); + mutable_root() = iter.node; + rightmost_ = iter.node; + } else { + rebalance_or_split(&iter); + } + } + iter.node->emplace_value(iter.position, mutable_allocator(), + std::forward<Args>(args)...); + ++size_; + return iter; +} + +template <typename P> +template <typename K> +inline auto btree<P>::internal_locate(const K &key) const + -> SearchResult<iterator, is_key_compare_to::value> { + return internal_locate_impl(key, is_key_compare_to()); +} + +template <typename P> +template <typename K> +inline auto btree<P>::internal_locate_impl( + const K &key, std::false_type /* IsCompareTo */) const + -> SearchResult<iterator, false> { + iterator iter(const_cast<node_type *>(root())); + for (;;) { + iter.position = iter.node->lower_bound(key, key_comp()).value; + // NOTE: we don't need to walk all the way down the tree if the keys are + // equal, but determining equality would require doing an extra comparison + // on each node on the way down, and we will need to go all the way to the + // leaf node in the expected case. 
+ if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return {iter}; +} + +template <typename P> +template <typename K> +inline auto btree<P>::internal_locate_impl( + const K &key, std::true_type /* IsCompareTo */) const + -> SearchResult<iterator, true> { + iterator iter(const_cast<node_type *>(root())); + for (;;) { + SearchResult<int, true> res = iter.node->lower_bound(key, key_comp()); + iter.position = res.value; + if (res.match == MatchKind::kEq) { + return {iter, MatchKind::kEq}; + } + if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return {iter, MatchKind::kNe}; +} + +template <typename P> +template <typename K> +auto btree<P>::internal_lower_bound(const K &key) const -> iterator { + iterator iter(const_cast<node_type *>(root())); + for (;;) { + iter.position = iter.node->lower_bound(key, key_comp()).value; + if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return internal_last(iter); +} + +template <typename P> +template <typename K> +auto btree<P>::internal_upper_bound(const K &key) const -> iterator { + iterator iter(const_cast<node_type *>(root())); + for (;;) { + iter.position = iter.node->upper_bound(key, key_comp()); + if (iter.node->leaf()) { + break; + } + iter.node = iter.node->child(iter.position); + } + return internal_last(iter); +} + +template <typename P> +template <typename K> +auto btree<P>::internal_find(const K &key) const -> iterator { + auto res = internal_locate(key); + if (res.HasMatch()) { + if (res.IsEq()) { + return res.value; + } + } else { + const iterator iter = internal_last(res.value); + if (iter.node != nullptr && !compare_keys(key, iter.key())) { + return iter; + } + } + return {nullptr, 0}; +} + +template <typename P> +void btree<P>::internal_clear(node_type *node) { + if (!node->leaf()) { + for (int i = node->start(); i <= node->finish(); ++i) { + internal_clear(node->child(i)); + } + delete_internal_node(node); + } else { + delete_leaf_node(node); + } +} + +template <typename P> +int btree<P>::internal_verify(const node_type *node, const key_type *lo, + const key_type *hi) const { + assert(node->count() > 0); + assert(node->count() <= node->max_count()); + if (lo) { + assert(!compare_keys(node->key(node->start()), *lo)); + } + if (hi) { + assert(!compare_keys(*hi, node->key(node->finish() - 1))); + } + for (int i = node->start() + 1; i < node->finish(); ++i) { + assert(!compare_keys(node->key(i), node->key(i - 1))); + } + int count = node->count(); + if (!node->leaf()) { + for (int i = node->start(); i <= node->finish(); ++i) { + assert(node->child(i) != nullptr); + assert(node->child(i)->parent() == node); + assert(node->child(i)->position() == i); + count += internal_verify(node->child(i), + i == node->start() ? lo : &node->key(i - 1), + i == node->finish() ? hi : &node->key(i)); + } + } + return count; +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_BTREE_H_ diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h new file mode 100644 index 00000000..f2e4c3a5 --- /dev/null +++ b/absl/container/internal/btree_container.h @@ -0,0 +1,672 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ +#define ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ + +#include <algorithm> +#include <initializer_list> +#include <iterator> +#include <utility> + +#include "absl/base/internal/throw_delegate.h" +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/common.h" +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// A common base class for btree_set, btree_map, btree_multiset, and +// btree_multimap. +template <typename Tree> +class btree_container { + using params_type = typename Tree::params_type; + + protected: + // Alias used for heterogeneous lookup functions. + // `key_arg<K>` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. + template <class K> + using key_arg = + typename KeyArg<IsTransparent<typename Tree::key_compare>::value>:: + template type<K, typename Tree::key_type>; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using difference_type = typename Tree::difference_type; + using key_compare = typename Tree::key_compare; + using value_compare = typename Tree::value_compare; + using allocator_type = typename Tree::allocator_type; + using reference = typename Tree::reference; + using const_reference = typename Tree::const_reference; + using pointer = typename Tree::pointer; + using const_pointer = typename Tree::const_pointer; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using reverse_iterator = typename Tree::reverse_iterator; + using const_reverse_iterator = typename Tree::const_reverse_iterator; + using node_type = typename Tree::node_handle_type; + + // Constructors/assignments. + btree_container() : tree_(key_compare(), allocator_type()) {} + explicit btree_container(const key_compare &comp, + const allocator_type &alloc = allocator_type()) + : tree_(comp, alloc) {} + btree_container(const btree_container &x) = default; + btree_container(btree_container &&x) noexcept = default; + btree_container &operator=(const btree_container &x) = default; + btree_container &operator=(btree_container &&x) noexcept( + std::is_nothrow_move_assignable<Tree>::value) = default; + + // Iterator routines. 
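+  // (These forward to the underlying btree; iteration visits the values in
+  // sorted order, and the c-prefixed forms match the standard
+  // associative-container interface.)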
+ iterator begin() { return tree_.begin(); } + const_iterator begin() const { return tree_.begin(); } + const_iterator cbegin() const { return tree_.begin(); } + iterator end() { return tree_.end(); } + const_iterator end() const { return tree_.end(); } + const_iterator cend() const { return tree_.end(); } + reverse_iterator rbegin() { return tree_.rbegin(); } + const_reverse_iterator rbegin() const { return tree_.rbegin(); } + const_reverse_iterator crbegin() const { return tree_.rbegin(); } + reverse_iterator rend() { return tree_.rend(); } + const_reverse_iterator rend() const { return tree_.rend(); } + const_reverse_iterator crend() const { return tree_.rend(); } + + // Lookup routines. + template <typename K = key_type> + iterator find(const key_arg<K> &key) { + return tree_.find(key); + } + template <typename K = key_type> + const_iterator find(const key_arg<K> &key) const { + return tree_.find(key); + } + template <typename K = key_type> + bool contains(const key_arg<K> &key) const { + return find(key) != end(); + } + template <typename K = key_type> + iterator lower_bound(const key_arg<K> &key) { + return tree_.lower_bound(key); + } + template <typename K = key_type> + const_iterator lower_bound(const key_arg<K> &key) const { + return tree_.lower_bound(key); + } + template <typename K = key_type> + iterator upper_bound(const key_arg<K> &key) { + return tree_.upper_bound(key); + } + template <typename K = key_type> + const_iterator upper_bound(const key_arg<K> &key) const { + return tree_.upper_bound(key); + } + template <typename K = key_type> + std::pair<iterator, iterator> equal_range(const key_arg<K> &key) { + return tree_.equal_range(key); + } + template <typename K = key_type> + std::pair<const_iterator, const_iterator> equal_range( + const key_arg<K> &key) const { + return tree_.equal_range(key); + } + + // Deletion routines. Note that there is also a deletion routine that is + // specific to btree_set_container/btree_multiset_container. + + // Erase the specified iterator from the btree. The iterator must be valid + // (i.e. not equal to end()). Return an iterator pointing to the node after + // the one that was erased (or end() if none exists). + iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); } + iterator erase(iterator iter) { return tree_.erase(iter); } + iterator erase(const_iterator first, const_iterator last) { + return tree_.erase_range(iterator(first), iterator(last)).second; + } + + // Extract routines. + node_type extract(iterator position) { + // Use Move instead of Transfer, because the rebalancing code expects to + // have a valid object to scribble metadata bits on top of. + auto node = CommonAccess::Move<node_type>(get_allocator(), position.slot()); + erase(position); + return node; + } + node_type extract(const_iterator position) { + return extract(iterator(position)); + } + + public: + // Utility routines. + void clear() { tree_.clear(); } + void swap(btree_container &x) { tree_.swap(x.tree_); } + void verify() const { tree_.verify(); } + + // Size routines. 
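// A minimal usage sketch of the heterogeneous lookup routines above, assuming
// the public wrapper in absl/container/btree_set.h and an explicitly
// transparent comparator (std::less<>):
//
//   absl::btree_set<std::string, std::less<>> set = {"apple", "banana"};
//   absl::string_view key = "apple";
//   auto it = set.find(key);            // No temporary std::string is built.
//   bool has = set.contains("banana");  // Compares directly against the key.
//
// With a non-transparent comparator such as the default std::less<Key>,
// `key_arg<K>` collapses to `key_type`, so the same calls would materialize a
// std::string temporary first.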
+ size_type size() const { return tree_.size(); } + size_type max_size() const { return tree_.max_size(); } + bool empty() const { return tree_.empty(); } + + friend bool operator==(const btree_container &x, const btree_container &y) { + if (x.size() != y.size()) return false; + return std::equal(x.begin(), x.end(), y.begin()); + } + + friend bool operator!=(const btree_container &x, const btree_container &y) { + return !(x == y); + } + + friend bool operator<(const btree_container &x, const btree_container &y) { + return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end()); + } + + friend bool operator>(const btree_container &x, const btree_container &y) { + return y < x; + } + + friend bool operator<=(const btree_container &x, const btree_container &y) { + return !(y < x); + } + + friend bool operator>=(const btree_container &x, const btree_container &y) { + return !(x < y); + } + + // The allocator used by the btree. + allocator_type get_allocator() const { return tree_.get_allocator(); } + + // The key comparator used by the btree. + key_compare key_comp() const { return tree_.key_comp(); } + value_compare value_comp() const { return tree_.value_comp(); } + + // Support absl::Hash. + template <typename State> + friend State AbslHashValue(State h, const btree_container &b) { + for (const auto &v : b) { + h = State::combine(std::move(h), v); + } + return State::combine(std::move(h), b.size()); + } + + protected: + Tree tree_; +}; + +// A common base class for btree_set and btree_map. +template <typename Tree> +class btree_set_container : public btree_container<Tree> { + using super_type = btree_container<Tree>; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; + + protected: + template <class K> + using key_arg = typename super_type::template key_arg<K>; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + using insert_return_type = InsertReturnType<iterator, node_type>; + + // Inherit constructors. + using super_type::super_type; + btree_set_container() {} + + // Range constructor. + template <class InputIterator> + btree_set_container(InputIterator b, InputIterator e, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : super_type(comp, alloc) { + insert(b, e); + } + + // Initializer list constructor. + btree_set_container(std::initializer_list<init_type> init, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : btree_set_container(init.begin(), init.end(), comp, alloc) {} + + // Lookup routines. + template <typename K = key_type> + size_type count(const key_arg<K> &key) const { + return this->tree_.count_unique(key); + } + + // Insertion routines. + std::pair<iterator, bool> insert(const value_type &x) { + return this->tree_.insert_unique(params_type::key(x), x); + } + std::pair<iterator, bool> insert(value_type &&x) { + return this->tree_.insert_unique(params_type::key(x), std::move(x)); + } + template <typename... Args> + std::pair<iterator, bool> emplace(Args &&... 
args) { + init_type v(std::forward<Args>(args)...); + return this->tree_.insert_unique(params_type::key(v), std::move(v)); + } + iterator insert(const_iterator position, const value_type &x) { + return this->tree_ + .insert_hint_unique(iterator(position), params_type::key(x), x) + .first; + } + iterator insert(const_iterator position, value_type &&x) { + return this->tree_ + .insert_hint_unique(iterator(position), params_type::key(x), + std::move(x)) + .first; + } + template <typename... Args> + iterator emplace_hint(const_iterator position, Args &&... args) { + init_type v(std::forward<Args>(args)...); + return this->tree_ + .insert_hint_unique(iterator(position), params_type::key(v), + std::move(v)) + .first; + } + template <typename InputIterator> + void insert(InputIterator b, InputIterator e) { + this->tree_.insert_iterator_unique(b, e); + } + void insert(std::initializer_list<init_type> init) { + this->tree_.insert_iterator_unique(init.begin(), init.end()); + } + insert_return_type insert(node_type &&node) { + if (!node) return {this->end(), false, node_type()}; + std::pair<iterator, bool> res = + this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + if (res.second) { + CommonAccess::Destroy(&node); + return {res.first, true, node_type()}; + } else { + return {res.first, false, std::move(node)}; + } + } + iterator insert(const_iterator hint, node_type &&node) { + if (!node) return this->end(); + std::pair<iterator, bool> res = this->tree_.insert_hint_unique( + iterator(hint), params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + if (res.second) CommonAccess::Destroy(&node); + return res.first; + } + + // Deletion routines. + template <typename K = key_type> + size_type erase(const key_arg<K> &key) { + return this->tree_.erase_unique(key); + } + using super_type::erase; + + // Node extraction routines. + template <typename K = key_type> + node_type extract(const key_arg<K> &key) { + auto it = this->find(key); + return it == this->end() ? node_type() : extract(it); + } + using super_type::extract; + + // Merge routines. + // Moves elements from `src` into `this`. If the element already exists in + // `this`, it is left unmodified in `src`. + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same<value_type, typename T::value_type>, + std::is_same<allocator_type, typename T::allocator_type>, + std::is_same<typename params_type::is_map_container, + typename T::params_type::is_map_container>>::value, + int> = 0> + void merge(btree_container<T> &src) { // NOLINT + for (auto src_it = src.begin(); src_it != src.end();) { + if (insert(std::move(*src_it)).second) { + src_it = src.erase(src_it); + } else { + ++src_it; + } + } + } + + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same<value_type, typename T::value_type>, + std::is_same<allocator_type, typename T::allocator_type>, + std::is_same<typename params_type::is_map_container, + typename T::params_type::is_map_container>>::value, + int> = 0> + void merge(btree_container<T> &&src) { + merge(src); + } +}; + +// Base class for btree_map. 
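// A sketch of the extract/insert(node_type &&)/merge API defined above,
// assuming the public btree_set wrapper:
//
//   absl::btree_set<int> a = {1, 2, 3};
//   absl::btree_set<int> b = {3, 4};
//
//   auto node = a.extract(a.find(1));  // a == {2, 3}; node owns the element.
//   b.insert(std::move(node));         // b == {1, 3, 4}; no copy of 1 made.
//
//   a.merge(b);  // a == {1, 2, 3, 4}; the duplicate key 3 remains in b.
//
// As documented above, elements whose keys already exist in `this` are left
// unmodified in `src` rather than being destroyed.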
+template <typename Tree> +class btree_map_container : public btree_set_container<Tree> { + using super_type = btree_set_container<Tree>; + using params_type = typename Tree::params_type; + + private: + template <class K> + using key_arg = typename super_type::template key_arg<K>; + + public: + using key_type = typename Tree::key_type; + using mapped_type = typename params_type::mapped_type; + using value_type = typename Tree::value_type; + using key_compare = typename Tree::key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + + // Inherit constructors. + using super_type::super_type; + btree_map_container() {} + + // Insertion routines. + // Note: the nullptr template arguments and extra `const M&` overloads allow + // for supporting bitfield arguments. + // Note: when we call `std::forward<M>(obj)` twice, it's safe because + // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when + // `ret.second` is false. + template <class M> + std::pair<iterator, bool> insert_or_assign(const key_type &k, const M &obj) { + const std::pair<iterator, bool> ret = this->tree_.insert_unique(k, k, obj); + if (!ret.second) ret.first->second = obj; + return ret; + } + template <class M, key_type * = nullptr> + std::pair<iterator, bool> insert_or_assign(key_type &&k, const M &obj) { + const std::pair<iterator, bool> ret = + this->tree_.insert_unique(k, std::move(k), obj); + if (!ret.second) ret.first->second = obj; + return ret; + } + template <class M, M * = nullptr> + std::pair<iterator, bool> insert_or_assign(const key_type &k, M &&obj) { + const std::pair<iterator, bool> ret = + this->tree_.insert_unique(k, k, std::forward<M>(obj)); + if (!ret.second) ret.first->second = std::forward<M>(obj); + return ret; + } + template <class M, key_type * = nullptr, M * = nullptr> + std::pair<iterator, bool> insert_or_assign(key_type &&k, M &&obj) { + const std::pair<iterator, bool> ret = + this->tree_.insert_unique(k, std::move(k), std::forward<M>(obj)); + if (!ret.second) ret.first->second = std::forward<M>(obj); + return ret; + } + template <class M> + iterator insert_or_assign(const_iterator position, const key_type &k, + const M &obj) { + const std::pair<iterator, bool> ret = + this->tree_.insert_hint_unique(iterator(position), k, k, obj); + if (!ret.second) ret.first->second = obj; + return ret.first; + } + template <class M, key_type * = nullptr> + iterator insert_or_assign(const_iterator position, key_type &&k, + const M &obj) { + const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique( + iterator(position), k, std::move(k), obj); + if (!ret.second) ret.first->second = obj; + return ret.first; + } + template <class M, M * = nullptr> + iterator insert_or_assign(const_iterator position, const key_type &k, + M &&obj) { + const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique( + iterator(position), k, k, std::forward<M>(obj)); + if (!ret.second) ret.first->second = std::forward<M>(obj); + return ret.first; + } + template <class M, key_type * = nullptr, M * = nullptr> + iterator insert_or_assign(const_iterator position, key_type &&k, M &&obj) { + const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique( + iterator(position), k, std::move(k), std::forward<M>(obj)); + if (!ret.second) ret.first->second = std::forward<M>(obj); + return ret.first; + } + template <typename... 
Args> + std::pair<iterator, bool> try_emplace(const key_type &k, Args &&... args) { + return this->tree_.insert_unique( + k, std::piecewise_construct, std::forward_as_tuple(k), + std::forward_as_tuple(std::forward<Args>(args)...)); + } + template <typename... Args> + std::pair<iterator, bool> try_emplace(key_type &&k, Args &&... args) { + // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k` + // and then using `k` unsequenced. This is safe because the move is into a + // forwarding reference and insert_unique guarantees that `key` is never + // referenced after consuming `args`. + const key_type &key_ref = k; + return this->tree_.insert_unique( + key_ref, std::piecewise_construct, std::forward_as_tuple(std::move(k)), + std::forward_as_tuple(std::forward<Args>(args)...)); + } + template <typename... Args> + iterator try_emplace(const_iterator hint, const key_type &k, + Args &&... args) { + return this->tree_ + .insert_hint_unique(iterator(hint), k, std::piecewise_construct, + std::forward_as_tuple(k), + std::forward_as_tuple(std::forward<Args>(args)...)) + .first; + } + template <typename... Args> + iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) { + // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k` + // and then using `k` unsequenced. This is safe because the move is into a + // forwarding reference and insert_hint_unique guarantees that `key` is + // never referenced after consuming `args`. + const key_type &key_ref = k; + return this->tree_ + .insert_hint_unique(iterator(hint), key_ref, std::piecewise_construct, + std::forward_as_tuple(std::move(k)), + std::forward_as_tuple(std::forward<Args>(args)...)) + .first; + } + mapped_type &operator[](const key_type &k) { + return try_emplace(k).first->second; + } + mapped_type &operator[](key_type &&k) { + return try_emplace(std::move(k)).first->second; + } + + template <typename K = key_type> + mapped_type &at(const key_arg<K> &key) { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("absl::btree_map::at"); + return it->second; + } + template <typename K = key_type> + const mapped_type &at(const key_arg<K> &key) const { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("absl::btree_map::at"); + return it->second; + } +}; + +// A common base class for btree_multiset and btree_multimap. +template <typename Tree> +class btree_multiset_container : public btree_container<Tree> { + using super_type = btree_container<Tree>; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + + template <class K> + using key_arg = typename super_type::template key_arg<K>; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + + // Inherit constructors. + using super_type::super_type; + btree_multiset_container() {} + + // Range constructor. 
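// A usage sketch for the btree_map_container insertion and access routines
// above, assuming the public wrapper in absl/container/btree_map.h:
//
//   absl::btree_map<std::string, int> m;
//   m.insert_or_assign("a", 1);  // Inserts {"a", 1}.
//   m.insert_or_assign("a", 2);  // Key present: assigns, so m["a"] == 2.
//   m.try_emplace("b", 3);       // Constructs the mapped value in place.
//   m["c"];                      // Value-initializes the new mapped int to 0.
//   int v = m.at("a");           // 2; at() reports a missing key by throwing
//                                // std::out_of_range via ThrowStdOutOfRange.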
+ template <class InputIterator> + btree_multiset_container(InputIterator b, InputIterator e, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : super_type(comp, alloc) { + insert(b, e); + } + + // Initializer list constructor. + btree_multiset_container(std::initializer_list<init_type> init, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : btree_multiset_container(init.begin(), init.end(), comp, alloc) {} + + // Lookup routines. + template <typename K = key_type> + size_type count(const key_arg<K> &key) const { + return this->tree_.count_multi(key); + } + + // Insertion routines. + iterator insert(const value_type &x) { return this->tree_.insert_multi(x); } + iterator insert(value_type &&x) { + return this->tree_.insert_multi(std::move(x)); + } + iterator insert(const_iterator position, const value_type &x) { + return this->tree_.insert_hint_multi(iterator(position), x); + } + iterator insert(const_iterator position, value_type &&x) { + return this->tree_.insert_hint_multi(iterator(position), std::move(x)); + } + template <typename InputIterator> + void insert(InputIterator b, InputIterator e) { + this->tree_.insert_iterator_multi(b, e); + } + void insert(std::initializer_list<init_type> init) { + this->tree_.insert_iterator_multi(init.begin(), init.end()); + } + template <typename... Args> + iterator emplace(Args &&... args) { + return this->tree_.insert_multi(init_type(std::forward<Args>(args)...)); + } + template <typename... Args> + iterator emplace_hint(const_iterator position, Args &&... args) { + return this->tree_.insert_hint_multi( + iterator(position), init_type(std::forward<Args>(args)...)); + } + iterator insert(node_type &&node) { + if (!node) return this->end(); + iterator res = + this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + CommonAccess::Destroy(&node); + return res; + } + iterator insert(const_iterator hint, node_type &&node) { + if (!node) return this->end(); + iterator res = this->tree_.insert_hint_multi( + iterator(hint), + std::move(params_type::element(CommonAccess::GetSlot(node)))); + CommonAccess::Destroy(&node); + return res; + } + + // Deletion routines. + template <typename K = key_type> + size_type erase(const key_arg<K> &key) { + return this->tree_.erase_multi(key); + } + using super_type::erase; + + // Node extraction routines. + template <typename K = key_type> + node_type extract(const key_arg<K> &key) { + auto it = this->find(key); + return it == this->end() ? node_type() : extract(it); + } + using super_type::extract; + + // Merge routines. + // Moves all elements from `src` into `this`. 
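// Unlike the unique-key containers, the multi-container insertion routines
// above always succeed and return a plain iterator. A sketch, assuming the
// public btree_multiset wrapper:
//
//   absl::btree_multiset<int> ms = {1, 2};
//   ms.insert(2);              // Duplicates are kept: ms == {1, 2, 2}.
//   assert(ms.count(2) == 2);  // count_multi() tallies every match.
//
//   absl::btree_multiset<int> other = {2, 3};
//   ms.merge(other);           // The overloads that follow move *all*
//                              // elements, leaving `other` empty.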
+ template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same<value_type, typename T::value_type>, + std::is_same<allocator_type, typename T::allocator_type>, + std::is_same<typename params_type::is_map_container, + typename T::params_type::is_map_container>>::value, + int> = 0> + void merge(btree_container<T> &src) { // NOLINT + insert(std::make_move_iterator(src.begin()), + std::make_move_iterator(src.end())); + src.clear(); + } + + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same<value_type, typename T::value_type>, + std::is_same<allocator_type, typename T::allocator_type>, + std::is_same<typename params_type::is_map_container, + typename T::params_type::is_map_container>>::value, + int> = 0> + void merge(btree_container<T> &&src) { + merge(src); + } +}; + +// A base class for btree_multimap. +template <typename Tree> +class btree_multimap_container : public btree_multiset_container<Tree> { + using super_type = btree_multiset_container<Tree>; + using params_type = typename Tree::params_type; + + public: + using mapped_type = typename params_type::mapped_type; + + // Inherit constructors. + using super_type::super_type; + btree_multimap_container() {} +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ diff --git a/absl/container/internal/common.h b/absl/container/internal/common.h index a02cd5c3..5037d803 100644 --- a/absl/container/internal/common.h +++ b/absl/container/internal/common.h @@ -22,7 +22,7 @@ #include "absl/types/optional.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { template <class, class = void> @@ -56,7 +56,7 @@ class node_handle_base { public: using allocator_type = Alloc; - constexpr node_handle_base() {} + constexpr node_handle_base() = default; node_handle_base(node_handle_base&& other) noexcept { *this = std::move(other); } @@ -109,16 +109,15 @@ class node_handle_base { allocator_type* alloc() { return std::addressof(*alloc_); } private: - absl::optional<allocator_type> alloc_; - mutable absl::aligned_storage_t<sizeof(slot_type), alignof(slot_type)> - slot_space_; + absl::optional<allocator_type> alloc_ = {}; + alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {}; }; // For sets. 
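// The node_handle_base change above swaps absl::aligned_storage_t for a plain
// aligned byte array, which (unlike the aligned_storage member) accepts the
// `= {}` initializer used alongside the now-defaulted constexpr constructor.
// A standalone sketch of the underlying idiom, for a hypothetical type T:
//
//   #include <new>
//
//   alignas(T) unsigned char storage[sizeof(T)];
//   T* p = ::new (static_cast<void*>(storage)) T();  // Placement-new.
//   // ... use *p ...
//   p->~T();  // Destroy manually before the storage is reused or freed.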
template <typename Policy, typename PolicyTraits, typename Alloc, typename = void> class node_handle : public node_handle_base<PolicyTraits, Alloc> { - using Base = typename node_handle::node_handle_base; + using Base = node_handle_base<PolicyTraits, Alloc>; public: using value_type = typename PolicyTraits::value_type; @@ -138,7 +137,7 @@ template <typename Policy, typename PolicyTraits, typename Alloc> class node_handle<Policy, PolicyTraits, Alloc, absl::void_t<typename Policy::mapped_type>> : public node_handle_base<PolicyTraits, Alloc> { - using Base = typename node_handle::node_handle_base; + using Base = node_handle_base<PolicyTraits, Alloc>; public: using key_type = typename Policy::key_type; @@ -168,6 +167,11 @@ struct CommonAccess { } template <typename Node> + static void Destroy(Node* node) { + node->destroy(); + } + + template <typename Node> static void Reset(Node* node) { node->reset(); } @@ -192,7 +196,7 @@ struct InsertReturnType { }; } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_ diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h index fbace496..4bfe92fd 100644 --- a/absl/container/internal/compressed_tuple.h +++ b/absl/container/internal/compressed_tuple.h @@ -48,7 +48,7 @@ #endif namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { template <typename... Ts> @@ -257,7 +257,7 @@ template <> class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {}; } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc index ec893b90..1dae12db 100644 --- a/absl/container/internal/compressed_tuple_test.cc +++ b/absl/container/internal/compressed_tuple_test.cc @@ -48,7 +48,7 @@ struct TwoValues { namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { @@ -333,10 +333,6 @@ TEST(CompressedTupleTest, AnyElements) { a = 0.5f; EXPECT_EQ(absl::any_cast<float>(x.get<1>()), 0.5); - - // Ensure copy construction work in the face of a type with a universal - // implicit constructor; - CompressedTuple<absl::any> c{}, d(c); // NOLINT } TEST(CompressedTupleTest, Constexpr) { @@ -409,5 +405,5 @@ TEST(CompressedTupleTest, EmptyFinalClass) { } // namespace } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h index eb6d7eb7..d24b0f84 100644 --- a/absl/container/internal/container_memory.h +++ b/absl/container/internal/container_memory.h @@ -34,7 +34,7 @@ #include "absl/utility/utility.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { // Allocates at least n bytes aligned to the specified alignment. 
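// Because the default functors route through absl::Hash, a user-defined type
// opts in to all of these containers with a single AbslHashValue overload
// (plus equality). A minimal sketch:
//
//   struct Point {
//     int x, y;
//     friend bool operator==(const Point& a, const Point& b) {
//       return a.x == b.x && a.y == b.y;
//     }
//     template <typename H>
//     friend H AbslHashValue(H h, const Point& p) {
//       return H::combine(std::move(h), p.x, p.y);
//     }
//   };
//
// hash_default_hash<Point> then resolves to absl::Hash<Point>, so Point keys
// work without specializing std::hash.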
@@ -434,7 +434,7 @@ struct map_slot_policy { }; } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc index ea9568dc..7942c7be 100644 --- a/absl/container/internal/container_memory_test.cc +++ b/absl/container/internal/container_memory_test.cc @@ -23,7 +23,7 @@ #include "absl/strings/string_view.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { @@ -186,5 +186,5 @@ TEST(DecomposePair, NotDecomposable) { } // namespace } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/internal/counting_allocator.h b/absl/container/internal/counting_allocator.h index 94a457ca..9efdc662 100644 --- a/absl/container/internal/counting_allocator.h +++ b/absl/container/internal/counting_allocator.h @@ -19,8 +19,10 @@ #include <cstdint> #include <memory> +#include "absl/base/config.h" + namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { // This is a stateful allocator, but the state lives outside of the @@ -75,7 +77,7 @@ class CountingAllocator : public std::allocator<T> { }; } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h index 2155076d..401ddf4d 100644 --- a/absl/container/internal/hash_function_defaults.h +++ b/absl/container/internal/hash_function_defaults.h @@ -56,7 +56,7 @@ #include "absl/strings/string_view.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { // The hash of an object of type T is computed by using absl::Hash. 
@@ -140,7 +140,7 @@ template <class T> using hash_default_eq = typename container_internal::HashEq<T>::Eq; } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc index ce6133f8..2eefc7e0 100644 --- a/absl/container/internal/hash_function_defaults_test.cc +++ b/absl/container/internal/hash_function_defaults_test.cc @@ -22,7 +22,7 @@ #include "absl/strings/string_view.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { @@ -249,7 +249,7 @@ TYPED_TEST_SUITE(StringLikeTest, StringTypesCartesianProduct); } // namespace } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl enum Hash : size_t { @@ -280,7 +280,7 @@ struct hash<Hashable<H>> { } // namespace std namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { @@ -295,5 +295,5 @@ TEST(Delegate, HashDispatch) { } // namespace } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/internal/hash_generator_testing.cc b/absl/container/internal/hash_generator_testing.cc index 36b2571b..75c4db6c 100644 --- a/absl/container/internal/hash_generator_testing.cc +++ b/absl/container/internal/hash_generator_testing.cc @@ -17,7 +17,7 @@ #include <deque> namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { namespace hash_internal { namespace { @@ -70,5 +70,5 @@ absl::string_view Generator<absl::string_view>::operator()() const { } // namespace hash_internal } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/internal/hash_generator_testing.h b/absl/container/internal/hash_generator_testing.h index 27962c35..6869fe45 100644 --- a/absl/container/internal/hash_generator_testing.h +++ b/absl/container/internal/hash_generator_testing.h @@ -19,6 +19,7 @@ #define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ #include <stdint.h> + #include <algorithm> #include <iosfwd> #include <random> @@ -27,11 +28,12 @@ #include <utility> #include "absl/container/internal/hash_policy_testing.h" +#include "absl/memory/memory.h" #include "absl/meta/type_traits.h" #include "absl/strings/string_view.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { namespace hash_internal { namespace generator_internal { @@ -130,6 +132,13 @@ struct Generator<std::tuple<Ts...>> { } }; +template <class T> +struct Generator<std::unique_ptr<T>> { + std::unique_ptr<T> operator()() const { + return absl::make_unique<T>(Generator<T>()()); + } +}; + template <class U> struct Generator<U, absl::void_t<decltype(std::declval<U&>().key()), decltype(std::declval<U&>().value())>> @@ -146,7 +155,7 @@ using GeneratedType = decltype( } // namespace hash_internal } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ diff --git a/absl/container/internal/hash_policy_testing.h b/absl/container/internal/hash_policy_testing.h index 8f0d2a52..01c40d2e 100644 --- 
a/absl/container/internal/hash_policy_testing.h +++ b/absl/container/internal/hash_policy_testing.h @@ -30,7 +30,7 @@ #include "absl/strings/string_view.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { namespace hash_testing_internal { @@ -163,7 +163,7 @@ auto keys(const Set& s) } } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl // ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions diff --git a/absl/container/internal/hash_policy_testing_test.cc b/absl/container/internal/hash_policy_testing_test.cc index 8fd1df00..f0b20fe3 100644 --- a/absl/container/internal/hash_policy_testing_test.cc +++ b/absl/container/internal/hash_policy_testing_test.cc @@ -17,7 +17,7 @@ #include "gtest/gtest.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { @@ -41,5 +41,5 @@ TEST(_, Hash) { } // namespace } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h index 3d87e821..3e1209c6 100644 --- a/absl/container/internal/hash_policy_traits.h +++ b/absl/container/internal/hash_policy_traits.h @@ -23,7 +23,7 @@ #include "absl/meta/type_traits.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { // Defines how slots are initialized/destroyed/moved. @@ -185,7 +185,7 @@ struct hash_policy_traits { }; } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ diff --git a/absl/container/internal/hash_policy_traits_test.cc b/absl/container/internal/hash_policy_traits_test.cc index edfaf63e..6ef8b9e0 100644 --- a/absl/container/internal/hash_policy_traits_test.cc +++ b/absl/container/internal/hash_policy_traits_test.cc @@ -22,7 +22,7 @@ #include "gtest/gtest.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { @@ -140,5 +140,5 @@ TEST_F(Test, with_transfer) { } // namespace } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/internal/hashtable_debug.h b/absl/container/internal/hashtable_debug.h index 1d1a9c28..19d52121 100644 --- a/absl/container/internal/hashtable_debug.h +++ b/absl/container/internal/hashtable_debug.h @@ -38,7 +38,7 @@ #include "absl/container/internal/hashtable_debug_hooks.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { // Returns the number of probes required to lookup `key`. 
Returns 0 for a @@ -104,7 +104,7 @@ size_t LowerBoundAllocatedByteSize(size_t num_elements) { } } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ diff --git a/absl/container/internal/hashtable_debug_hooks.h b/absl/container/internal/hashtable_debug_hooks.h index 7b95fcef..3e9ea595 100644 --- a/absl/container/internal/hashtable_debug_hooks.h +++ b/absl/container/internal/hashtable_debug_hooks.h @@ -23,8 +23,10 @@ #include <type_traits> #include <vector> +#include "absl/base/config.h" + namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { namespace hashtable_debug_internal { @@ -77,7 +79,7 @@ struct HashtableDebugAccess { } // namespace hashtable_debug_internal } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc index 2338045d..56447251 100644 --- a/absl/container/internal/hashtablez_sampler.cc +++ b/absl/container/internal/hashtablez_sampler.cc @@ -21,13 +21,14 @@ #include <limits> #include "absl/base/attributes.h" +#include "absl/base/internal/exponential_biased.h" #include "absl/container/internal/have_sse.h" #include "absl/debugging/stacktrace.h" #include "absl/memory/memory.h" #include "absl/synchronization/mutex.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { constexpr int HashtablezInfo::kMaxStackDepth; @@ -38,80 +39,17 @@ ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{ ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10}; ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20}; -// Returns the next pseudo-random value. -// pRNG is: aX+b mod c with a = 0x5DEECE66D, b = 0xB, c = 1<<48 -// This is the lrand64 generator. -uint64_t NextRandom(uint64_t rnd) { - const uint64_t prng_mult = uint64_t{0x5DEECE66D}; - const uint64_t prng_add = 0xB; - const uint64_t prng_mod_power = 48; - const uint64_t prng_mod_mask = ~(~uint64_t{0} << prng_mod_power); - return (prng_mult * rnd + prng_add) & prng_mod_mask; -} - -// Generates a geometric variable with the specified mean. -// This is done by generating a random number between 0 and 1 and applying -// the inverse cumulative distribution function for an exponential. -// Specifically: Let m be the inverse of the sample period, then -// the probability distribution function is m*exp(-mx) so the CDF is -// p = 1 - exp(-mx), so -// q = 1 - p = exp(-mx) -// log_e(q) = -mx -// -log_e(q)/m = x -// log_2(q) * (-log_e(2) * 1/m) = x -// In the code, q is actually in the range 1 to 2**26, hence the -26 below -// -int64_t GetGeometricVariable(int64_t mean) { -#if ABSL_HAVE_THREAD_LOCAL - thread_local -#else // ABSL_HAVE_THREAD_LOCAL - // SampleSlow and hence GetGeometricVariable is guarded by a single mutex when - // there are not thread locals. Thus, a single global rng is acceptable for - // that case. - static -#endif // ABSL_HAVE_THREAD_LOCAL - uint64_t rng = []() { - // We don't get well distributed numbers from this so we call - // NextRandom() a bunch to mush the bits around. We use a global_rand - // to handle the case where the same thread (by memory address) gets - // created and destroyed repeatedly. 
- ABSL_CONST_INIT static std::atomic<uint32_t> global_rand(0); - uint64_t r = reinterpret_cast<uint64_t>(&rng) + - global_rand.fetch_add(1, std::memory_order_relaxed); - for (int i = 0; i < 20; ++i) { - r = NextRandom(r); - } - return r; - }(); - - rng = NextRandom(rng); - - // Take the top 26 bits as the random number - // (This plus the 1<<58 sampling bound give a max possible step of - // 5194297183973780480 bytes.) - const uint64_t prng_mod_power = 48; // Number of bits in prng - // The uint32_t cast is to prevent a (hard-to-reproduce) NAN - // under piii debug for some binaries. - double q = static_cast<uint32_t>(rng >> (prng_mod_power - 26)) + 1.0; - // Put the computed p-value through the CDF of a geometric. - double interval = (log2(q) - 26) * (-std::log(2.0) * mean); - - // Very large values of interval overflow int64_t. If we happen to - // hit such improbable condition, we simply cheat and clamp interval - // to largest supported value. - if (interval > static_cast<double>(std::numeric_limits<int64_t>::max() / 2)) { - return std::numeric_limits<int64_t>::max() / 2; - } - - // Small values of interval are equivalent to just sampling next time. - if (interval < 1) { - return 1; - } - return static_cast<int64_t>(interval); -} +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased + g_exponential_biased_generator; +#endif } // namespace +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0; +#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + HashtablezSampler& HashtablezSampler::Global() { static auto* sampler = new HashtablezSampler(); return *sampler; @@ -229,15 +167,39 @@ int64_t HashtablezSampler::Iterate( return dropped_samples_.load(std::memory_order_relaxed); } +static bool ShouldForceSampling() { + enum ForceState { + kDontForce, + kForce, + kUninitialized + }; + ABSL_CONST_INIT static std::atomic<ForceState> global_state{ + kUninitialized}; + ForceState state = global_state.load(std::memory_order_relaxed); + if (ABSL_PREDICT_TRUE(state == kDontForce)) return false; + + if (state == kUninitialized) { + state = AbslContainerInternalSampleEverything() ? kForce : kDontForce; + global_state.store(state, std::memory_order_relaxed); + } + return state == kForce; +} + HashtablezInfo* SampleSlow(int64_t* next_sample) { - if (kAbslContainerInternalSampleEverything) { + if (ABSL_PREDICT_FALSE(ShouldForceSampling())) { *next_sample = 1; return HashtablezSampler::Global().Register(); } +#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + *next_sample = std::numeric_limits<int64_t>::max(); + return nullptr; +#else bool first = *next_sample < 0; - *next_sample = GetGeometricVariable( + *next_sample = g_exponential_biased_generator.GetStride( g_hashtablez_sample_parameter.load(std::memory_order_relaxed)); + // Small values of interval are equivalent to just sampling next time. 
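// The GetGeometricVariable() logic removed above now lives behind
// absl::base_internal::ExponentialBiased::GetStride(). A standalone sketch of
// the same inverse-CDF construction, using <random> in place of Abseil's
// hand-rolled PRNG (NextStride is a hypothetical helper, not Abseil API):
//
//   #include <cmath>
//   #include <cstdint>
//   #include <limits>
//   #include <random>
//
//   // For q uniform in (0, 1], x = -ln(q) * mean inverts the exponential CDF
//   // p = 1 - exp(-x / mean), yielding a geometrically distributed
//   // inter-sample stride with the requested mean.
//   int64_t NextStride(int64_t mean, std::mt19937_64& rng) {
//     std::uniform_real_distribution<double> dist(0.0, 1.0);
//     const double q = 1.0 - dist(rng);  // In (0, 1].
//     const double interval = -std::log(q) * static_cast<double>(mean);
//     if (interval < 1) return 1;  // Sample again on the very next call.
//     const double clamp =
//         static_cast<double>(std::numeric_limits<int64_t>::max() / 2);
//     return interval > clamp ? std::numeric_limits<int64_t>::max() / 2
//                             : static_cast<int64_t>(interval);
//   }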
+  ABSL_ASSERT(*next_sample >= 1);
 
   // g_hashtablez_enabled can be dynamically flipped, we need to set a threshold
   // low enough that we will start sampling in a reasonable time, so we just use
@@ -252,12 +214,9 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) {
   }
 
   return HashtablezSampler::Global().Register();
+#endif
 }
 
-#if ABSL_PER_THREAD_TLS == 1
-ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
-#endif  // ABSL_PER_THREAD_TLS == 1
-
 void UnsampleSlow(HashtablezInfo* info) {
   HashtablezSampler::Global().Unregister(info);
 }
@@ -306,5 +265,5 @@ void SetHashtablezMaxSamples(int32_t max) {
 }
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index f17c425c..34d5e572 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -51,7 +51,7 @@
 #include "absl/utility/utility.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 // Stores information about a sampled hashtable.  All mutations to this *must*
@@ -66,7 +66,7 @@ struct HashtablezInfo {
   // Puts the object into a clean state, fills in the logically `const` members,
   // blocking for any readers that are currently sampling the object.
-  void PrepareForSampling() EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+  void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
 
   // These fields are mutated by the various Record* APIs and need to be
   // thread-safe.
@@ -84,7 +84,7 @@ struct HashtablezInfo {
   // prevents races with sampling and resurrecting an object.
   absl::Mutex init_mu;
   HashtablezInfo* next;
-  HashtablezInfo* dead GUARDED_BY(init_mu);
+  HashtablezInfo* dead ABSL_GUARDED_BY(init_mu);
 
   // All of the fields below are set by `PrepareForSampling`, they must not be
   // mutated in `Record*` functions.  They are logically `const` in that sense.
@@ -180,23 +180,30 @@ class HashtablezInfoHandle {
   HashtablezInfo* info_;
 };
 
-#if ABSL_PER_THREAD_TLS == 1
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+#if (ABSL_PER_THREAD_TLS == 1) && !defined(ABSL_BUILD_DLL) && \
+    !defined(ABSL_CONSUME_DLL)
+#define ABSL_INTERNAL_HASHTABLEZ_SAMPLE
+#endif
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
 // Returns an RAII sampling handle that manages registration and unregistration
 // with the global sampler.
 inline HashtablezInfoHandle Sample() {
-#if ABSL_PER_THREAD_TLS == 0
-  static auto* mu = new absl::Mutex;
-  static int64_t global_next_sample = 0;
-  absl::MutexLock l(mu);
-#endif  // !ABSL_HAVE_THREAD_LOCAL
-
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
   if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
     return HashtablezInfoHandle(nullptr);
   }
   return HashtablezInfoHandle(SampleSlow(&global_next_sample));
+#else
+  return HashtablezInfoHandle(nullptr);
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 }
 
 // Holds samples and their associated stack traces with a soft limit of
@@ -281,10 +288,10 @@ void SetHashtablezMaxSamples(int32_t max);
 // initialization of static storage duration objects.
 // The definition of this function is weak, which allows us to inject a
 // different implementation for it at link time.
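// Because the definition of AbslContainerInternalSampleEverything() is weak
// (see the next hunk), a binary can force-sample every hashtable by linking
// in a strong definition. A sketch of such a client-side override; whether it
// takes effect depends on the toolchain's weak-symbol support:
//
//   extern "C" bool AbslContainerInternalSampleEverything() { return true; }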
-extern "C" const bool kAbslContainerInternalSampleEverything; +extern "C" bool AbslContainerInternalSampleEverything(); } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ diff --git a/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/absl/container/internal/hashtablez_sampler_force_weak_definition.cc index d3f41c7c..78b9d362 100644 --- a/absl/container/internal/hashtablez_sampler_force_weak_definition.cc +++ b/absl/container/internal/hashtablez_sampler_force_weak_definition.cc @@ -17,13 +17,14 @@ #include "absl/base/attributes.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { // See hashtablez_sampler.h for details. -extern "C" ABSL_ATTRIBUTE_WEAK const bool - kAbslContainerInternalSampleEverything = false; +extern "C" ABSL_ATTRIBUTE_WEAK bool AbslContainerInternalSampleEverything() { + return false; +} } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc index bdae75f3..36f5ccdd 100644 --- a/absl/container/internal/hashtablez_sampler_test.cc +++ b/absl/container/internal/hashtablez_sampler_test.cc @@ -36,7 +36,7 @@ constexpr int kProbeLength = 8; #endif namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace container_internal { class HashtablezInfoHandlePeer { public: @@ -169,6 +169,7 @@ TEST(HashtablezInfoTest, RecordRehash) { EXPECT_EQ(info.num_erases.load(), 0); } +#if defined(ABSL_HASHTABLEZ_SAMPLE) TEST(HashtablezSamplerTest, SmallSampleParameter) { SetHashtablezEnabled(true); SetHashtablezSampleParameter(100); @@ -212,6 +213,7 @@ TEST(HashtablezSamplerTest, Sample) { } EXPECT_NEAR(sample_rate, 0.01, 0.005); } +#endif TEST(HashtablezSamplerTest, Handle) { auto& sampler = HashtablezSampler::Global(); @@ -353,5 +355,5 @@ TEST(HashtablezSamplerTest, Callback) { } // namespace } // namespace container_internal -} // inline namespace lts_2019_08_08 +ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h index 123e04c9..4d80b727 100644 --- a/absl/container/internal/inlined_vector.h +++ b/absl/container/internal/inlined_vector.h @@ -30,7 +30,7 @@ #include "absl/types/span.h" namespace absl { -inline namespace lts_2019_08_08 { +ABSL_NAMESPACE_BEGIN namespace inlined_vector_internal { template <typename Iterator> @@ -38,16 +38,17 @@ using IsAtLeastForwardIterator = std::is_convertible< typename std::iterator_traits<Iterator>::iterator_category, std::forward_iterator_tag>; -template <typename AllocatorType> -using IsMemcpyOk = absl::conjunction< - std::is_same<std::allocator<typename AllocatorType::value_type>, - AllocatorType>, - absl::is_trivially_copy_constructible<typename AllocatorType::value_type>, - absl::is_trivially_copy_assignable<typename AllocatorType::value_type>, - absl::is_trivially_destructible<typename AllocatorType::value_type>>; - -template <typename AllocatorType, typename ValueType, typename SizeType> -void DestroyElements(AllocatorType* alloc_ptr, ValueType* destroy_first, +template <typename AllocatorType, + typename ValueType = + typename absl::allocator_traits<AllocatorType>::value_type> +using IsMemcpyOk = + absl::conjunction<std::is_same<AllocatorType, std::allocator<ValueType>>, + 
absl::is_trivially_copy_constructible<ValueType>, + absl::is_trivially_copy_assignable<ValueType>, + absl::is_trivially_destructible<ValueType>>; + +template <typename AllocatorType, typename Pointer, typename SizeType> +void DestroyElements(AllocatorType* alloc_ptr, Pointer destroy_first, SizeType destroy_size) { using AllocatorTraits = absl::allocator_traits<AllocatorType>; @@ -57,21 +58,26 @@ void DestroyElements(AllocatorType* alloc_ptr, ValueType* destroy_first, AllocatorTraits::destroy(*alloc_ptr, destroy_first + i); } -#ifndef NDEBUG - // Overwrite unused memory with `0xab` so we can catch uninitialized usage. - // - // Cast to `void*` to tell the compiler that we don't care that we might be - // scribbling on a vtable pointer. - auto* memory_ptr = static_cast<void*>(destroy_first); - auto memory_size = sizeof(ValueType) * destroy_size; - std::memset(memory_ptr, 0xab, memory_size); -#endif // NDEBUG +#if !defined(NDEBUG) + { + using ValueType = typename AllocatorTraits::value_type; + + // Overwrite unused memory with `0xab` so we can catch uninitialized + // usage. + // + // Cast to `void*` to tell the compiler that we don't care that we might + // be scribbling on a vtable pointer. + void* memory_ptr = destroy_first; + auto memory_size = destroy_size * sizeof(ValueType); + std::memset(memory_ptr, 0xab, memory_size); + } +#endif // !defined(NDEBUG) } } -template <typename AllocatorType, typename ValueType, typename ValueAdapter, +template <typename AllocatorType, typename Pointer, typename ValueAdapter, typename SizeType> -void ConstructElements(AllocatorType* alloc_ptr, ValueType* construct_first, +void ConstructElements(AllocatorType* alloc_ptr, Pointer construct_first, ValueAdapter* values_ptr, SizeType construct_size) { for (SizeType i = 0; i < construct_size; ++i) { ABSL_INTERNAL_TRY { @@ -84,8 +90,8 @@ void ConstructElements(AllocatorType* alloc_ptr, ValueType* construct_first, } } -template <typename ValueType, typename ValueAdapter, typename SizeType> -void AssignElements(ValueType* assign_first, ValueAdapter* values_ptr, +template <typename Pointer, typename ValueAdapter, typename SizeType> +void AssignElements(Pointer assign_first, ValueAdapter* values_ptr, SizeType assign_size) { for (SizeType i = 0; i < assign_size; ++i) { values_ptr->AssignNext(assign_first + i); @@ -94,28 +100,29 @@ void AssignElements(ValueType* assign_first, ValueAdapter* values_ptr, template <typename AllocatorType> struct StorageView { - using pointer = typename AllocatorType::pointer; - using size_type = typename AllocatorType::size_type; + using AllocatorTraits = absl::allocator_traits<AllocatorType>; + using Pointer = typename AllocatorTraits::pointer; + using SizeType = typename AllocatorTraits::size_type; - pointer data; - size_type size; - size_type capacity; + Pointer data; + SizeType size; + SizeType capacity; }; template <typename AllocatorType, typename Iterator> class IteratorValueAdapter { - using pointer = typename AllocatorType::pointer; using AllocatorTraits = absl::allocator_traits<AllocatorType>; + using Pointer = typename AllocatorTraits::pointer; public: explicit IteratorValueAdapter(const Iterator& it) : it_(it) {} - void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) { + void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) { AllocatorTraits::construct(*alloc_ptr, construct_at, *it_); ++it_; } - void AssignNext(pointer assign_at) { + void AssignNext(Pointer assign_at) { *assign_at = *it_; ++it_; } @@ -126,46 +133,45 @@ class IteratorValueAdapter { 
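// The DestroyElements() change above keeps the debug-only poisoning of
// destroyed slots. A standalone sketch of the idiom (PoisonDestroyed is a
// hypothetical helper, not Abseil API):
//
//   #include <cstddef>
//   #include <cstring>
//
//   template <typename T>
//   void PoisonDestroyed(T* first, std::size_t n) {
//   #ifndef NDEBUG
//     // The void* cast signals that scribbling over vtable pointers is fine.
//     std::memset(static_cast<void*>(first), 0xab, n * sizeof(T));
//   #endif
//   }
//
// A later read of a destroyed element then surfaces the telltale 0xabab...
// pattern in a debugger instead of stale but plausible-looking data.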
template <typename AllocatorType> class CopyValueAdapter { - using pointer = typename AllocatorType::pointer; - using const_pointer = typename AllocatorType::const_pointer; - using const_reference = typename AllocatorType::const_reference; using AllocatorTraits = absl::allocator_traits<AllocatorType>; + using ValueType = typename AllocatorTraits::value_type; + using Pointer = typename AllocatorTraits::pointer; + using ConstPointer = typename AllocatorTraits::const_pointer; public: - explicit CopyValueAdapter(const_reference v) : ptr_(std::addressof(v)) {} + explicit CopyValueAdapter(const ValueType& v) : ptr_(std::addressof(v)) {} - void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) { + void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) { AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_); } - void AssignNext(pointer assign_at) { *assign_at = *ptr_; } + void AssignNext(Pointer assign_at) { *assign_at = *ptr_; } private: - const_pointer ptr_; + ConstPointer ptr_; }; template <typename AllocatorType> class DefaultValueAdapter { - using pointer = typename AllocatorType::pointer; - using value_type = typename AllocatorType::value_type; using AllocatorTraits = absl::allocator_traits<AllocatorType>; + using ValueType = typename AllocatorTraits::value_type; + using Pointer = typename AllocatorTraits::pointer; public: explicit DefaultValueAdapter() {} - void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) { + void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) { AllocatorTraits::construct(*alloc_ptr, construct_at); } - void AssignNext(pointer assign_at) { *assign_at = value_type(); } + void AssignNext(Pointer assign_at) { *assign_at = ValueType(); } }; template <typename AllocatorType> class AllocationTransaction { - using value_type = typename AllocatorType::value_type; - using pointer = typename AllocatorType::pointer; - using size_type = typename AllocatorType::size_type; using AllocatorTraits = absl::allocator_traits<AllocatorType>; + using Pointer = typename AllocatorTraits::pointer; + using SizeType = typename AllocatorTraits::size_type; public: explicit AllocationTransaction(AllocatorType* alloc_ptr) @@ -181,25 +187,31 @@ class AllocationTransaction { void operator=(const AllocationTransaction&) = delete; AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); } - pointer& GetData() { return alloc_data_.template get<1>(); } - size_type& GetCapacity() { return capacity_; } + Pointer& GetData() { return alloc_data_.template get<1>(); } + SizeType& GetCapacity() { return capacity_; } bool DidAllocate() { return GetData() != nullptr; } - pointer Allocate(size_type capacity) { + Pointer Allocate(SizeType capacity) { GetData() = AllocatorTraits::allocate(GetAllocator(), capacity); GetCapacity() = capacity; return GetData(); } + void Reset() { + GetData() = nullptr; + GetCapacity() = 0; + } + private: - container_internal::CompressedTuple<AllocatorType, pointer> alloc_data_; - size_type capacity_ = 0; + container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_; + SizeType capacity_ = 0; }; template <typename AllocatorType> class ConstructionTransaction { - using pointer = typename AllocatorType::pointer; - using size_type = typename AllocatorType::size_type; + using AllocatorTraits = absl::allocator_traits<AllocatorType>; + using Pointer = typename AllocatorTraits::pointer; + using SizeType = typename AllocatorTraits::size_type; public: explicit ConstructionTransaction(AllocatorType* alloc_ptr) @@ 
-216,12 +228,12 @@ class ConstructionTransaction {
   void operator=(const ConstructionTransaction&) = delete;
 
   AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
-  pointer& GetData() { return alloc_data_.template get<1>(); }
-  size_type& GetSize() { return size_; }
+  Pointer& GetData() { return alloc_data_.template get<1>(); }
+  SizeType& GetSize() { return size_; }
   bool DidConstruct() { return GetData() != nullptr; }
 
   template <typename ValueAdapter>
-  void Construct(pointer data, ValueAdapter* values_ptr, size_type size) {
+  void Construct(Pointer data, ValueAdapter* values_ptr, SizeType size) {
     inlined_vector_internal::ConstructElements(std::addressof(GetAllocator()),
                                                data, values_ptr, size);
     GetData() = data;
@@ -233,28 +245,29 @@ class ConstructionTransaction {
   }
 
  private:
-  container_internal::CompressedTuple<AllocatorType, pointer> alloc_data_;
-  size_type size_ = 0;
+  container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
+  SizeType size_ = 0;
 };
 
 template <typename T, size_t N, typename A>
 class Storage {
  public:
-  using allocator_type = A;
-  using value_type = typename allocator_type::value_type;
-  using pointer = typename allocator_type::pointer;
-  using const_pointer = typename allocator_type::const_pointer;
-  using reference = typename allocator_type::reference;
-  using const_reference = typename allocator_type::const_reference;
-  using rvalue_reference = typename allocator_type::value_type&&;
-  using size_type = typename allocator_type::size_type;
-  using difference_type = typename allocator_type::difference_type;
+  using AllocatorTraits = absl::allocator_traits<A>;
+  using allocator_type = typename AllocatorTraits::allocator_type;
+  using value_type = typename AllocatorTraits::value_type;
+  using pointer = typename AllocatorTraits::pointer;
+  using const_pointer = typename AllocatorTraits::const_pointer;
+  using size_type = typename AllocatorTraits::size_type;
+  using difference_type = typename AllocatorTraits::difference_type;
+
+  using reference = value_type&;
+  using const_reference = const value_type&;
+  using RValueReference = value_type&&;
   using iterator = pointer;
   using const_iterator = const_pointer;
   using reverse_iterator = std::reverse_iterator<iterator>;
   using const_reverse_iterator = std::reverse_iterator<const_iterator>;
   using MoveIterator = std::move_iterator<iterator>;
-  using AllocatorTraits = absl::allocator_traits<allocator_type>;
   using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<allocator_type>;
 
   using StorageView = inlined_vector_internal::StorageView<allocator_type>;
@@ -287,8 +300,7 @@ class Storage {
 
   Storage() : metadata_() {}
 
-  explicit Storage(const allocator_type& alloc)
-      : metadata_(alloc, /* empty and inlined */ 0) {}
+  explicit Storage(const allocator_type& alloc) : metadata_(alloc, {}) {}
 
   ~Storage() {
     pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
@@ -414,8 +426,8 @@ class Storage {
   void AcquireAllocatedData(AllocationTransaction* allocation_tx_ptr) {
     SetAllocatedData(allocation_tx_ptr->GetData(),
                      allocation_tx_ptr->GetCapacity());
-    allocation_tx_ptr->GetData() = nullptr;
-    allocation_tx_ptr->GetCapacity() = 0;
+
+    allocation_tx_ptr->Reset();
   }
 
   void MemcpyFrom(const Storage& other_storage) {
@@ -442,9 +454,7 @@ class Storage {
   };
 
   struct Inlined {
-    using InlinedDataElement =
-        absl::aligned_storage_t<sizeof(value_type), alignof(value_type)>;
-    InlinedDataElement inlined_data[N];
+    alignas(value_type) char inlined_data[sizeof(value_type[N])];
   };
 
   union Data {
@@ -465,18 +475,14 @@ auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
   assert(GetSize() == 0);
 
   pointer construct_data;
-
   if (new_size > GetInlinedCapacity()) {
     // Because this is only called from the `InlinedVector` constructors, it's
     // safe to take on the allocation with size `0`. If `ConstructElements(...)`
     // throws, deallocation will be automatically handled by `~Storage()`.
     size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size);
-    pointer new_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
-
-    SetAllocatedData(new_data, new_capacity);
+    construct_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
+    SetAllocatedData(construct_data, new_capacity);
     SetIsAllocated();
-
-    construct_data = new_data;
   } else {
     construct_data = GetInlinedData();
   }
@@ -503,9 +509,7 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
 
   if (new_size > storage_view.capacity) {
     size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
-    pointer new_data = allocation_tx.Allocate(new_capacity);
-
-    construct_loop = {new_data, new_size};
+    construct_loop = {allocation_tx.Allocate(new_capacity), new_size};
     destroy_loop = {storage_view.data, storage_view.size};
   } else if (new_size > storage_view.size) {
     assign_loop = {storage_view.data, storage_view.size};
@@ -539,12 +543,12 @@ template <typename ValueAdapter>
 auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
   StorageView storage_view = MakeStorageView();
 
-  AllocationTransaction allocation_tx(GetAllocPtr());
-  ConstructionTransaction construction_tx(GetAllocPtr());
-
   IteratorValueAdapter<MoveIterator> move_values(
       MoveIterator(storage_view.data));
 
+  AllocationTransaction allocation_tx(GetAllocPtr());
+  ConstructionTransaction construction_tx(GetAllocPtr());
+
   absl::Span<value_type> construct_loop;
   absl::Span<value_type> move_construct_loop;
   absl::Span<value_type> destroy_loop;
@@ -687,19 +691,17 @@ auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
       MoveIterator(storage_view.data));
 
   pointer construct_data;
-
   if (storage_view.size == storage_view.capacity) {
     size_type new_capacity = NextCapacity(storage_view.capacity);
-    pointer new_data = allocation_tx.Allocate(new_capacity);
-
-    construct_data = new_data;
+    construct_data = allocation_tx.Allocate(new_capacity);
   } else {
     construct_data = storage_view.data;
   }
 
-  pointer end = construct_data + storage_view.size;
+  pointer last_ptr = construct_data + storage_view.size;
 
-  AllocatorTraits::construct(*GetAllocPtr(), end, std::forward<Args>(args)...);
+  AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
+                             std::forward<Args>(args)...);
 
   if (allocation_tx.DidAllocate()) {
     ABSL_INTERNAL_TRY {
@@ -708,7 +710,7 @@ auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
                                           storage_view.size);
     }
     ABSL_INTERNAL_CATCH_ANY {
-      AllocatorTraits::destroy(*GetAllocPtr(), end);
+      AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
       ABSL_INTERNAL_RETHROW;
     }
 
@@ -721,14 +723,12 @@ auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
   }
 
   AddSize(1);
-  return *end;
+  return *last_ptr;
 }
 
 template <typename T, size_t N, typename A>
 auto Storage<T, N, A>::Erase(const_iterator from, const_iterator to)
     -> iterator {
-  assert(from != to);
-
   StorageView storage_view = MakeStorageView();
 
   size_type erase_size = std::distance(from, to);
@@ -793,12 +793,9 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
       MoveIterator(storage_view.data));
 
   pointer construct_data;
-
   if (storage_view.size > GetInlinedCapacity()) {
     size_type new_capacity = storage_view.size;
-    pointer new_data = allocation_tx.Allocate(new_capacity);
-
-    construct_data = new_data;
+    construct_data = allocation_tx.Allocate(new_capacity);
   } else {
     construct_data = GetInlinedData();
   }
@@ -889,7 +886,7 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
 }
 
 }  // namespace inlined_vector_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
index 3924b8aa..69cc85dd 100644
--- a/absl/container/internal/layout.h
+++ b/absl/container/internal/layout.h
@@ -188,7 +188,7 @@
 #endif
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 // A type wrapper that instructs `Layout` to use the specific alignment for the
@@ -735,7 +735,7 @@ class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
 };
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc
index 44d84607..8f3628a1 100644
--- a/absl/container/internal/layout_test.cc
+++ b/absl/container/internal/layout_test.cc
@@ -28,7 +28,7 @@
 #include "absl/types/span.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 namespace {
 
@@ -1563,5 +1563,5 @@ TEST(CompactString, Works) {
 
 }  // namespace
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/node_hash_policy.h b/absl/container/internal/node_hash_policy.h
index d7581360..4617162f 100644
--- a/absl/container/internal/node_hash_policy.h
+++ b/absl/container/internal/node_hash_policy.h
@@ -39,8 +39,10 @@
 #include <type_traits>
 #include <utility>
 
+#include "absl/base/config.h"
+
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 template <class Reference, class Policy>
@@ -84,7 +86,7 @@ struct node_hash_policy {
 };
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
diff --git a/absl/container/internal/node_hash_policy_test.cc b/absl/container/internal/node_hash_policy_test.cc
index d53b7364..84aabba9 100644
--- a/absl/container/internal/node_hash_policy_test.cc
+++ b/absl/container/internal/node_hash_policy_test.cc
@@ -21,7 +21,7 @@
 #include "absl/container/internal/hash_policy_traits.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 namespace {
 
@@ -65,5 +65,5 @@ TEST_F(NodeTest, transfer) {
 
 }  // namespace
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/raw_hash_map.h b/absl/container/internal/raw_hash_map.h
index 00caa373..0a02757d 100644
--- a/absl/container/internal/raw_hash_map.h
+++ b/absl/container/internal/raw_hash_map.h
@@ -24,7 +24,7 @@
 #include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 template <class Policy, class Hash, class Eq, class Alloc>
@@ -110,6 +110,9 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
     return insert_or_assign(k, v).first;
   }
 
+  // All `try_emplace()` overloads make the same guarantees regarding rvalue
+  // arguments as `std::unordered_map::try_emplace()`, namely that these
+  // functions will not move from rvalue arguments if insertions do not happen.
   template <class K = key_type, class... Args,
             typename std::enable_if<
                 !std::is_convertible<K, const_iterator>::value, int>::type = 0,
@@ -188,7 +191,7 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
 };
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
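The try_emplace() guarantee documented in raw_hash_map.h above is observable with a move-only mapped type. A minimal sketch (not part of the patch), assuming the public absl::flat_hash_map, which is backed by this raw_hash_map:

#include <cassert>
#include <memory>
#include <string>
#include <utility>

#include "absl/container/flat_hash_map.h"

// If the key already exists, try_emplace() performs no insertion and must
// leave its rvalue arguments intact, matching std::unordered_map::try_emplace().
void TryEmplaceDoesNotMoveOnFailure() {
  absl::flat_hash_map<std::string, std::unique_ptr<int>> m;
  m.try_emplace("key", std::make_unique<int>(1));  // inserted

  auto v = std::make_unique<int>(2);
  auto result = m.try_emplace("key", std::move(v));  // "key" already present
  assert(!result.second);  // no insertion happened...
  assert(v != nullptr);    // ...so `v` was not moved from
}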
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 02e74e21..919ac074 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -20,7 +20,7 @@
 #include "absl/base/config.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 constexpr size_t Group::kWidth;
@@ -44,5 +44,5 @@ bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
 }
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 7b379d4f..ca7be8d8 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -118,7 +118,7 @@
 #include "absl/utility/utility.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 template <size_t Width>
@@ -615,13 +615,17 @@ class raw_hash_set {
     iterator() {}
 
     // PRECONDITION: not an end() iterator.
-    reference operator*() const { return PolicyTraits::element(slot_); }
+    reference operator*() const {
+      assert_is_full();
+      return PolicyTraits::element(slot_);
+    }
 
     // PRECONDITION: not an end() iterator.
     pointer operator->() const { return &operator*(); }
 
     // PRECONDITION: not an end() iterator.
     iterator& operator++() {
+      assert_is_full();
       ++ctrl_;
       ++slot_;
       skip_empty_or_deleted();
@@ -635,6 +639,8 @@ class raw_hash_set {
     }
 
     friend bool operator==(const iterator& a, const iterator& b) {
+      a.assert_is_valid();
+      b.assert_is_valid();
       return a.ctrl_ == b.ctrl_;
     }
     friend bool operator!=(const iterator& a, const iterator& b) {
@@ -645,6 +651,11 @@ class raw_hash_set {
     iterator(ctrl_t* ctrl) : ctrl_(ctrl) {}  // for end()
     iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {}
 
+    void assert_is_full() const { assert(IsFull(*ctrl_)); }
+    void assert_is_valid() const {
+      assert(!ctrl_ || IsFull(*ctrl_) || *ctrl_ == kSentinel);
+    }
+
     void skip_empty_or_deleted() {
       while (IsEmptyOrDeleted(*ctrl_)) {
         // ctrl is not necessarily aligned to Group::kWidth. It is also likely
@@ -658,7 +669,7 @@ class raw_hash_set {
     }
 
     ctrl_t* ctrl_ = nullptr;
-    // To avoid uninitialized member warnigs, put slot_ in an anonymous union.
+    // To avoid uninitialized member warnings, put slot_ in an anonymous union.
     // The member is not initialized on singleton and end iterators.
     union {
       slot_type* slot_;
@@ -939,8 +950,11 @@ class raw_hash_set {
   //
   //   flat_hash_map<std::string, int> m;
   //   m.insert(std::make_pair("abc", 42));
+  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
+  // bug.
   template <class T, RequiresInsertable<T> = 0,
-            typename std::enable_if<IsDecomposable<T>::value, int>::type = 0,
+            class T2 = T,
+            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
             T* = nullptr>
   std::pair<iterator, bool> insert(T&& value) {
     return emplace(std::forward<T>(value));
@@ -976,8 +990,10 @@
     return emplace(std::move(value));
   }
 
-  template <class T, RequiresInsertable<T> = 0,
-            typename std::enable_if<IsDecomposable<T>::value, int>::type = 0,
+  // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
+  // bug.
+  template <class T, RequiresInsertable<T> = 0, class T2 = T,
+            typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
             T* = nullptr>
   iterator insert(const_iterator, T&& value) {
     return insert(std::forward<T>(value)).first;
@@ -1051,8 +1067,7 @@
   template <class... Args, typename std::enable_if<
                                !IsDecomposable<Args...>::value, int>::type = 0>
   std::pair<iterator, bool> emplace(Args&&... args) {
-    typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
-        raw;
+    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
     slot_type* slot = reinterpret_cast<slot_type*>(&raw);
 
     PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
@@ -1068,10 +1083,15 @@
   // Extension API: support for lazy emplace.
   //
   // Looks up key in the table. If found, returns the iterator to the element.
-  // Otherwise calls f with one argument of type raw_hash_set::constructor. f
-  // MUST call raw_hash_set::constructor with arguments as if a
-  // raw_hash_set::value_type is constructed, otherwise the behavior is
-  // undefined.
+  // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
+  //
+  // `f` must abide by several restrictions:
+  //  - it MUST call `raw_hash_set::constructor` with arguments as if a
+  //    `raw_hash_set::value_type` is constructed,
+  //  - it MUST NOT access the container before the call to
+  //    `raw_hash_set::constructor`, and
+  //  - it MUST NOT erase the lazily emplaced element.
+  // Doing any of these is undefined behavior.
   //
   // For example:
   //
@@ -1134,15 +1154,16 @@
   }
 
   // Erases the element pointed to by `it`.  Unlike `std::unordered_set::erase`,
-  // this method returns void to reduce algorithmic complexity to O(1).  In
-  // order to erase while iterating across a map, use the following idiom (which
-  // also works for standard containers):
+  // this method returns void to reduce algorithmic complexity to O(1).  The
+  // iterator is invalidated, so any increment should be done before calling
+  // erase.  In order to erase while iterating across a map, use the following
+  // idiom (which also works for standard containers):
   //
   // for (auto it = m.begin(), end = m.end(); it != end;) {
+  //   // `erase()` will invalidate `it`, so advance `it` first.
+  //   auto copy_it = it++;
   //   if (<pred>) {
-  //     m.erase(it++);
-  //   } else {
-  //     ++it;
+  //     m.erase(copy_it);
   //   }
   // }
   void erase(const_iterator cit) { erase(cit.inner_); }
 
@@ -1150,7 +1171,7 @@
   // This overload is necessary because otherwise erase<K>(const K&) would be
   // a better match if non-const iterator is passed as an argument.
   void erase(iterator it) {
-    assert(it != end());
+    it.assert_is_full();
     PolicyTraits::destroy(&alloc_ref(), it.slot_);
     erase_meta_only(it);
   }
@@ -1167,12 +1188,14 @@
   template <typename H, typename E>
   void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
     assert(this != &src);
-    for (auto it = src.begin(), e = src.end(); it != e; ++it) {
+    for (auto it = src.begin(), e = src.end(); it != e;) {
+      auto next = std::next(it);
       if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
                               PolicyTraits::element(it.slot_))
               .second) {
         src.erase_meta_only(it);
       }
+      it = next;
     }
   }
 
@@ -1182,6 +1205,7 @@
   }
 
   node_type extract(const_iterator position) {
+    position.inner_.assert_is_full();
     auto node =
         CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
     erase_meta_only(position);
@@ -1531,8 +1555,7 @@
     //   mark target as FULL
     //   repeat procedure for current slot with moved from element (target)
     ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
-    typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
-        raw;
+    alignas(slot_type) unsigned char raw[sizeof(slot_type)];
     size_t total_probe_length = 0;
     slot_type* slot = reinterpret_cast<slot_type*>(&raw);
     for (size_t i = 0; i != capacity_; ++i) {
@@ -1781,6 +1804,17 @@
       settings_{0, hasher{}, key_equal{}, allocator_type{}};
 };
 
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
+template <typename P, typename H, typename E, typename A, typename Predicate>
+void EraseIf(Predicate pred, raw_hash_set<P, H, E, A>* c) {
+  for (auto it = c->begin(), last = c->end(); it != last;) {
+    auto copy_it = it++;
+    if (pred(*copy_it)) {
+      c->erase(copy_it);
+    }
+  }
+}
+
 namespace hashtable_debug_internal {
 template <typename Set>
 struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
@@ -1842,7 +1876,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
 }  // namespace hashtable_debug_internal
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
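EraseIf() above packages the erase-while-iterating idiom documented earlier in this same header, and the public erase_if() overloads added later in this patch (for node_hash_map and node_hash_set) forward to it. A minimal sketch of the same pattern written out by hand, assuming absl::flat_hash_set as a representative raw_hash_set-backed container:

#include "absl/container/flat_hash_set.h"

// Erase odd elements manually. erase() invalidates the erased iterator, so
// the iterator is advanced before erase() is called, exactly as in EraseIf().
void EraseOddManually(absl::flat_hash_set<int>& s) {
  for (auto it = s.begin(), last = s.end(); it != last;) {
    auto copy_it = it++;  // advance first; erase() invalidates copy_it
    if (*copy_it % 2 == 1) {
      s.erase(copy_it);
    }
  }
}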
diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc
index 5188b3ae..7ac4b9f7 100644
--- a/absl/container/internal/raw_hash_set_allocator_test.cc
+++ b/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -20,7 +20,7 @@
 #include "absl/container/internal/tracked.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 namespace {
 
@@ -426,5 +426,5 @@ TEST_F(PropagateOnAll, Swap) {
 
 }  // namespace
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 2783f5c4..a96ae68a 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -35,7 +35,7 @@
 #include "absl/strings/string_view.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 struct RawHashSetTestOnlyAccess {
@@ -418,53 +418,6 @@ TEST(Table, Empty) {
   EXPECT_TRUE(t.empty());
 }
 
-#ifdef __GNUC__
-template <class T>
-ABSL_ATTRIBUTE_ALWAYS_INLINE inline void DoNotOptimize(const T& v) {
-  asm volatile("" : : "r,m"(v) : "memory");
-}
-#endif
-
-TEST(Table, Prefetch) {
-  IntTable t;
-  t.emplace(1);
-  // Works for both present and absent keys.
-  t.prefetch(1);
-  t.prefetch(2);
-
-  // Do not run in debug mode, when prefetch is not implemented, or when
-  // sanitizers are enabled, or on WebAssembly.
-#if defined(NDEBUG) && defined(__GNUC__) && defined(__x86_64__) && \
-    !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
-    !defined(THREAD_SANITIZER) && !defined(UNDEFINED_BEHAVIOR_SANITIZER) && \
-    !defined(__EMSCRIPTEN__)
-  const auto now = [] { return absl::base_internal::CycleClock::Now(); };
-
-  // Make size enough to not fit in L2 cache (16.7 Mb)
-  static constexpr int size = 1 << 22;
-  for (int i = 0; i < size; ++i) t.insert(i);
-
-  int64_t no_prefetch = 0, prefetch = 0;
-  for (int iter = 0; iter < 10; ++iter) {
-    int64_t time = now();
-    for (int i = 0; i < size; ++i) {
-      DoNotOptimize(t.find(i));
-    }
-    no_prefetch += now() - time;
-
-    time = now();
-    for (int i = 0; i < size; ++i) {
-      t.prefetch(i + 20);
-      DoNotOptimize(t.find(i));
-    }
-    prefetch += now() - time;
-  }
-
-  // no_prefetch is at least 30% slower.
-  EXPECT_GE(1.0 * no_prefetch / prefetch, 1.3);
-#endif
-}
-
 TEST(Table, LookupEmpty) {
   IntTable t;
   auto it = t.find(0);
@@ -1838,10 +1791,11 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
   IntTable t;
 
   // Extra simple "regexp" as regexp support is highly varied across platforms.
-  constexpr char kDeathMsg[] = "it != end";
+  constexpr char kDeathMsg[] = "IsFull";
   EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
 }
 
+#if defined(ABSL_HASHTABLEZ_SAMPLE)
 TEST(RawHashSamplerTest, Sample) {
   // Enable the feature even if the prod default is off.
   SetHashtablezEnabled(true);
@@ -1862,6 +1816,7 @@ TEST(RawHashSamplerTest, Sample) {
   EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
               0.01, 0.005);
 }
+#endif  // ABSL_HASHTABLEZ_SAMPLE
 
 TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
   // Enable the feature even if the prod default is off.
@@ -1912,5 +1867,5 @@ TEST(Sanitizer, PoisoningOnErase) {
 }
 
 }  // namespace
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/test_instance_tracker.cc b/absl/container/internal/test_instance_tracker.cc
index f4b283fd..f9947f04 100644
--- a/absl/container/internal/test_instance_tracker.cc
+++ b/absl/container/internal/test_instance_tracker.cc
@@ -15,7 +15,7 @@
 #include "absl/container/internal/test_instance_tracker.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace test_internal {
 int BaseCountedInstance::num_instances_ = 0;
 int BaseCountedInstance::num_live_instances_ = 0;
@@ -25,5 +25,5 @@ int BaseCountedInstance::num_swaps_ = 0;
 int BaseCountedInstance::num_comparisons_ = 0;
 
 }  // namespace test_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/test_instance_tracker.h b/absl/container/internal/test_instance_tracker.h
index ab7f9f22..5ff6fd71 100644
--- a/absl/container/internal/test_instance_tracker.h
+++ b/absl/container/internal/test_instance_tracker.h
@@ -21,7 +21,7 @@
 #include "absl/types/compare.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace test_internal {
 
 // A type that counts number of occurrences of the type, the live occurrences of
@@ -268,7 +268,7 @@ class MovableOnlyInstance : public BaseCountedInstance {
 };
 
 }  // namespace test_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
diff --git a/absl/container/internal/tracked.h b/absl/container/internal/tracked.h
index e9e6b95d..29f5829f 100644
--- a/absl/container/internal/tracked.h
+++ b/absl/container/internal/tracked.h
@@ -16,11 +16,14 @@
 #define ABSL_CONTAINER_INTERNAL_TRACKED_H_
 
 #include <stddef.h>
+
 #include <memory>
 #include <utility>
 
+#include "absl/base/config.h"
+
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 // A class that tracks its copies and moves so that it can be queried in tests.
@@ -74,7 +77,7 @@ class Tracked {
 };
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_TRACKED_H_
diff --git a/absl/container/internal/unordered_map_constructor_test.h b/absl/container/internal/unordered_map_constructor_test.h
index b64b5520..76ee95e6 100644
--- a/absl/container/internal/unordered_map_constructor_test.h
+++ b/absl/container/internal/unordered_map_constructor_test.h
@@ -24,7 +24,7 @@
 #include "absl/container/internal/hash_policy_testing.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 template <class UnordMap>
@@ -483,7 +483,7 @@ REGISTER_TYPED_TEST_CASE_P(
     AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
diff --git a/absl/container/internal/unordered_map_lookup_test.h b/absl/container/internal/unordered_map_lookup_test.h
index 9ad78a79..e76421e5 100644
--- a/absl/container/internal/unordered_map_lookup_test.h
+++ b/absl/container/internal/unordered_map_lookup_test.h
@@ -21,7 +21,7 @@
 #include "absl/container/internal/hash_policy_testing.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 template <class UnordMap>
@@ -111,7 +111,7 @@ REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
                            EqualRange);
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
diff --git a/absl/container/internal/unordered_map_members_test.h b/absl/container/internal/unordered_map_members_test.h
index c4600405..7d48cdb8 100644
--- a/absl/container/internal/unordered_map_members_test.h
+++ b/absl/container/internal/unordered_map_members_test.h
@@ -21,7 +21,7 @@
 #include "absl/meta/type_traits.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 template <class UnordMap>
@@ -81,7 +81,7 @@ TYPED_TEST_P(MembersTest, BeginEnd) {
 REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
diff --git a/absl/container/internal/unordered_map_modifiers_test.h b/absl/container/internal/unordered_map_modifiers_test.h
index 89dd7894..b8c513f1 100644
--- a/absl/container/internal/unordered_map_modifiers_test.h
+++ b/absl/container/internal/unordered_map_modifiers_test.h
@@ -15,13 +15,15 @@
 #ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
 #define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
 
+#include <memory>
+
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/container/internal/hash_generator_testing.h"
 #include "absl/container/internal/hash_policy_testing.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 template <class UnordMap>
@@ -268,8 +270,47 @@ REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint, Emplace,
                            EmplaceHint, TryEmplace, TryEmplaceHint, Erase,
                            EraseRange, EraseKey, Swap);
 
+template <typename Type>
+struct is_unique_ptr : std::false_type {};
+
+template <typename Type>
+struct is_unique_ptr<std::unique_ptr<Type>> : std::true_type {};
+
+template <class UnordMap>
+class UniquePtrModifiersTest : public ::testing::Test {
+ protected:
+  UniquePtrModifiersTest() {
+    static_assert(is_unique_ptr<typename UnordMap::mapped_type>::value,
+                  "UniquePtrModifiersTest may only be called with a "
+                  "std::unique_ptr value type.");
+  }
+};
+
+TYPED_TEST_SUITE_P(UniquePtrModifiersTest);
+
+// Test that we do not move from rvalue arguments if an insertion does not
+// happen.
+TYPED_TEST_P(UniquePtrModifiersTest, TryEmplace) {
+#ifdef UNORDERED_MAP_CXX17
+  using T = hash_internal::GeneratedType<TypeParam>;
+  using V = typename TypeParam::mapped_type;
+  T val = hash_internal::Generator<T>()();
+  TypeParam m;
+  auto p = m.try_emplace(val.first, std::move(val.second));
+  EXPECT_TRUE(p.second);
+  // A moved from std::unique_ptr is guaranteed to be nullptr.
+  EXPECT_EQ(val.second, nullptr);
+  T val2 = {val.first, hash_internal::Generator<V>()()};
+  p = m.try_emplace(val2.first, std::move(val2.second));
+  EXPECT_FALSE(p.second);
+  EXPECT_NE(val2.second, nullptr);
+#endif
+}
+
+REGISTER_TYPED_TEST_SUITE_P(UniquePtrModifiersTest, TryEmplace);
+
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
diff --git a/absl/container/internal/unordered_map_test.cc b/absl/container/internal/unordered_map_test.cc
index 51a90af8..9cbf512f 100644
--- a/absl/container/internal/unordered_map_test.cc
+++ b/absl/container/internal/unordered_map_test.cc
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include <memory>
 #include <unordered_map>
 
 #include "absl/container/internal/unordered_map_constructor_test.h"
@@ -20,7 +21,7 @@
 #include "absl/container/internal/unordered_map_modifiers_test.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 namespace {
 
@@ -36,7 +37,14 @@ INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, LookupTest, MapTypes);
 INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, MembersTest, MapTypes);
 INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, ModifiersTest, MapTypes);
 
+using UniquePtrMapTypes = ::testing::Types<std::unordered_map<
+    int, std::unique_ptr<int>, StatefulTestingHash, StatefulTestingEqual,
+    Alloc<std::pair<const int, std::unique_ptr<int>>>>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, UniquePtrModifiersTest,
+                               UniquePtrMapTypes);
+
 }  // namespace
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/unordered_set_constructor_test.h b/absl/container/internal/unordered_set_constructor_test.h
index ac73a896..41165b05 100644
--- a/absl/container/internal/unordered_set_constructor_test.h
+++ b/absl/container/internal/unordered_set_constructor_test.h
@@ -26,7 +26,7 @@
 #include "absl/meta/type_traits.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 template <class UnordMap>
@@ -490,7 +490,7 @@ REGISTER_TYPED_TEST_CASE_P(
     AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
diff --git a/absl/container/internal/unordered_set_lookup_test.h b/absl/container/internal/unordered_set_lookup_test.h
index 722fb1c2..8f2f4b20 100644
--- a/absl/container/internal/unordered_set_lookup_test.h
+++ b/absl/container/internal/unordered_set_lookup_test.h
@@ -21,7 +21,7 @@
 #include "absl/container/internal/hash_policy_testing.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 template <class UnordSet>
@@ -85,7 +85,7 @@ TYPED_TEST_P(LookupTest, EqualRange) {
 
 REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
diff --git a/absl/container/internal/unordered_set_members_test.h b/absl/container/internal/unordered_set_members_test.h
index 756a95cb..4c5e104a 100644
--- a/absl/container/internal/unordered_set_members_test.h
+++ b/absl/container/internal/unordered_set_members_test.h
@@ -21,7 +21,7 @@
 #include "absl/meta/type_traits.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 template <class UnordSet>
@@ -80,7 +80,7 @@ TYPED_TEST_P(MembersTest, BeginEnd) {
 REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
diff --git a/absl/container/internal/unordered_set_modifiers_test.h b/absl/container/internal/unordered_set_modifiers_test.h
index d3e534d3..26be58d9 100644
--- a/absl/container/internal/unordered_set_modifiers_test.h
+++ b/absl/container/internal/unordered_set_modifiers_test.h
@@ -21,7 +21,7 @@
 #include "absl/container/internal/hash_policy_testing.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 
 template <class UnordSet>
@@ -184,7 +184,7 @@ REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
                            EraseKey, Swap);
 
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
diff --git a/absl/container/internal/unordered_set_test.cc b/absl/container/internal/unordered_set_test.cc
index 2356e187..a134b539 100644
--- a/absl/container/internal/unordered_set_test.cc
+++ b/absl/container/internal/unordered_set_test.cc
@@ -20,7 +20,7 @@
 #include "absl/container/internal/unordered_set_modifiers_test.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 namespace {
 
@@ -37,5 +37,5 @@ INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, ModifiersTest, SetTypes);
 
 }  // namespace
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h
index addf120f..fccea184 100644
--- a/absl/container/node_hash_map.h
+++ b/absl/container/node_hash_map.h
@@ -48,7 +48,7 @@
 #include "absl/memory/memory.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 template <class Key, class Value>
 class NodeHashMapPolicy;
@@ -352,6 +352,10 @@ class node_hash_map
   // Inserts (via copy or move) the element of the specified key into the
   // `node_hash_map` using the position of `hint` as a non-binding suggestion
   // for where to begin the insertion search.
+  //
+  // All `try_emplace()` overloads make the same guarantees regarding rvalue
+  // arguments as `std::unordered_map::try_emplace()`, namely that these
+  // functions will not move from rvalue arguments if insertions do not happen.
   using Base::try_emplace;
 
   // node_hash_map::extract()
@@ -518,6 +522,15 @@ class node_hash_map
   void resize(typename Base::size_type hint) { this->rehash(hint); }
 };
 
+// erase_if(node_hash_map<>, Pred)
+//
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
+template <typename K, typename V, typename H, typename E, typename A,
+          typename Predicate>
+void erase_if(node_hash_map<K, V, H, E, A>& c, Predicate pred) {
+  container_internal::EraseIf(pred, &c);
+}
+
 namespace container_internal {
 
 template <class Key, class Value>
@@ -578,7 +591,7 @@ struct IsUnorderedContainer<
 
 }  // namespace container_algorithm_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_NODE_HASH_MAP_H_
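The erase_if() overload above lives in namespace absl, so argument-dependent lookup finds it from an unqualified call. A short usage sketch (the DropEmptyValues function is illustrative, not part of the patch):

#include <string>
#include <utility>

#include "absl/container/node_hash_map.h"

// Remove every entry whose mapped string is empty. The unqualified call to
// erase_if() resolves to the absl overload via ADL on the argument type.
void DropEmptyValues(absl::node_hash_map<int, std::string>& m) {
  erase_if(m, [](const std::pair<const int, std::string>& kv) {
    return kv.second.empty();
  });
}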
diff --git a/absl/container/node_hash_map_test.cc b/absl/container/node_hash_map_test.cc
index 7ce7ca02..5d74b814 100644
--- a/absl/container/node_hash_map_test.cc
+++ b/absl/container/node_hash_map_test.cc
@@ -21,11 +21,12 @@
 #include "absl/container/internal/unordered_map_modifiers_test.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 namespace {
 
 using ::testing::Field;
+using ::testing::IsEmpty;
 using ::testing::Pair;
 using ::testing::UnorderedElementsAre;
 
@@ -216,7 +217,44 @@ TEST(NodeHashMap, MergeExtractInsert) {
   EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70), Elem(17, 23)));
 }
 
+bool FirstIsEven(std::pair<const int, int> p) { return p.first % 2 == 0; }
+
+TEST(NodeHashMap, EraseIf) {
+  // Erase all elements.
+  {
+    node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+    erase_if(s, [](std::pair<const int, int>) { return true; });
+    EXPECT_THAT(s, IsEmpty());
+  }
+  // Erase no elements.
+  {
+    node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+    erase_if(s, [](std::pair<const int, int>) { return false; });
+    EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3),
+                                        Pair(4, 4), Pair(5, 5)));
+  }
+  // Erase specific elements.
+  {
+    node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+    erase_if(s,
+             [](std::pair<const int, int> kvp) { return kvp.first % 2 == 1; });
+    EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4)));
+  }
+  // Predicate is function reference.
+  {
+    node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+    erase_if(s, FirstIsEven);
+    EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
+  }
+  // Predicate is function pointer.
+  {
+    node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+    erase_if(s, &FirstIsEven);
+    EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
+  }
+}
+
 }  // namespace
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
index 103d32d2..ad54b6dc 100644
--- a/absl/container/node_hash_set.h
+++ b/absl/container/node_hash_set.h
@@ -44,7 +44,7 @@
 #include "absl/memory/memory.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 template <typename T>
 struct NodeHashSetPolicy;
@@ -77,7 +77,7 @@ struct NodeHashSetPolicy;
 //
 //   // Create a node hash set of three strings
 //   absl::node_hash_map<std::string, std::string> ducks =
-//     {"huey", "dewey"}, "louie"};
+//     {"huey", "dewey", "louie"};
 //
 //  // Insert a new element into the node hash map
 //  ducks.insert("donald"};
@@ -111,7 +111,7 @@ class node_hash_set
   //  * Initializer List constructor
   //
   //   absl::node_hash_set<std::string> set2 =
-  //       {{"huey"}, {"dewey"}, {"louie"},};
+  //       {{"huey"}, {"dewey"}, {"louie"}};
   //
   //  * Copy constructor
   //
@@ -435,6 +443,14 @@ class node_hash_set
   void resize(typename Base::size_type hint) { this->rehash(hint); }
 };
 
+// erase_if(node_hash_set<>, Pred)
+//
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
+template <typename T, typename H, typename E, typename A, typename Predicate>
+void erase_if(node_hash_set<T, H, E, A>& c, Predicate pred) {
+  container_internal::EraseIf(pred, &c);
+}
+
 namespace container_internal {
 
 template <class T>
@@ -484,7 +492,7 @@ struct IsUnorderedContainer<absl::node_hash_set<Key, Hash, KeyEqual, Allocator>>
     : std::true_type {};
 
 }  // namespace container_algorithm_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
 
 #endif  // ABSL_CONTAINER_NODE_HASH_SET_H_
diff --git a/absl/container/node_hash_set_test.cc b/absl/container/node_hash_set_test.cc
index 65d125ed..7ddad202 100644
--- a/absl/container/node_hash_set_test.cc
+++ b/absl/container/node_hash_set_test.cc
@@ -20,11 +20,12 @@
 #include "absl/container/internal/unordered_set_modifiers_test.h"
 
 namespace absl {
-inline namespace lts_2019_08_08 {
+ABSL_NAMESPACE_BEGIN
 namespace container_internal {
 namespace {
 using ::absl::container_internal::hash_internal::Enum;
 using ::absl::container_internal::hash_internal::EnumClass;
+using ::testing::IsEmpty;
 using ::testing::Pointee;
 using ::testing::UnorderedElementsAre;
 
@@ -101,7 +102,42 @@ TEST(NodeHashSet, MergeExtractInsert) {
   EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23)));
 }
 
+bool IsEven(int k) { return k % 2 == 0; }
+
+TEST(NodeHashSet, EraseIf) {
+  // Erase all elements.
+  {
+    node_hash_set<int> s = {1, 2, 3, 4, 5};
+    erase_if(s, [](int) { return true; });
+    EXPECT_THAT(s, IsEmpty());
+  }
+  // Erase no elements.
+  {
+    node_hash_set<int> s = {1, 2, 3, 4, 5};
+    erase_if(s, [](int) { return false; });
+    EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5));
+  }
+  // Erase specific elements.
+  {
+    node_hash_set<int> s = {1, 2, 3, 4, 5};
+    erase_if(s, [](int k) { return k % 2 == 1; });
+    EXPECT_THAT(s, UnorderedElementsAre(2, 4));
+  }
+  // Predicate is function reference.
+  {
+    node_hash_set<int> s = {1, 2, 3, 4, 5};
+    erase_if(s, IsEven);
+    EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
+  }
+  // Predicate is function pointer.
+  {
+    node_hash_set<int> s = {1, 2, 3, 4, 5};
+    erase_if(s, &IsEven);
+    EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
+  }
+}
+
 }  // namespace
 }  // namespace container_internal
-}  // inline namespace lts_2019_08_08
+ABSL_NAMESPACE_END
 }  // namespace absl
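The substitution repeated throughout this diff replaces the hard-coded LTS inline namespace with the ABSL_NAMESPACE_BEGIN/ABSL_NAMESPACE_END macros, so the namespace name is chosen in one place instead of being spelled in every file. A hypothetical reconstruction of the scheme (the real definitions live in absl/base/config.h and are driven by absl/base/options.h; this sketch is not the verbatim source):

// Sketch: expand to an inline namespace, or to nothing, depending on how
// options.h configures the build.
#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0
#define ABSL_NAMESPACE_BEGIN
#define ABSL_NAMESPACE_END
#else
#define ABSL_NAMESPACE_BEGIN \
  inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME {
#define ABSL_NAMESPACE_END }
#endif

// Every translation unit then opens its namespaces uniformly:
//   namespace absl {
//   ABSL_NAMESPACE_BEGIN
//   ...
//   ABSL_NAMESPACE_END
//   }  // namespace absl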