Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/BUILD.bazel | 89
-rw-r--r--  absl/container/CMakeLists.txt | 138
-rw-r--r--  absl/container/btree_benchmark.cc | 64
-rw-r--r--  absl/container/btree_map.h | 153
-rw-r--r--  absl/container/btree_set.h | 178
-rw-r--r--  absl/container/btree_test.cc | 461
-rw-r--r--  absl/container/fixed_array.h | 7
-rw-r--r--  absl/container/flat_hash_map.h | 15
-rw-r--r--  absl/container/flat_hash_map_test.cc | 41
-rw-r--r--  absl/container/flat_hash_set.h | 24
-rw-r--r--  absl/container/flat_hash_set_test.cc | 10
-rw-r--r--  absl/container/inlined_vector.h | 217
-rw-r--r--  absl/container/inlined_vector_test.cc | 17
-rw-r--r--  absl/container/internal/btree.h | 1090
-rw-r--r--  absl/container/internal/btree_container.h | 53
-rw-r--r--  absl/container/internal/common.h | 11
-rw-r--r--  absl/container/internal/compressed_tuple_test.cc | 10
-rw-r--r--  absl/container/internal/container_memory.h | 38
-rw-r--r--  absl/container/internal/counting_allocator.h | 8
-rw-r--r--  absl/container/internal/hash_function_defaults.h | 32
-rw-r--r--  absl/container/internal/hash_function_defaults_test.cc | 2
-rw-r--r--  absl/container/internal/hash_generator_testing.h | 21
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc | 190
-rw-r--r--  absl/container/internal/hashtablez_sampler.h | 147
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc | 93
-rw-r--r--  absl/container/internal/have_sse.h | 50
-rw-r--r--  absl/container/internal/inlined_vector.h | 848
-rw-r--r--  absl/container/internal/layout_test.cc | 6
-rw-r--r--  absl/container/internal/node_slot_policy.h (renamed from absl/container/internal/node_hash_policy.h) | 8
-rw-r--r--  absl/container/internal/node_slot_policy_test.cc (renamed from absl/container/internal/node_hash_policy_test.cc) | 4
-rw-r--r--  absl/container/internal/raw_hash_map.h | 5
-rw-r--r--  absl/container/internal/raw_hash_set.cc | 26
-rw-r--r--  absl/container/internal/raw_hash_set.h | 986
-rw-r--r--  absl/container/internal/raw_hash_set_benchmark.cc | 77
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc | 295
-rw-r--r--  absl/container/internal/unordered_map_constructor_test.h | 40
-rw-r--r--  absl/container/internal/unordered_map_lookup_test.h | 4
-rw-r--r--  absl/container/internal/unordered_map_modifiers_test.h | 42
-rw-r--r--  absl/container/internal/unordered_set_constructor_test.h | 2
-rw-r--r--  absl/container/internal/unordered_set_lookup_test.h | 2
-rw-r--r--  absl/container/internal/unordered_set_modifiers_test.h | 37
-rw-r--r--  absl/container/node_hash_map.h | 19
-rw-r--r--  absl/container/node_hash_map_test.cc | 15
-rw-r--r--  absl/container/node_hash_set.h | 15
-rw-r--r--  absl/container/node_hash_set_test.cc | 10
-rw-r--r--  absl/container/sample_element_size_test.cc | 114
46 files changed, 3771 insertions, 1943 deletions
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index f22fdc60..d01d78e5 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -14,7 +14,6 @@
# limitations under the License.
#
-load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
load(
"//absl:copts/configure_copts.bzl",
"ABSL_DEFAULT_COPTS",
@@ -218,11 +217,6 @@ cc_test(
],
)
-NOTEST_TAGS_NONMOBILE = [
- "no_test_darwin_x86_64",
- "no_test_loonix",
-]
-
NOTEST_TAGS_MOBILE = [
"no_test_android_arm",
"no_test_android_arm64",
@@ -230,8 +224,6 @@ NOTEST_TAGS_MOBILE = [
"no_test_ios_x86_64",
]
-NOTEST_TAGS = NOTEST_TAGS_MOBILE + NOTEST_TAGS_NONMOBILE
-
cc_library(
name = "flat_hash_map",
hdrs = ["flat_hash_map.h"],
@@ -242,6 +234,7 @@ cc_library(
":hash_function_defaults",
":raw_hash_map",
"//absl/algorithm:container",
+ "//absl/base:core_headers",
"//absl/memory",
],
)
@@ -251,7 +244,7 @@ cc_test(
srcs = ["flat_hash_map_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":flat_hash_map",
":hash_generator_testing",
@@ -285,7 +278,7 @@ cc_test(
srcs = ["flat_hash_set_test.cc"],
copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":flat_hash_set",
":hash_generator_testing",
@@ -308,9 +301,10 @@ cc_library(
deps = [
":container_memory",
":hash_function_defaults",
- ":node_hash_policy",
+ ":node_slot_policy",
":raw_hash_map",
"//absl/algorithm:container",
+ "//absl/base:core_headers",
"//absl/memory",
],
)
@@ -320,7 +314,7 @@ cc_test(
srcs = ["node_hash_map_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":hash_generator_testing",
":node_hash_map",
@@ -340,9 +334,10 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_function_defaults",
- ":node_hash_policy",
+ ":node_slot_policy",
":raw_hash_set",
"//absl/algorithm:container",
+ "//absl/base:core_headers",
"//absl/memory",
],
)
@@ -352,7 +347,7 @@ cc_test(
srcs = ["node_hash_set_test.cc"],
copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":node_hash_set",
":unordered_set_constructor_test",
@@ -381,7 +376,7 @@ cc_test(
srcs = ["internal/container_memory_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":container_memory",
":test_instance_tracker",
@@ -408,7 +403,7 @@ cc_test(
srcs = ["internal/hash_function_defaults_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS,
+ tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"],
deps = [
":hash_function_defaults",
"//absl/hash",
@@ -507,12 +502,13 @@ cc_library(
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
- ":have_sse",
"//absl/base",
+ "//absl/base:config",
"//absl/base:core_headers",
- "//absl/base:exponential_biased",
"//absl/debugging:stacktrace",
"//absl/memory",
+ "//absl/profiling:exponential_biased",
+ "//absl/profiling:sample_recorder",
"//absl/synchronization",
"//absl/utility",
],
@@ -522,10 +518,14 @@ cc_test(
name = "hashtablez_sampler_test",
srcs = ["internal/hashtablez_sampler_test.cc"],
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm",
+ ],
deps = [
":hashtablez_sampler",
- ":have_sse",
+ "//absl/base:config",
"//absl/base:core_headers",
+ "//absl/profiling:sample_recorder",
"//absl/synchronization",
"//absl/synchronization:thread_pool",
"//absl/time",
@@ -534,21 +534,21 @@ cc_test(
)
cc_library(
- name = "node_hash_policy",
- hdrs = ["internal/node_hash_policy.h"],
+ name = "node_slot_policy",
+ hdrs = ["internal/node_slot_policy.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = ["//absl/base:config"],
)
cc_test(
- name = "node_hash_policy_test",
- srcs = ["internal/node_hash_policy_test.cc"],
+ name = "node_slot_policy_test",
+ srcs = ["internal/node_slot_policy_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_traits",
- ":node_hash_policy",
+ ":node_slot_policy",
"@com_google_googletest//:gtest_main",
],
)
@@ -566,14 +566,6 @@ cc_library(
)
cc_library(
- name = "have_sse",
- hdrs = ["internal/have_sse.h"],
- copts = ABSL_DEFAULT_COPTS,
- linkopts = ABSL_DEFAULT_LINKOPTS,
- visibility = ["//visibility:private"],
-)
-
-cc_library(
name = "common",
hdrs = ["internal/common.h"],
copts = ABSL_DEFAULT_COPTS,
@@ -597,11 +589,10 @@ cc_library(
":hash_policy_traits",
":hashtable_debug_hooks",
":hashtablez_sampler",
- ":have_sse",
- ":layout",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:endian",
+ "//absl/base:prefetch",
"//absl/memory",
"//absl/meta:type_traits",
"//absl/numeric:bits",
@@ -614,7 +605,7 @@ cc_test(
srcs = ["internal/raw_hash_set_test.cc"],
copts = ABSL_TEST_COPTS,
linkstatic = 1,
- tags = NOTEST_TAGS,
+ tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"],
deps = [
":container_memory",
":hash_function_defaults",
@@ -624,6 +615,7 @@ cc_test(
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
+ "//absl/base:prefetch",
"//absl/base:raw_logging_internal",
"//absl/strings",
"@com_google_googletest//:gtest_main",
@@ -704,7 +696,7 @@ cc_test(
srcs = ["internal/layout_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS,
+ tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"],
visibility = ["//visibility:private"],
deps = [
":layout",
@@ -851,7 +843,7 @@ cc_test(
srcs = ["internal/unordered_set_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":unordered_set_constructor_test",
":unordered_set_lookup_test",
@@ -866,7 +858,7 @@ cc_test(
srcs = ["internal/unordered_map_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":unordered_map_constructor_test",
":unordered_map_lookup_test",
@@ -876,6 +868,22 @@ cc_test(
],
)
+cc_test(
+ name = "sample_element_size_test",
+ srcs = ["sample_element_size_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = ["no_test_loonix"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":flat_hash_map",
+ ":flat_hash_set",
+ ":node_hash_map",
+ ":node_hash_set",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
cc_library(
name = "btree",
srcs = [
@@ -895,6 +903,7 @@ cc_library(
":container_memory",
":layout",
"//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
"//absl/base:throw_delegate",
"//absl/memory",
"//absl/meta:type_traits",
@@ -930,6 +939,10 @@ cc_test(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
shard_count = 10,
+ tags = [
+ "no_test_ios",
+ "no_test_wasm",
+ ],
visibility = ["//visibility:private"],
deps = [
":btree",
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index 2d7d0e65..9b5c59a4 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -35,12 +35,14 @@ absl_cc_library(
absl::core_headers
absl::layout
absl::memory
+ absl::raw_logging_internal
absl::strings
absl::throw_delegate
absl::type_traits
absl::utility
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
btree_test_common
@@ -80,9 +82,10 @@ absl_cc_test(
absl::strings
absl::test_instance_tracker
absl::type_traits
- gmock_main
+ GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
compressed_tuple
@@ -109,7 +112,7 @@ absl_cc_test(
absl::optional
absl::test_instance_tracker
absl::utility
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -144,7 +147,7 @@ absl_cc_test(
absl::exception_testing
absl::hash_testing
absl::memory
- gmock_main
+ GTest::gmock_main
)
absl_cc_test(
@@ -158,9 +161,10 @@ absl_cc_test(
absl::fixed_array
absl::config
absl::exception_safety_testing
- gmock_main
+ GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
inlined_vector_internal
@@ -193,6 +197,7 @@ absl_cc_library(
PUBLIC
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
counting_allocator
@@ -222,7 +227,7 @@ absl_cc_test(
absl::memory
absl::raw_logging_internal
absl::strings
- gmock_main
+ GTest::gmock_main
)
absl_cc_test(
@@ -236,9 +241,10 @@ absl_cc_test(
absl::inlined_vector
absl::config
absl::exception_safety_testing
- gmock_main
+ GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
test_instance_tracker
@@ -262,7 +268,7 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::test_instance_tracker
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -274,6 +280,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::container_memory
+ absl::core_headers
absl::hash_function_defaults
absl::raw_hash_map
absl::algorithm_container
@@ -297,7 +304,7 @@ absl_cc_test(
absl::unordered_map_modifiers_test
absl::any
absl::raw_logging_internal
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -335,7 +342,7 @@ absl_cc_test(
absl::memory
absl::raw_logging_internal
absl::strings
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -347,8 +354,9 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::container_memory
+ absl::core_headers
absl::hash_function_defaults
- absl::node_hash_policy
+ absl::node_slot_policy
absl::raw_hash_map
absl::algorithm_container
absl::memory
@@ -370,7 +378,7 @@ absl_cc_test(
absl::unordered_map_lookup_test
absl::unordered_map_members_test
absl::unordered_map_modifiers_test
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -381,8 +389,9 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
+ absl::core_headers
absl::hash_function_defaults
- absl::node_hash_policy
+ absl::node_slot_policy
absl::raw_hash_set
absl::algorithm_container
absl::memory
@@ -404,9 +413,10 @@ absl_cc_test(
absl::unordered_set_lookup_test
absl::unordered_set_members_test
absl::unordered_set_modifiers_test
- gmock_main
+ GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
container_memory
@@ -433,9 +443,10 @@ absl_cc_test(
absl::container_memory
absl::strings
absl::test_instance_tracker
- gmock_main
+ GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hash_function_defaults
@@ -465,9 +476,10 @@ absl_cc_test(
absl::hash
absl::random_random
absl::strings
- gmock_main
+ GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hash_generator_testing
@@ -485,6 +497,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hash_policy_testing
@@ -507,9 +520,10 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_testing
- gmock_main
+ GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hash_policy_traits
@@ -531,9 +545,10 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_traits
- gmock_main
+ GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hashtablez_sampler
@@ -546,8 +561,9 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::base
+ absl::config
absl::exponential_biased
- absl::have_sse
+ absl::sample_recorder
absl::synchronization
)
@@ -559,11 +575,12 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
+ absl::config
absl::hashtablez_sampler
- absl::have_sse
- gmock_main
+ GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hashtable_debug
@@ -575,6 +592,7 @@ absl_cc_library(
absl::hashtable_debug_hooks
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hashtable_debug_hooks
@@ -587,20 +605,12 @@ absl_cc_library(
PUBLIC
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
- have_sse
- HDRS
- "internal/have_sse.h"
- COPTS
- ${ABSL_DEFAULT_COPTS}
-)
-
-absl_cc_library(
- NAME
- node_hash_policy
+ node_slot_policy
HDRS
- "internal/node_hash_policy.h"
+ "internal/node_slot_policy.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
@@ -610,17 +620,18 @@ absl_cc_library(
absl_cc_test(
NAME
- node_hash_policy_test
+ node_slot_policy_test
SRCS
- "internal/node_hash_policy_test.cc"
+ "internal/node_slot_policy_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_traits
- absl::node_hash_policy
- gmock_main
+ absl::node_slot_policy
+ GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
raw_hash_map
@@ -635,6 +646,7 @@ absl_cc_library(
PUBLIC
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
container_common
@@ -646,6 +658,7 @@ absl_cc_library(
absl::type_traits
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
raw_hash_set
@@ -665,11 +678,10 @@ absl_cc_library(
absl::endian
absl::hash_policy_traits
absl::hashtable_debug_hooks
- absl::have_sse
- absl::layout
absl::memory
absl::meta
absl::optional
+ absl::prefetch
absl::utility
absl::hashtablez_sampler
PUBLIC
@@ -691,9 +703,10 @@ absl_cc_test(
absl::base
absl::config
absl::core_headers
+ absl::prefetch
absl::raw_logging_internal
absl::strings
- gmock_main
+ GTest::gmock_main
)
absl_cc_test(
@@ -707,9 +720,10 @@ absl_cc_test(
absl::raw_hash_set
absl::tracked
absl::core_headers
- gmock_main
+ GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
layout
@@ -740,9 +754,10 @@ absl_cc_test(
absl::core_headers
absl::raw_logging_internal
absl::span
- gmock_main
+ GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
tracked
@@ -755,6 +770,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_map_constructor_test
@@ -765,10 +781,11 @@ absl_cc_library(
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
- gmock
+ GTest::gmock
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_map_lookup_test
@@ -779,10 +796,11 @@ absl_cc_library(
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
- gmock
+ GTest::gmock
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_map_members_test
@@ -792,10 +810,11 @@ absl_cc_library(
${ABSL_TEST_COPTS}
DEPS
absl::type_traits
- gmock
+ GTest::gmock
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_map_modifiers_test
@@ -806,10 +825,11 @@ absl_cc_library(
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
- gmock
+ GTest::gmock
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_set_constructor_test
@@ -820,10 +840,11 @@ absl_cc_library(
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
- gmock
+ GTest::gmock
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_set_lookup_test
@@ -834,10 +855,11 @@ absl_cc_library(
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
- gmock
+ GTest::gmock
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_set_members_test
@@ -847,10 +869,11 @@ absl_cc_library(
${ABSL_TEST_COPTS}
DEPS
absl::type_traits
- gmock
+ GTest::gmock
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_set_modifiers_test
@@ -861,7 +884,7 @@ absl_cc_library(
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
- gmock
+ GTest::gmock
TESTONLY
)
@@ -877,7 +900,7 @@ absl_cc_test(
absl::unordered_set_lookup_test
absl::unordered_set_members_test
absl::unordered_set_modifiers_test
- gmock_main
+ GTest::gmock_main
)
absl_cc_test(
@@ -892,5 +915,20 @@ absl_cc_test(
absl::unordered_map_lookup_test
absl::unordered_map_members_test
absl::unordered_map_modifiers_test
- gmock_main
+ GTest::gmock_main
+)
+
+absl_cc_test(
+ NAME
+ sample_element_size_test
+ SRCS
+ "sample_element_size_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::flat_hash_map
+ absl::flat_hash_set
+ absl::node_hash_map
+ absl::node_hash_set
+ GTest::gmock_main
)
diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc
index 65b6790b..0ca497c8 100644
--- a/absl/container/btree_benchmark.cc
+++ b/absl/container/btree_benchmark.cc
@@ -153,9 +153,9 @@ void BM_FullLookup(benchmark::State& state) {
BM_LookupImpl<T>(state, true);
}
-// Benchmark deletion of values from a container.
+// Benchmark erasing values from a container.
template <typename T>
-void BM_Delete(benchmark::State& state) {
+void BM_Erase(benchmark::State& state) {
using V = typename remove_pair_const<typename T::value_type>::type;
typename KeyOfValue<typename T::key_type, V>::type key_of_value;
std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -180,9 +180,9 @@ void BM_Delete(benchmark::State& state) {
}
}
-// Benchmark deletion of multiple values from a container.
+// Benchmark erasing multiple values from a container.
template <typename T>
-void BM_DeleteRange(benchmark::State& state) {
+void BM_EraseRange(benchmark::State& state) {
using V = typename remove_pair_const<typename T::value_type>::type;
typename KeyOfValue<typename T::key_type, V>::type key_of_value;
std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -222,6 +222,40 @@ void BM_DeleteRange(benchmark::State& state) {
}
}
+// Predicate that erases every other element. We can't use a lambda because
+// C++11 doesn't support generic lambdas.
+// TODO(b/207389011): consider adding benchmarks that remove different fractions
+// of keys (e.g. 10%, 90%).
+struct EraseIfPred {
+ uint64_t i = 0;
+ template <typename T>
+ bool operator()(const T&) {
+ return ++i % 2;
+ }
+};
+
+// Benchmark erasing multiple values from a container with a predicate.
+template <typename T>
+void BM_EraseIf(benchmark::State& state) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+ std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+
+ // Removes half of the keys per batch.
+ const int batch_size = (kBenchmarkValues + 1) / 2;
+ EraseIfPred pred;
+ while (state.KeepRunningBatch(batch_size)) {
+ state.PauseTiming();
+ {
+ T container(values.begin(), values.end());
+ state.ResumeTiming();
+ erase_if(container, pred);
+ benchmark::DoNotOptimize(container);
+ state.PauseTiming();
+ }
+ state.ResumeTiming();
+ }
+}
+
// Benchmark steady-state insert (into first half of range) and remove (from
// second half of range), treating the container approximately like a queue with
// log-time access for all elements. This benchmark does not test the case where
@@ -477,14 +511,14 @@ BTREE_TYPES(Time);
void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
BENCHMARK(BM_##type##_##func)
-#define MY_BENCHMARK3(type) \
+#define MY_BENCHMARK3_STL(type) \
MY_BENCHMARK4(type, Insert); \
MY_BENCHMARK4(type, InsertSorted); \
MY_BENCHMARK4(type, InsertSmall); \
MY_BENCHMARK4(type, Lookup); \
MY_BENCHMARK4(type, FullLookup); \
- MY_BENCHMARK4(type, Delete); \
- MY_BENCHMARK4(type, DeleteRange); \
+ MY_BENCHMARK4(type, Erase); \
+ MY_BENCHMARK4(type, EraseRange); \
MY_BENCHMARK4(type, QueueAddRem); \
MY_BENCHMARK4(type, MixedAddRem); \
MY_BENCHMARK4(type, Fifo); \
@@ -492,9 +526,13 @@ BTREE_TYPES(Time);
MY_BENCHMARK4(type, InsertRangeRandom); \
MY_BENCHMARK4(type, InsertRangeSorted)
+#define MY_BENCHMARK3(type) \
+ MY_BENCHMARK4(type, EraseIf); \
+ MY_BENCHMARK3_STL(type)
+
#define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
- MY_BENCHMARK3(stl_##type); \
- MY_BENCHMARK3(stl_unordered_##type); \
+ MY_BENCHMARK3_STL(stl_##type); \
+ MY_BENCHMARK3_STL(stl_unordered_##type); \
MY_BENCHMARK3(btree_256_##type)
#define MY_BENCHMARK2(type) \
@@ -684,12 +722,12 @@ double ContainerInfo(const btree_map<int, BigTypePtr<Size>>& b) {
btree_set<BigTypePtr<SIZE>>; \
using btree_256_map_size##SIZE##copies##SIZE##ptr = \
btree_map<int, BigTypePtr<SIZE>>; \
- MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr); \
- MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \
MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr); \
MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr); \
- MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr); \
- MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \
MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr); \
MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)
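The new BM_EraseIf benchmark above exercises erase_if with a stateful
function-object predicate, since generic lambdas are unavailable in C++11. A
minimal sketch of the same pattern outside the benchmark harness; the
identifiers here are illustrative and not part of this commit:

#include <cstdint>
#include <iostream>
#include "absl/container/btree_set.h"

// Erases every other visited element. absl::erase_if takes the predicate by
// value, so the counter starts fresh on each call.
struct EveryOtherPred {
  uint64_t i = 0;
  bool operator()(int) { return ++i % 2 != 0; }
};

int main() {
  absl::btree_set<int> s = {1, 2, 3, 4, 5, 6};
  // As of this commit, absl::erase_if returns the number of erased elements.
  std::cout << absl::erase_if(s, EveryOtherPred{}) << "\n";  // 3; s == {2, 4, 6}
}
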
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index ea49d446..286817f1 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -35,14 +35,17 @@
//
// However, these types should not be considered drop-in replacements for
// `std::map` and `std::multimap` as there are some API differences, which are
-// noted in this header file.
+// noted in this header file. The most consequential differences with respect to
+// migrating to b-tree from the STL types are listed in the next paragraph.
+// Other API differences are minor.
//
// Importantly, insertions and deletions may invalidate outstanding iterators,
// pointers, and references to elements. Such invalidations are typically only
// an issue if insertion and deletion operations are interleaved with the use of
// more than one iterator, pointer, or reference simultaneously. For this
// reason, `insert()` and `erase()` return a valid iterator at the current
-// position.
+// position. Another important difference is that key-types must be
+// copy-constructible.
#ifndef ABSL_CONTAINER_BTREE_MAP_H_
#define ABSL_CONTAINER_BTREE_MAP_H_
@@ -53,6 +56,14 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <typename Key, typename Data, typename Compare, typename Alloc,
+ int TargetNodeSize, bool IsMulti>
+struct map_params;
+
+} // namespace container_internal
+
// absl::btree_map<>
//
// An `absl::btree_map<K, V>` is an ordered associative container of
@@ -74,7 +85,7 @@ class btree_map
: public container_internal::btree_map_container<
container_internal::btree<container_internal::map_params<
Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
- /*Multi=*/false>>> {
+ /*IsMulti=*/false>>> {
using Base = typename btree_map::btree_map_container;
public:
@@ -366,8 +377,8 @@ class btree_map
// Determines whether an element comparing equal to the given `key` exists
// within the `btree_map`, returning `true` if so or `false` otherwise.
//
- // Supports heterogeneous lookup, provided that the map is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
using Base::contains;
// btree_map::count()
@@ -378,8 +389,8 @@ class btree_map
// the `btree_map`. Note that this function will return either `1` or `0`
// since duplicate elements are not allowed within a `btree_map`.
//
- // Supports heterogeneous lookup, provided that the map is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
using Base::count;
// btree_map::equal_range()
@@ -395,10 +406,34 @@ class btree_map
//
// Finds an element with the passed `key` within the `btree_map`.
//
- // Supports heterogeneous lookup, provided that the map is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
using Base::find;
+ // btree_map::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is not less than `key` within the
+ // `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_map::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is greater than `key` within the
+ // `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
// btree_map::operator[]()
//
// Returns a reference to the value mapped to the passed key within the
@@ -443,15 +478,11 @@ void swap(btree_map<K, V, C, A> &x, btree_map<K, V, C, A> &y) {
// absl::erase_if(absl::btree_map<>, Pred)
//
// Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
template <typename K, typename V, typename C, typename A, typename Pred>
-void erase_if(btree_map<K, V, C, A> &map, Pred pred) {
- for (auto it = map.begin(); it != map.end();) {
- if (pred(*it)) {
- it = map.erase(it);
- } else {
- ++it;
- }
- }
+typename btree_map<K, V, C, A>::size_type erase_if(
+ btree_map<K, V, C, A> &map, Pred pred) {
+ return container_internal::btree_access::erase_if(map, std::move(pred));
}
// absl::btree_multimap
@@ -476,7 +507,7 @@ class btree_multimap
: public container_internal::btree_multimap_container<
container_internal::btree<container_internal::map_params<
Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
- /*Multi=*/true>>> {
+ /*IsMulti=*/true>>> {
using Base = typename btree_multimap::btree_multimap_container;
public:
@@ -669,9 +700,8 @@ class btree_multimap
// btree_multimap::merge()
//
- // Extracts elements from a given `source` btree_multimap into this
- // `btree_multimap`. If the destination `btree_multimap` already contains an
- // element with an equivalent key, that element is not extracted.
+ // Extracts all elements from a given `source` btree_multimap into this
+ // `btree_multimap`.
using Base::merge;
// btree_multimap::swap(btree_multimap& other)
@@ -691,8 +721,8 @@ class btree_multimap
// Determines whether an element comparing equal to the given `key` exists
// within the `btree_multimap`, returning `true` if so or `false` otherwise.
//
- // Supports heterogeneous lookup, provided that the map is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
using Base::contains;
// btree_multimap::count()
@@ -702,8 +732,8 @@ class btree_multimap
// Returns the number of elements comparing equal to the given `key` within
// the `btree_multimap`.
//
- // Supports heterogeneous lookup, provided that the map is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
using Base::count;
// btree_multimap::equal_range()
@@ -720,10 +750,34 @@ class btree_multimap
//
// Finds an element with the passed `key` within the `btree_multimap`.
//
- // Supports heterogeneous lookup, provided that the map is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
using Base::find;
+ // btree_multimap::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is not less than `key` within the
+ // `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_multimap::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is greater than `key` within the
+ // `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
// btree_multimap::get_allocator()
//
// Returns the allocator function associated with this `btree_multimap`.
@@ -751,17 +805,46 @@ void swap(btree_multimap<K, V, C, A> &x, btree_multimap<K, V, C, A> &y) {
// absl::erase_if(absl::btree_multimap<>, Pred)
//
// Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
template <typename K, typename V, typename C, typename A, typename Pred>
-void erase_if(btree_multimap<K, V, C, A> &map, Pred pred) {
- for (auto it = map.begin(); it != map.end();) {
- if (pred(*it)) {
- it = map.erase(it);
- } else {
- ++it;
- }
- }
+typename btree_multimap<K, V, C, A>::size_type erase_if(
+ btree_multimap<K, V, C, A> &map, Pred pred) {
+ return container_internal::btree_access::erase_if(map, std::move(pred));
}
+namespace container_internal {
+
+// A parameters structure for holding the type parameters for a btree_map.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Data, typename Compare, typename Alloc,
+ int TargetNodeSize, bool IsMulti>
+struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, IsMulti,
+ /*IsMap=*/true, map_slot_policy<Key, Data>> {
+ using super_type = typename map_params::common_params;
+ using mapped_type = Data;
+ // This type allows us to move keys when it is safe to do so. It is safe
+ // for maps in which value_type and mutable_value_type are layout compatible.
+ using slot_policy = typename super_type::slot_policy;
+ using slot_type = typename super_type::slot_type;
+ using value_type = typename super_type::value_type;
+ using init_type = typename super_type::init_type;
+
+ template <typename V>
+ static auto key(const V &value) -> decltype(value.first) {
+ return value.first;
+ }
+ static const Key &key(const slot_type *s) { return slot_policy::key(s); }
+ static const Key &key(slot_type *s) { return slot_policy::key(s); }
+ // For use in node handle.
+ static auto mutable_key(slot_type *s)
+ -> decltype(slot_policy::mutable_key(s)) {
+ return slot_policy::mutable_key(s);
+ }
+ static mapped_type &value(value_type *value) { return value->second; }
+};
+
+} // namespace container_internal
+
ABSL_NAMESPACE_END
} // namespace absl
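The lower_bound/upper_bound documentation added above pairs with btree's
string-adapted default comparators (see the KeyCompareAdapter test changes
later in this commit): lookups on string-keyed maps are heterogeneous out of
the box, so an absl::string_view key works without constructing a
std::string. A hedged usage sketch, not taken from the commit:

#include <iostream>
#include <string>
#include "absl/container/btree_map.h"
#include "absl/strings/string_view.h"

int main() {
  absl::btree_map<std::string, int> m = {{"ant", 1}, {"bee", 2}, {"cat", 3}};
  absl::string_view key = "bee";  // heterogeneous lookup: no std::string built
  auto lo = m.lower_bound(key);   // first element with key not less than "bee"
  auto hi = m.upper_bound(key);   // first element with key greater than "bee"
  for (auto it = lo; it != hi; ++it) std::cout << it->second << "\n";  // 2
}
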
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
index 21ef0a03..695b09f5 100644
--- a/absl/container/btree_set.h
+++ b/absl/container/btree_set.h
@@ -35,7 +35,9 @@
//
// However, these types should not be considered drop-in replacements for
// `std::set` and `std::multiset` as there are some API differences, which are
-// noted in this header file.
+// noted in this header file. The most consequential differences with respect to
+// migrating to b-tree from the STL types are listed in the next paragraph.
+// Other API differences are minor.
//
// Importantly, insertions and deletions may invalidate outstanding iterators,
// pointers, and references to elements. Such invalidations are typically only
@@ -53,6 +55,17 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <typename Key>
+struct set_slot_policy;
+
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+ bool IsMulti>
+struct set_params;
+
+} // namespace container_internal
+
// absl::btree_set<>
//
// An `absl::btree_set<K>` is an ordered associative container of unique key
@@ -74,7 +87,7 @@ class btree_set
: public container_internal::btree_set_container<
container_internal::btree<container_internal::set_params<
Key, Compare, Alloc, /*TargetNodeSize=*/256,
- /*Multi=*/false>>> {
+ /*IsMulti=*/false>>> {
using Base = typename btree_set::btree_set_container;
public:
@@ -300,8 +313,8 @@ class btree_set
// Determines whether an element comparing equal to the given `key` exists
// within the `btree_set`, returning `true` if so or `false` otherwise.
//
- // Supports heterogeneous lookup, provided that the set is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
using Base::contains;
// btree_set::count()
@@ -312,8 +325,8 @@ class btree_set
// the `btree_set`. Note that this function will return either `1` or `0`
// since duplicate elements are not allowed within a `btree_set`.
//
- // Supports heterogeneous lookup, provided that the set is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
using Base::count;
// btree_set::equal_range()
@@ -330,10 +343,32 @@ class btree_set
//
// Finds an element with the passed `key` within the `btree_set`.
//
- // Supports heterogeneous lookup, provided that the set is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
using Base::find;
+ // btree_set::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element that is not less than `key` within the `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_set::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element that is greater than `key` within the `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
// btree_set::get_allocator()
//
// Returns the allocator function associated with this `btree_set`.
@@ -363,15 +398,11 @@ void swap(btree_set<K, C, A> &x, btree_set<K, C, A> &y) {
// absl::erase_if(absl::btree_set<>, Pred)
//
// Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
template <typename K, typename C, typename A, typename Pred>
-void erase_if(btree_set<K, C, A> &set, Pred pred) {
- for (auto it = set.begin(); it != set.end();) {
- if (pred(*it)) {
- it = set.erase(it);
- } else {
- ++it;
- }
- }
+typename btree_set<K, C, A>::size_type erase_if(btree_set<K, C, A> &set,
+ Pred pred) {
+ return container_internal::btree_access::erase_if(set, std::move(pred));
}
// absl::btree_multiset<>
@@ -396,7 +427,7 @@ class btree_multiset
: public container_internal::btree_multiset_container<
container_internal::btree<container_internal::set_params<
Key, Compare, Alloc, /*TargetNodeSize=*/256,
- /*Multi=*/true>>> {
+ /*IsMulti=*/true>>> {
using Base = typename btree_multiset::btree_multiset_container;
public:
@@ -582,9 +613,8 @@ class btree_multiset
// btree_multiset::merge()
//
- // Extracts elements from a given `source` btree_multiset into this
- // `btree_multiset`. If the destination `btree_multiset` already contains an
- // element with an equivalent key, that element is not extracted.
+ // Extracts all elements from a given `source` btree_multiset into this
+ // `btree_multiset`.
using Base::merge;
// btree_multiset::swap(btree_multiset& other)
@@ -604,8 +634,8 @@ class btree_multiset
// Determines whether an element comparing equal to the given `key` exists
// within the `btree_multiset`, returning `true` if so or `false` otherwise.
//
- // Supports heterogeneous lookup, provided that the set is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
using Base::contains;
// btree_multiset::count()
@@ -615,8 +645,8 @@ class btree_multiset
// Returns the number of elements comparing equal to the given `key` within
// the `btree_multiset`.
//
- // Supports heterogeneous lookup, provided that the set is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
using Base::count;
// btree_multiset::equal_range()
@@ -633,10 +663,34 @@ class btree_multiset
//
// Finds an element with the passed `key` within the `btree_multiset`.
//
- // Supports heterogeneous lookup, provided that the set is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
using Base::find;
+ // btree_multiset::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element that is not less than `key` within the
+ // `btree_multiset`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_multiset::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element that is greater than `key` within the
+ // `btree_multiset`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
// btree_multiset::get_allocator()
//
// Returns the allocator function associated with this `btree_multiset`.
@@ -666,17 +720,73 @@ void swap(btree_multiset<K, C, A> &x, btree_multiset<K, C, A> &y) {
// absl::erase_if(absl::btree_multiset<>, Pred)
//
// Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
template <typename K, typename C, typename A, typename Pred>
-void erase_if(btree_multiset<K, C, A> &set, Pred pred) {
- for (auto it = set.begin(); it != set.end();) {
- if (pred(*it)) {
- it = set.erase(it);
- } else {
- ++it;
- }
- }
+typename btree_multiset<K, C, A>::size_type erase_if(
+ btree_multiset<K, C, A> & set, Pred pred) {
+ return container_internal::btree_access::erase_if(set, std::move(pred));
}
+namespace container_internal {
+
+// This type implements the necessary functions from the
+// absl::container_internal::slot_type interface for btree_(multi)set.
+template <typename Key>
+struct set_slot_policy {
+ using slot_type = Key;
+ using value_type = Key;
+ using mutable_value_type = Key;
+
+ static value_type &element(slot_type *slot) { return *slot; }
+ static const value_type &element(const slot_type *slot) { return *slot; }
+
+ template <typename Alloc, class... Args>
+ static void construct(Alloc *alloc, slot_type *slot, Args &&...args) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot,
+ std::forward<Args>(args)...);
+ }
+
+ template <typename Alloc>
+ static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other));
+ }
+
+ template <typename Alloc>
+ static void construct(Alloc *alloc, slot_type *slot, const slot_type *other) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot, *other);
+ }
+
+ template <typename Alloc>
+ static void destroy(Alloc *alloc, slot_type *slot) {
+ absl::allocator_traits<Alloc>::destroy(*alloc, slot);
+ }
+
+ template <typename Alloc>
+ static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) {
+ construct(alloc, new_slot, old_slot);
+ destroy(alloc, old_slot);
+ }
+};
+
+// A parameters structure for holding the type parameters for a btree_set.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+ bool IsMulti>
+struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, IsMulti,
+ /*IsMap=*/false, set_slot_policy<Key>> {
+ using value_type = Key;
+ using slot_type = typename set_params::common_params::slot_type;
+
+ template <typename V>
+ static const V &key(const V &value) {
+ return value;
+ }
+ static const Key &key(const slot_type *slot) { return *slot; }
+ static const Key &key(slot_type *slot) { return *slot; }
+};
+
+} // namespace container_internal
+
ABSL_NAMESPACE_END
} // namespace absl
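The reworded merge() comment above captures a real difference from the
unique-key containers: a btree_multiset extracts all elements from `source`,
duplicates included, rather than skipping keys already present. A small
sketch under that reading, with arbitrarily chosen values:

#include <iostream>
#include "absl/container/btree_set.h"

int main() {
  absl::btree_multiset<int> dst = {1, 2};
  absl::btree_multiset<int> src = {2, 3};
  dst.merge(src);  // moves all of src, including the duplicate key 2
  std::cout << dst.size() << " " << src.size() << "\n";  // prints "4 0"
}
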
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index 74337df2..f20f3430 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -14,10 +14,14 @@
#include "absl/container/btree_test.h"
+#include <algorithm>
+#include <array>
#include <cstdint>
+#include <functional>
#include <limits>
#include <map>
#include <memory>
+#include <numeric>
#include <stdexcept>
#include <string>
#include <type_traits>
@@ -595,7 +599,7 @@ void BtreeTest() {
using V = typename remove_pair_const<typename T::value_type>::type;
const std::vector<V> random_values = GenerateValuesWithSeed<V>(
absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values),
- testing::GTEST_FLAG(random_seed));
+ GTEST_FLAG_GET(random_seed));
unique_checker<T, C> container;
@@ -619,7 +623,7 @@ void BtreeMultiTest() {
using V = typename remove_pair_const<typename T::value_type>::type;
const std::vector<V> random_values = GenerateValuesWithSeed<V>(
absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values),
- testing::GTEST_FLAG(random_seed));
+ GTEST_FLAG_GET(random_seed));
multi_checker<T, C> container;
@@ -1212,6 +1216,11 @@ class BtreeNodePeer {
constexpr static bool UsesLinearNodeSearch() {
return btree_node<typename Btree::params_type>::use_linear_search::value;
}
+
+ template <typename Btree>
+ constexpr static bool UsesGenerations() {
+ return Btree::params_type::kEnableGenerations;
+ }
};
namespace {
@@ -1285,7 +1294,7 @@ TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) {
std::unique_ptr<std::string> &v = m["A"];
EXPECT_TRUE(v == nullptr);
- v.reset(new std::string("X"));
+ v = absl::make_unique<std::string>("X");
auto iter = m.find("A");
EXPECT_EQ("X", *iter->second);
@@ -1344,38 +1353,34 @@ TEST(Btree, InitializerListInsert) {
EXPECT_EQ(++it, range.second);
}
-template <typename Compare, typename K>
-void AssertKeyCompareToAdapted() {
- using Adapted = typename key_compare_to_adapter<Compare>::type;
- static_assert(!std::is_same<Adapted, Compare>::value,
- "key_compare_to_adapter should have adapted this comparator.");
+template <typename Compare, typename Key>
+void AssertKeyCompareStringAdapted() {
+ using Adapted = typename key_compare_adapter<Compare, Key>::type;
static_assert(
- std::is_same<absl::weak_ordering,
- absl::result_of_t<Adapted(const K &, const K &)>>::value,
- "Adapted comparator should be a key-compare-to comparator.");
+ std::is_same<Adapted, StringBtreeDefaultLess>::value ||
+ std::is_same<Adapted, StringBtreeDefaultGreater>::value,
+ "key_compare_adapter should have string-adapted this comparator.");
}
-template <typename Compare, typename K>
-void AssertKeyCompareToNotAdapted() {
- using Unadapted = typename key_compare_to_adapter<Compare>::type;
- static_assert(
- std::is_same<Unadapted, Compare>::value,
- "key_compare_to_adapter shouldn't have adapted this comparator.");
+template <typename Compare, typename Key>
+void AssertKeyCompareNotStringAdapted() {
+ using Adapted = typename key_compare_adapter<Compare, Key>::type;
static_assert(
- std::is_same<bool,
- absl::result_of_t<Unadapted(const K &, const K &)>>::value,
- "Un-adapted comparator should return bool.");
+ !std::is_same<Adapted, StringBtreeDefaultLess>::value &&
+ !std::is_same<Adapted, StringBtreeDefaultGreater>::value,
+ "key_compare_adapter shouldn't have string-adapted this comparator.");
}
-TEST(Btree, KeyCompareToAdapter) {
- AssertKeyCompareToAdapted<std::less<std::string>, std::string>();
- AssertKeyCompareToAdapted<std::greater<std::string>, std::string>();
- AssertKeyCompareToAdapted<std::less<absl::string_view>, absl::string_view>();
- AssertKeyCompareToAdapted<std::greater<absl::string_view>,
- absl::string_view>();
- AssertKeyCompareToAdapted<std::less<absl::Cord>, absl::Cord>();
- AssertKeyCompareToAdapted<std::greater<absl::Cord>, absl::Cord>();
- AssertKeyCompareToNotAdapted<std::less<int>, int>();
- AssertKeyCompareToNotAdapted<std::greater<int>, int>();
+TEST(Btree, KeyCompareAdapter) {
+ AssertKeyCompareStringAdapted<std::less<std::string>, std::string>();
+ AssertKeyCompareStringAdapted<std::greater<std::string>, std::string>();
+ AssertKeyCompareStringAdapted<std::less<absl::string_view>,
+ absl::string_view>();
+ AssertKeyCompareStringAdapted<std::greater<absl::string_view>,
+ absl::string_view>();
+ AssertKeyCompareStringAdapted<std::less<absl::Cord>, absl::Cord>();
+ AssertKeyCompareStringAdapted<std::greater<absl::Cord>, absl::Cord>();
+ AssertKeyCompareNotStringAdapted<std::less<int>, int>();
+ AssertKeyCompareNotStringAdapted<std::greater<int>, int>();
}
TEST(Btree, RValueInsert) {
@@ -1425,11 +1430,19 @@ TEST(Btree, RValueInsert) {
EXPECT_EQ(tracker.swaps(), 0);
}
-// A btree set with a specific number of values per node.
+template <typename Cmp>
+struct CheckedCompareOptedOutCmp : Cmp, BtreeTestOnlyCheckedCompareOptOutBase {
+ using Cmp::Cmp;
+ CheckedCompareOptedOutCmp() {}
+ CheckedCompareOptedOutCmp(Cmp cmp) : Cmp(std::move(cmp)) {} // NOLINT
+};
+
+// A btree set with a specific number of values per node. Opt out of
+// checked_compare so that we can expect exact numbers of comparisons.
template <typename Key, int TargetValuesPerNode, typename Cmp = std::less<Key>>
class SizedBtreeSet
: public btree_set_container<btree<
- set_params<Key, Cmp, std::allocator<Key>,
+ set_params<Key, CheckedCompareOptedOutCmp<Cmp>, std::allocator<Key>,
BtreeNodePeer::GetTargetNodeSize<Key>(TargetValuesPerNode),
/*Multi=*/false>>> {
using Base = typename SizedBtreeSet::btree_set_container;
@@ -1473,8 +1486,10 @@ TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) {
- EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
- BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>());
+ EXPECT_EQ(
+ BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+ // When we have generations, there is one fewer slot.
+ BtreeNodePeer::UsesGenerations<absl::btree_set<int32_t>>() ? 60 : 61);
}
// Test key insertion/deletion in random order.
@@ -1528,8 +1543,10 @@ TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) {
- EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
- BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>());
+ EXPECT_EQ(
+ BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+ // When we have generations, there is one fewer slot.
+ BtreeNodePeer::UsesGenerations<absl::btree_set<int32_t>>() ? 60 : 61);
}
// Test key insertion/deletion in random order.
@@ -1708,10 +1725,25 @@ TEST(Btree, StrSplitCompatible) {
EXPECT_EQ(split_set, expected_set);
}
-// We can't use EXPECT_EQ/etc. to compare absl::weak_ordering because they
-// convert literal 0 to int and absl::weak_ordering can only be compared with
-// literal 0. Defining this function allows for avoiding ClangTidy warnings.
-bool Identity(const bool b) { return b; }
+TEST(Btree, KeyComp) {
+ absl::btree_set<int> s;
+ EXPECT_TRUE(s.key_comp()(1, 2));
+ EXPECT_FALSE(s.key_comp()(2, 2));
+ EXPECT_FALSE(s.key_comp()(2, 1));
+
+ absl::btree_map<int, int> m1;
+ EXPECT_TRUE(m1.key_comp()(1, 2));
+ EXPECT_FALSE(m1.key_comp()(2, 2));
+ EXPECT_FALSE(m1.key_comp()(2, 1));
+
+ // Even though we internally adapt the comparator of `m2` to be three-way and
+ // heterogeneous, the comparator we expose through key_comp() is the original
+ // unadapted comparator.
+ absl::btree_map<std::string, int> m2;
+ EXPECT_TRUE(m2.key_comp()("a", "b"));
+ EXPECT_FALSE(m2.key_comp()("b", "b"));
+ EXPECT_FALSE(m2.key_comp()("b", "a"));
+}
TEST(Btree, ValueComp) {
absl::btree_set<int> s;
@@ -1724,13 +1756,29 @@ TEST(Btree, ValueComp) {
EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(2, 0)));
EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(1, 0)));
+ // Even though we internally adapt the comparator of `m2` to be three-way and
+ // heterogeneous, the comparator we expose through value_comp() is based on
+ // the original unadapted comparator.
absl::btree_map<std::string, int> m2;
- EXPECT_TRUE(Identity(
- m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0)) < 0));
- EXPECT_TRUE(Identity(
- m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0)) == 0));
- EXPECT_TRUE(Identity(
- m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0)) > 0));
+ EXPECT_TRUE(m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0)));
+ EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0)));
+ EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0)));
+}
+
+// Test that we have the protected members from the std::map::value_compare API.
+// See https://en.cppreference.com/w/cpp/container/map/value_compare.
+TEST(Btree, MapValueCompProtected) {
+ struct key_compare {
+ bool operator()(int l, int r) const { return l < r; }
+ int id;
+ };
+ using value_compare = absl::btree_map<int, int, key_compare>::value_compare;
+ struct value_comp_child : public value_compare {
+ explicit value_comp_child(key_compare kc) : value_compare(kc) {}
+ int GetId() const { return comp.id; }
+ };
+ value_comp_child c(key_compare{10});
+ EXPECT_EQ(c.GetId(), 10);
}
TEST(Btree, DefaultConstruction) {
@@ -2282,7 +2330,9 @@ TEST(Btree, TryEmplaceWithHintWorks) {
};
using Cmp = decltype(cmp);
- absl::btree_map<int, int, Cmp> m(cmp);
+ // Use a map that is opted out of key_compare being adapted so we can expect
+ // strict comparison call limits.
+ absl::btree_map<int, int, CheckedCompareOptedOutCmp<Cmp>> m(cmp);
for (int i = 0; i < 128; ++i) {
m.emplace(i, i);
}
@@ -2437,23 +2487,28 @@ TEST(Btree, EraseIf) {
// Test that erase_if works with all the container types and supports lambdas.
{
absl::btree_set<int> s = {1, 3, 5, 6, 100};
- erase_if(s, [](int k) { return k > 3; });
+ EXPECT_EQ(erase_if(s, [](int k) { return k > 3; }), 3);
EXPECT_THAT(s, ElementsAre(1, 3));
}
{
absl::btree_multiset<int> s = {1, 3, 3, 5, 6, 6, 100};
- erase_if(s, [](int k) { return k <= 3; });
+ EXPECT_EQ(erase_if(s, [](int k) { return k <= 3; }), 3);
EXPECT_THAT(s, ElementsAre(5, 6, 6, 100));
}
{
absl::btree_map<int, int> m = {{1, 1}, {3, 3}, {6, 6}, {100, 100}};
- erase_if(m, [](std::pair<const int, int> kv) { return kv.first > 3; });
+ EXPECT_EQ(
+ erase_if(m, [](std::pair<const int, int> kv) { return kv.first > 3; }),
+ 2);
EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3)));
}
{
absl::btree_multimap<int, int> m = {{1, 1}, {3, 3}, {3, 6},
{6, 6}, {6, 7}, {100, 6}};
- erase_if(m, [](std::pair<const int, int> kv) { return kv.second == 6; });
+ EXPECT_EQ(
+ erase_if(m,
+ [](std::pair<const int, int> kv) { return kv.second == 6; }),
+ 3);
EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3), Pair(6, 7)));
}
// Test that erasing all elements from a large set works and test support for
@@ -2461,15 +2516,29 @@ TEST(Btree, EraseIf) {
{
absl::btree_set<int> s;
for (int i = 0; i < 1000; ++i) s.insert(2 * i);
- erase_if(s, IsEven);
+ EXPECT_EQ(erase_if(s, IsEven), 1000);
EXPECT_THAT(s, IsEmpty());
}
// Test that erase_if supports other format of function pointers.
{
absl::btree_set<int> s = {1, 3, 5, 6, 100};
- erase_if(s, &IsEven);
+ EXPECT_EQ(erase_if(s, &IsEven), 2);
EXPECT_THAT(s, ElementsAre(1, 3, 5));
}
+ // Test that erase_if invokes the predicate once per element.
+ {
+ absl::btree_set<int> s;
+ for (int i = 0; i < 1000; ++i) s.insert(i);
+ int pred_calls = 0;
+ EXPECT_EQ(erase_if(s,
+ [&pred_calls](int k) {
+ ++pred_calls;
+ return k % 2;
+ }),
+ 500);
+ EXPECT_THAT(s, SizeIs(500));
+ EXPECT_EQ(pred_calls, 1000);
+ }
}
TEST(Btree, InsertOrAssign) {
@@ -2893,6 +2962,292 @@ TEST(Btree, AllocMoveConstructor_DifferentAlloc) {
EXPECT_EQ(bytes_used2, original_bytes_used);
}
+bool IntCmp(const int a, const int b) { return a < b; }
+
+TEST(Btree, SupportsFunctionPtrComparator) {
+ absl::btree_set<int, decltype(IntCmp) *> set(IntCmp);
+ set.insert({1, 2, 3});
+ EXPECT_THAT(set, ElementsAre(1, 2, 3));
+ EXPECT_TRUE(set.key_comp()(1, 2));
+ EXPECT_TRUE(set.value_comp()(1, 2));
+
+ absl::btree_map<int, int, decltype(IntCmp) *> map(&IntCmp);
+ map[1] = 1;
+ EXPECT_THAT(map, ElementsAre(Pair(1, 1)));
+ EXPECT_TRUE(map.key_comp()(1, 2));
+ EXPECT_TRUE(map.value_comp()(std::make_pair(1, 1), std::make_pair(2, 2)));
+}
+
+template <typename Compare>
+struct TransparentPassThroughComp {
+ using is_transparent = void;
+
+ // This will fail compilation if we attempt a comparison that Compare does not
+ // support, and the failure will happen inside the function implementation so
+ // it can't be avoided by using SFINAE on this comparator.
+ template <typename T, typename U>
+ bool operator()(const T &lhs, const U &rhs) const {
+ return Compare()(lhs, rhs);
+ }
+};
+
+TEST(Btree,
+ SupportsTransparentComparatorThatDoesNotImplementAllVisibleOperators) {
+ absl::btree_set<MultiKey, TransparentPassThroughComp<MultiKeyComp>> set;
+ set.insert(MultiKey{1, 2});
+ EXPECT_TRUE(set.contains(1));
+}
+
+TEST(Btree, ConstructImplicitlyWithUnadaptedComparator) {
+ absl::btree_set<MultiKey, MultiKeyComp> set = {{}, MultiKeyComp{}};
+}
+
+#ifndef NDEBUG
+TEST(Btree, InvalidComparatorsCaught) {
+ {
+ struct ZeroAlwaysLessCmp {
+ bool operator()(int lhs, int rhs) const {
+ if (lhs == 0) return true;
+ return lhs < rhs;
+ }
+ };
+ absl::btree_set<int, ZeroAlwaysLessCmp> set;
+ EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent");
+ }
+ {
+ struct ThreeWayAlwaysLessCmp {
+ absl::weak_ordering operator()(int, int) const {
+ return absl::weak_ordering::less;
+ }
+ };
+ absl::btree_set<int, ThreeWayAlwaysLessCmp> set;
+ EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent");
+ }
+ {
+ struct SumGreaterZeroCmp {
+ bool operator()(int lhs, int rhs) const {
+        // First, do equivalence correctly - so we can test the later condition.
+ if (lhs == rhs) return false;
+ return lhs + rhs > 0;
+ }
+ };
+ absl::btree_set<int, SumGreaterZeroCmp> set;
+ // Note: '!' only needs to be escaped when it's the first character.
+ EXPECT_DEATH(set.insert({0, 1, 2}),
+ R"regex(\!lhs_comp_rhs \|\| !comp\(\)\(rhs, lhs\))regex");
+ }
+ {
+ struct ThreeWaySumGreaterZeroCmp {
+ absl::weak_ordering operator()(int lhs, int rhs) const {
+        // First, do equivalence correctly - so we can test the later condition.
+ if (lhs == rhs) return absl::weak_ordering::equivalent;
+
+ if (lhs + rhs > 0) return absl::weak_ordering::less;
+ if (lhs + rhs == 0) return absl::weak_ordering::equivalent;
+ return absl::weak_ordering::greater;
+ }
+ };
+ absl::btree_set<int, ThreeWaySumGreaterZeroCmp> set;
+ EXPECT_DEATH(set.insert({0, 1, 2}), "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
+ }
+}
+#endif
+
+#ifndef _MSC_VER
+// This test crashes on MSVC.
+TEST(Btree, InvalidIteratorUse) {
+ if (!BtreeNodePeer::UsesGenerations<absl::btree_set<int>>())
+ GTEST_SKIP() << "Generation validation for iterators is disabled.";
+
+ {
+ absl::btree_set<int> set;
+ for (int i = 0; i < 10; ++i) set.insert(i);
+ auto it = set.begin();
+ set.erase(it++);
+ EXPECT_DEATH(set.erase(it++), "invalidated iterator");
+ }
+ {
+ absl::btree_set<int> set;
+ for (int i = 0; i < 10; ++i) set.insert(i);
+ auto it = set.insert(20).first;
+ set.insert(30);
+ EXPECT_DEATH(*it, "invalidated iterator");
+ }
+ {
+ absl::btree_set<int> set;
+ for (int i = 0; i < 10000; ++i) set.insert(i);
+ auto it = set.find(5000);
+ ASSERT_NE(it, set.end());
+ set.erase(1);
+ EXPECT_DEATH(*it, "invalidated iterator");
+ }
+}
+#endif
+
+class OnlyConstructibleByAllocator {
+ explicit OnlyConstructibleByAllocator(int i) : i_(i) {}
+
+ public:
+ OnlyConstructibleByAllocator(const OnlyConstructibleByAllocator &other)
+ : i_(other.i_) {}
+ OnlyConstructibleByAllocator &operator=(
+ const OnlyConstructibleByAllocator &other) {
+ i_ = other.i_;
+ return *this;
+ }
+ int Get() const { return i_; }
+ bool operator==(int i) const { return i_ == i; }
+
+ private:
+ template <typename T>
+ friend class OnlyConstructibleAllocator;
+
+ int i_;
+};
+
+template <typename T = OnlyConstructibleByAllocator>
+class OnlyConstructibleAllocator : public std::allocator<T> {
+ public:
+ OnlyConstructibleAllocator() = default;
+ template <class U>
+ explicit OnlyConstructibleAllocator(const OnlyConstructibleAllocator<U> &) {}
+
+ void construct(OnlyConstructibleByAllocator *p, int i) {
+ new (p) OnlyConstructibleByAllocator(i);
+ }
+ template <typename Pair>
+ void construct(Pair *p, const int i) {
+ OnlyConstructibleByAllocator only(i);
+ new (p) Pair(std::move(only), i);
+ }
+
+ template <class U>
+ struct rebind {
+ using other = OnlyConstructibleAllocator<U>;
+ };
+};
+
+struct OnlyConstructibleByAllocatorComp {
+ using is_transparent = void;
+ bool operator()(OnlyConstructibleByAllocator a,
+ OnlyConstructibleByAllocator b) const {
+ return a.Get() < b.Get();
+ }
+ bool operator()(int a, OnlyConstructibleByAllocator b) const {
+ return a < b.Get();
+ }
+ bool operator()(OnlyConstructibleByAllocator a, int b) const {
+ return a.Get() < b;
+ }
+};
+
+TEST(Btree, OnlyConstructibleByAllocatorType) {
+ const std::array<int, 2> arr = {3, 4};
+ {
+ absl::btree_set<OnlyConstructibleByAllocator,
+ OnlyConstructibleByAllocatorComp,
+ OnlyConstructibleAllocator<>>
+ set;
+ set.emplace(1);
+ set.emplace_hint(set.end(), 2);
+ set.insert(arr.begin(), arr.end());
+ EXPECT_THAT(set, ElementsAre(1, 2, 3, 4));
+ }
+ {
+ absl::btree_multiset<OnlyConstructibleByAllocator,
+ OnlyConstructibleByAllocatorComp,
+ OnlyConstructibleAllocator<>>
+ set;
+ set.emplace(1);
+ set.emplace_hint(set.end(), 2);
+ // TODO(ezb): fix insert_multi to allow this to compile.
+ // set.insert(arr.begin(), arr.end());
+ EXPECT_THAT(set, ElementsAre(1, 2));
+ }
+ {
+ absl::btree_map<OnlyConstructibleByAllocator, int,
+ OnlyConstructibleByAllocatorComp,
+ OnlyConstructibleAllocator<>>
+ map;
+ map.emplace(1);
+ map.emplace_hint(map.end(), 2);
+ map.insert(arr.begin(), arr.end());
+ EXPECT_THAT(map,
+ ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4)));
+ }
+ {
+ absl::btree_multimap<OnlyConstructibleByAllocator, int,
+ OnlyConstructibleByAllocatorComp,
+ OnlyConstructibleAllocator<>>
+ map;
+ map.emplace(1);
+ map.emplace_hint(map.end(), 2);
+ // TODO(ezb): fix insert_multi to allow this to compile.
+ // map.insert(arr.begin(), arr.end());
+ EXPECT_THAT(map, ElementsAre(Pair(1, 1), Pair(2, 2)));
+ }
+}
+
+class NotAssignable {
+ public:
+ explicit NotAssignable(int i) : i_(i) {}
+ NotAssignable(const NotAssignable &other) : i_(other.i_) {}
+ NotAssignable &operator=(NotAssignable &&other) = delete;
+ int Get() const { return i_; }
+ bool operator==(int i) const { return i_ == i; }
+ friend bool operator<(NotAssignable a, NotAssignable b) {
+ return a.i_ < b.i_;
+ }
+
+ private:
+ int i_;
+};
+
+TEST(Btree, NotAssignableType) {
+ {
+ absl::btree_set<NotAssignable> set;
+ set.emplace(1);
+ set.emplace_hint(set.end(), 2);
+ set.insert(NotAssignable(3));
+ set.insert(set.end(), NotAssignable(4));
+ EXPECT_THAT(set, ElementsAre(1, 2, 3, 4));
+ set.erase(set.begin());
+ EXPECT_THAT(set, ElementsAre(2, 3, 4));
+ }
+ {
+ absl::btree_multiset<NotAssignable> set;
+ set.emplace(1);
+ set.emplace_hint(set.end(), 2);
+ set.insert(NotAssignable(2));
+ set.insert(set.end(), NotAssignable(3));
+ EXPECT_THAT(set, ElementsAre(1, 2, 2, 3));
+ set.erase(set.begin());
+ EXPECT_THAT(set, ElementsAre(2, 2, 3));
+ }
+ {
+ absl::btree_map<NotAssignable, int> map;
+ map.emplace(NotAssignable(1), 1);
+ map.emplace_hint(map.end(), NotAssignable(2), 2);
+ map.insert({NotAssignable(3), 3});
+ map.insert(map.end(), {NotAssignable(4), 4});
+ EXPECT_THAT(map,
+ ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4)));
+ map.erase(map.begin());
+ EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(3, 3), Pair(4, 4)));
+ }
+ {
+ absl::btree_multimap<NotAssignable, int> map;
+ map.emplace(NotAssignable(1), 1);
+ map.emplace_hint(map.end(), NotAssignable(2), 2);
+ map.insert({NotAssignable(2), 3});
+ map.insert(map.end(), {NotAssignable(3), 3});
+ EXPECT_THAT(map,
+ ElementsAre(Pair(1, 1), Pair(2, 2), Pair(2, 3), Pair(3, 3)));
+ map.erase(map.begin());
+ EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(2, 3), Pair(3, 3)));
+ }
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
index fcb3e545..2aefae3b 100644
--- a/absl/container/fixed_array.h
+++ b/absl/container/fixed_array.h
@@ -73,11 +73,6 @@ constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
// uninitialized (e.g. int, int[4], double), and others default-constructed.
// This matches the behavior of c-style arrays and `std::array`, but not
// `std::vector`.
-//
-// Note that `FixedArray` does not provide a public allocator; if it requires a
-// heap allocation, it will do so with global `::operator new[]()` and
-// `::operator delete[]()`, even if T provides class-scope overrides for these
-// operators.
template <typename T, size_t N = kFixedArrayUseDefault,
typename A = std::allocator<T>>
class FixedArray {
@@ -494,12 +489,14 @@ class FixedArray {
Storage storage_;
};
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
template <typename T, size_t N, typename A>
constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
template <typename T, size_t N, typename A>
constexpr typename FixedArray<T, N, A>::size_type
FixedArray<T, N, A>::inline_elements;
+#endif
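For context, a minimal sketch of the language rule this guard works around, using a hypothetical `Widget` type (not part of this diff): before C++17, a static constexpr data member that was odr-used needed a redundant out-of-line definition; C++17 made such members implicitly inline, so the definitions above are only needed on older toolchains.

struct Widget {
  // In C++17 and later, this declaration is also a definition (the member is
  // implicitly an inline variable), so no out-of-line definition is needed.
  static constexpr int kSize = 4;
};

#if __cplusplus < 201703L
// Pre-C++17: required if Widget::kSize is odr-used (e.g. its address taken).
constexpr int Widget::kSize;
#endif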
template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index 74def0df..e6bdbd9e 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -36,6 +36,7 @@
#include <utility>
#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export
@@ -75,6 +76,10 @@ struct FlatHashMapPolicy;
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
+// Using `absl::flat_hash_map` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash` values
+// may be randomized across dynamically loaded libraries.
+//
// NOTE: A `flat_hash_map` stores its value types directly inside its
// implementation array to avoid memory indirection. Because a `flat_hash_map`
// is designed to move data when rehashed, map values will not retain pointer
@@ -356,8 +361,8 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
// `flat_hash_map`.
//
// iterator try_emplace(const_iterator hint,
- // const init_type& k, Args&&... args):
- // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
+ // const key_type& k, Args&&... args):
+ // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
//
// Inserts (via copy or move) the element of the specified key into the
// `flat_hash_map` using the position of `hint` as a non-binding suggestion
@@ -541,10 +546,12 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
// erase_if(flat_hash_map<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
template <typename K, typename V, typename H, typename E, typename A,
typename Predicate>
-void erase_if(flat_hash_map<K, V, H, E, A>& c, Predicate pred) {
- container_internal::EraseIf(pred, &c);
+typename flat_hash_map<K, V, H, E, A>::size_type erase_if(
+ flat_hash_map<K, V, H, E, A>& c, Predicate pred) {
+ return container_internal::EraseIf(pred, &c);
}
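A usage sketch of the new signature (hypothetical caller, not part of this change): because `erase_if` now returns the erase count, callers can observe how many entries a predicate removed without separate bookkeeping.

#include <cstddef>
#include "absl/container/flat_hash_map.h"

// Removes all entries with negative values; returns how many were erased.
size_t DropNegativeValues(absl::flat_hash_map<int, int>& m) {
  return absl::erase_if(
      m, [](const std::pair<const int, int>& kv) { return kv.second < 0; });
}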
namespace container_internal {
diff --git a/absl/container/flat_hash_map_test.cc b/absl/container/flat_hash_map_test.cc
index 89ec60c9..263951f1 100644
--- a/absl/container/flat_hash_map_test.cc
+++ b/absl/container/flat_hash_map_test.cc
@@ -236,33 +236,36 @@ TEST(FlatHashMap, EraseIf) {
// Erase all elements.
{
flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, [](std::pair<const int, int>) { return true; });
+ EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return true; }), 5);
EXPECT_THAT(s, IsEmpty());
}
// Erase no elements.
{
flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, [](std::pair<const int, int>) { return false; });
+ EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return false; }), 0);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3),
Pair(4, 4), Pair(5, 5)));
}
// Erase specific elements.
{
flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s,
- [](std::pair<const int, int> kvp) { return kvp.first % 2 == 1; });
+ EXPECT_EQ(erase_if(s,
+ [](std::pair<const int, int> kvp) {
+ return kvp.first % 2 == 1;
+ }),
+ 3);
EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4)));
}
// Predicate is function reference.
{
flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, FirstIsEven);
+ EXPECT_EQ(erase_if(s, FirstIsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
}
// Predicate is function pointer.
{
flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, &FirstIsEven);
+ EXPECT_EQ(erase_if(s, &FirstIsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
}
}
@@ -282,6 +285,32 @@ TEST(FlatHashMap, NodeHandleMutableKeyAccess) {
}
#endif
+TEST(FlatHashMap, Reserve) {
+ // Verify that if we reserve(size() + n) then we can perform n insertions
+ // without a rehash, i.e., without invalidating any references.
+ for (size_t trial = 0; trial < 20; ++trial) {
+ for (size_t initial = 3; initial < 100; ++initial) {
+ // Fill in `initial` entries, then erase 2 of them, then reserve space for
+ // two inserts and check for reference stability while doing the inserts.
+ flat_hash_map<size_t, size_t> map;
+ for (size_t i = 0; i < initial; ++i) {
+ map[i] = i;
+ }
+ map.erase(0);
+ map.erase(1);
+ map.reserve(map.size() + 2);
+ size_t& a2 = map[2];
+ // In the event of a failure, asan will complain in one of these two
+ // assignments.
+ map[initial] = a2;
+ map[initial + 1] = a2;
+ // Fail even when not under asan:
+ size_t& a2new = map[2];
+ EXPECT_EQ(&a2, &a2new);
+ }
+ }
+}
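The test above pins down a subtle guarantee. A condensed sketch of the pattern it protects (hypothetical helper; the keys are illustrative):

// After reserve(size() + 2), the next two insertions cannot trigger a
// rehash, so `ref` must stay valid across both of them.
void InsertTwoWithStableRef(absl::flat_hash_map<int, int>& m) {
  m.reserve(m.size() + 2);
  int& ref = m[1000];  // first insertion (if key 1000 was absent)
  m[1001] = ref;       // second insertion; `ref` is still valid here
}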
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
index 6b89da65..4938c703 100644
--- a/absl/container/flat_hash_set.h
+++ b/absl/container/flat_hash_set.h
@@ -67,11 +67,15 @@ struct FlatHashSetPolicy;
//
// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All
// fundamental and Abseil types that support the `absl::Hash` framework have a
-// compatible equality operator for comparing insertions into `flat_hash_map`.
+// compatible equality operator for comparing insertions into `flat_hash_set`.
// If your type is not yet supported by the `absl::Hash` framework, see
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
+// Using `absl::flat_hash_set` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash` values
+// may be randomized across dynamically loaded libraries.
+//
// NOTE: A `flat_hash_set` stores its keys directly inside its implementation
// array to avoid memory indirection. Because a `flat_hash_set` is designed to
// move data when rehashed, set keys will not retain pointer stability. If you
@@ -106,7 +110,7 @@ class flat_hash_set
public:
// Constructors and Assignment Operators
//
- // A flat_hash_set supports the same overload set as `std::unordered_map`
+ // A flat_hash_set supports the same overload set as `std::unordered_set`
// for construction and assignment:
//
// * Default constructor
@@ -173,7 +177,7 @@ class flat_hash_set
// available within the `flat_hash_set`.
//
// NOTE: this member function is particular to `absl::flat_hash_set` and is
- // not provided in the `std::unordered_map` API.
+ // not provided in the `std::unordered_set` API.
using Base::capacity;
// flat_hash_set::empty()
@@ -332,7 +336,7 @@ class flat_hash_set
// flat_hash_set::swap(flat_hash_set& other)
//
// Exchanges the contents of this `flat_hash_set` with those of the `other`
- // flat hash map, avoiding invocation of any move, copy, or swap operations on
+ // flat hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `flat_hash_set` remain valid, excepting
@@ -340,7 +344,7 @@ class flat_hash_set
//
// `swap()` requires that the flat hash set's hashing and key equivalence
  // functions be Swappable, and are exchanged using unqualified calls to
- // non-member `swap()`. If the map's allocator has
+ // non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
@@ -395,14 +399,14 @@ class flat_hash_set
// flat_hash_set::bucket_count()
//
// Returns the number of "buckets" within the `flat_hash_set`. Note that
- // because a flat hash map contains all elements within its internal storage,
+ // because a flat hash set contains all elements within its internal storage,
// this value simply equals the current capacity of the `flat_hash_set`.
using Base::bucket_count;
// flat_hash_set::load_factor()
//
// Returns the current load factor of the `flat_hash_set` (the average number
- // of slots occupied with a value within the hash map).
+ // of slots occupied with a value within the hash set).
using Base::load_factor;
// flat_hash_set::max_load_factor()
@@ -443,9 +447,11 @@ class flat_hash_set
// erase_if(flat_hash_set<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
template <typename T, typename H, typename E, typename A, typename Predicate>
-void erase_if(flat_hash_set<T, H, E, A>& c, Predicate pred) {
- container_internal::EraseIf(pred, &c);
+typename flat_hash_set<T, H, E, A>::size_type erase_if(
+ flat_hash_set<T, H, E, A>& c, Predicate pred) {
+ return container_internal::EraseIf(pred, &c);
}
namespace container_internal {
diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc
index 8f6f9944..b6a72a20 100644
--- a/absl/container/flat_hash_set_test.cc
+++ b/absl/container/flat_hash_set_test.cc
@@ -143,31 +143,31 @@ TEST(FlatHashSet, EraseIf) {
// Erase all elements.
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, [](int) { return true; });
+ EXPECT_EQ(erase_if(s, [](int) { return true; }), 5);
EXPECT_THAT(s, IsEmpty());
}
// Erase no elements.
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, [](int) { return false; });
+ EXPECT_EQ(erase_if(s, [](int) { return false; }), 0);
EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5));
}
// Erase specific elements.
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, [](int k) { return k % 2 == 1; });
+ EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3);
EXPECT_THAT(s, UnorderedElementsAre(2, 4));
}
// Predicate is function reference.
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, IsEven);
+ EXPECT_EQ(erase_if(s, IsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
}
// Predicate is function pointer.
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, &IsEven);
+ EXPECT_EQ(erase_if(s, &IsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
}
}
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 7c182342..bc1c4a77 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -36,7 +36,6 @@
#define ABSL_CONTAINER_INLINED_VECTOR_H_
#include <algorithm>
-#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>
@@ -72,37 +71,43 @@ class InlinedVector {
using Storage = inlined_vector_internal::Storage<T, N, A>;
- using AllocatorTraits = typename Storage::AllocatorTraits;
- using RValueReference = typename Storage::RValueReference;
- using MoveIterator = typename Storage::MoveIterator;
- using IsMemcpyOk = typename Storage::IsMemcpyOk;
+ template <typename TheA>
+ using AllocatorTraits = inlined_vector_internal::AllocatorTraits<TheA>;
+ template <typename TheA>
+ using MoveIterator = inlined_vector_internal::MoveIterator<TheA>;
+ template <typename TheA>
+ using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<TheA>;
- template <typename Iterator>
+ template <typename TheA, typename Iterator>
using IteratorValueAdapter =
- typename Storage::template IteratorValueAdapter<Iterator>;
- using CopyValueAdapter = typename Storage::CopyValueAdapter;
- using DefaultValueAdapter = typename Storage::DefaultValueAdapter;
+ inlined_vector_internal::IteratorValueAdapter<TheA, Iterator>;
+ template <typename TheA>
+ using CopyValueAdapter = inlined_vector_internal::CopyValueAdapter<TheA>;
+ template <typename TheA>
+ using DefaultValueAdapter =
+ inlined_vector_internal::DefaultValueAdapter<TheA>;
template <typename Iterator>
using EnableIfAtLeastForwardIterator = absl::enable_if_t<
- inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value>;
+ inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
template <typename Iterator>
using DisableIfAtLeastForwardIterator = absl::enable_if_t<
- !inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value>;
+ !inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
public:
- using allocator_type = typename Storage::allocator_type;
- using value_type = typename Storage::value_type;
- using pointer = typename Storage::pointer;
- using const_pointer = typename Storage::const_pointer;
- using size_type = typename Storage::size_type;
- using difference_type = typename Storage::difference_type;
- using reference = typename Storage::reference;
- using const_reference = typename Storage::const_reference;
- using iterator = typename Storage::iterator;
- using const_iterator = typename Storage::const_iterator;
- using reverse_iterator = typename Storage::reverse_iterator;
- using const_reverse_iterator = typename Storage::const_reverse_iterator;
+ using allocator_type = A;
+ using value_type = inlined_vector_internal::ValueType<A>;
+ using pointer = inlined_vector_internal::Pointer<A>;
+ using const_pointer = inlined_vector_internal::ConstPointer<A>;
+ using size_type = inlined_vector_internal::SizeType<A>;
+ using difference_type = inlined_vector_internal::DifferenceType<A>;
+ using reference = inlined_vector_internal::Reference<A>;
+ using const_reference = inlined_vector_internal::ConstReference<A>;
+ using iterator = inlined_vector_internal::Iterator<A>;
+ using const_iterator = inlined_vector_internal::ConstIterator<A>;
+ using reverse_iterator = inlined_vector_internal::ReverseIterator<A>;
+ using const_reverse_iterator =
+ inlined_vector_internal::ConstReverseIterator<A>;
// ---------------------------------------------------------------------------
// InlinedVector Constructors and Destructor
@@ -111,28 +116,28 @@ class InlinedVector {
// Creates an empty inlined vector with a value-initialized allocator.
InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {}
- // Creates an empty inlined vector with a copy of `alloc`.
- explicit InlinedVector(const allocator_type& alloc) noexcept
- : storage_(alloc) {}
+ // Creates an empty inlined vector with a copy of `allocator`.
+ explicit InlinedVector(const allocator_type& allocator) noexcept
+ : storage_(allocator) {}
// Creates an inlined vector with `n` copies of `value_type()`.
explicit InlinedVector(size_type n,
- const allocator_type& alloc = allocator_type())
- : storage_(alloc) {
- storage_.Initialize(DefaultValueAdapter(), n);
+ const allocator_type& allocator = allocator_type())
+ : storage_(allocator) {
+ storage_.Initialize(DefaultValueAdapter<A>(), n);
}
// Creates an inlined vector with `n` copies of `v`.
InlinedVector(size_type n, const_reference v,
- const allocator_type& alloc = allocator_type())
- : storage_(alloc) {
- storage_.Initialize(CopyValueAdapter(v), n);
+ const allocator_type& allocator = allocator_type())
+ : storage_(allocator) {
+ storage_.Initialize(CopyValueAdapter<A>(std::addressof(v)), n);
}
// Creates an inlined vector with copies of the elements of `list`.
InlinedVector(std::initializer_list<value_type> list,
- const allocator_type& alloc = allocator_type())
- : InlinedVector(list.begin(), list.end(), alloc) {}
+ const allocator_type& allocator = allocator_type())
+ : InlinedVector(list.begin(), list.end(), allocator) {}
// Creates an inlined vector with elements constructed from the provided
// forward iterator range [`first`, `last`).
@@ -141,35 +146,36 @@ class InlinedVector {
// this constructor with two integral arguments and a call to the above
// `InlinedVector(size_type, const_reference)` constructor.
template <typename ForwardIterator,
- EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+ EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
InlinedVector(ForwardIterator first, ForwardIterator last,
- const allocator_type& alloc = allocator_type())
- : storage_(alloc) {
- storage_.Initialize(IteratorValueAdapter<ForwardIterator>(first),
- std::distance(first, last));
+ const allocator_type& allocator = allocator_type())
+ : storage_(allocator) {
+ storage_.Initialize(IteratorValueAdapter<A, ForwardIterator>(first),
+ static_cast<size_t>(std::distance(first, last)));
}
// Creates an inlined vector with elements constructed from the provided input
// iterator range [`first`, `last`).
template <typename InputIterator,
- DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+ DisableIfAtLeastForwardIterator<InputIterator> = 0>
InlinedVector(InputIterator first, InputIterator last,
- const allocator_type& alloc = allocator_type())
- : storage_(alloc) {
+ const allocator_type& allocator = allocator_type())
+ : storage_(allocator) {
std::copy(first, last, std::back_inserter(*this));
}
// Creates an inlined vector by copying the contents of `other` using
// `other`'s allocator.
InlinedVector(const InlinedVector& other)
- : InlinedVector(other, *other.storage_.GetAllocPtr()) {}
+ : InlinedVector(other, other.storage_.GetAllocator()) {}
- // Creates an inlined vector by copying the contents of `other` using `alloc`.
- InlinedVector(const InlinedVector& other, const allocator_type& alloc)
- : storage_(alloc) {
+ // Creates an inlined vector by copying the contents of `other` using the
+ // provided `allocator`.
+ InlinedVector(const InlinedVector& other, const allocator_type& allocator)
+ : storage_(allocator) {
if (other.empty()) {
// Empty; nothing to do.
- } else if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) {
+ } else if (IsMemcpyOk<A>::value && !other.storage_.GetIsAllocated()) {
// Memcpy-able and do not need allocation.
storage_.MemcpyFrom(other.storage_);
} else {
@@ -194,23 +200,23 @@ class InlinedVector {
InlinedVector(InlinedVector&& other) noexcept(
absl::allocator_is_nothrow<allocator_type>::value ||
std::is_nothrow_move_constructible<value_type>::value)
- : storage_(*other.storage_.GetAllocPtr()) {
- if (IsMemcpyOk::value) {
+ : storage_(other.storage_.GetAllocator()) {
+ if (IsMemcpyOk<A>::value) {
storage_.MemcpyFrom(other.storage_);
other.storage_.SetInlinedSize(0);
} else if (other.storage_.GetIsAllocated()) {
- storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
- other.storage_.GetAllocatedCapacity());
+ storage_.SetAllocation({other.storage_.GetAllocatedData(),
+ other.storage_.GetAllocatedCapacity()});
storage_.SetAllocatedSize(other.storage_.GetSize());
other.storage_.SetInlinedSize(0);
} else {
- IteratorValueAdapter<MoveIterator> other_values(
- MoveIterator(other.storage_.GetInlinedData()));
+ IteratorValueAdapter<A, MoveIterator<A>> other_values(
+ MoveIterator<A>(other.storage_.GetInlinedData()));
- inlined_vector_internal::ConstructElements(
- storage_.GetAllocPtr(), storage_.GetInlinedData(), &other_values,
+ inlined_vector_internal::ConstructElements<A>(
+ storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
other.storage_.GetSize());
storage_.SetInlinedSize(other.storage_.GetSize());
@@ -218,30 +224,32 @@ class InlinedVector {
}
// Creates an inlined vector by moving in the contents of `other` with a copy
- // of `alloc`.
+ // of `allocator`.
//
- // NOTE: if `other`'s allocator is not equal to `alloc`, even if `other`
+ // NOTE: if `other`'s allocator is not equal to `allocator`, even if `other`
// contains allocated memory, this move constructor will still allocate. Since
// allocation is performed, this constructor can only be `noexcept` if the
// specified allocator is also `noexcept`.
- InlinedVector(InlinedVector&& other, const allocator_type& alloc) noexcept(
- absl::allocator_is_nothrow<allocator_type>::value)
- : storage_(alloc) {
- if (IsMemcpyOk::value) {
+ InlinedVector(
+ InlinedVector&& other,
+ const allocator_type&
+ allocator) noexcept(absl::allocator_is_nothrow<allocator_type>::value)
+ : storage_(allocator) {
+ if (IsMemcpyOk<A>::value) {
storage_.MemcpyFrom(other.storage_);
other.storage_.SetInlinedSize(0);
- } else if ((*storage_.GetAllocPtr() == *other.storage_.GetAllocPtr()) &&
+ } else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) &&
other.storage_.GetIsAllocated()) {
- storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
- other.storage_.GetAllocatedCapacity());
+ storage_.SetAllocation({other.storage_.GetAllocatedData(),
+ other.storage_.GetAllocatedCapacity()});
storage_.SetAllocatedSize(other.storage_.GetSize());
other.storage_.SetInlinedSize(0);
} else {
- storage_.Initialize(
- IteratorValueAdapter<MoveIterator>(MoveIterator(other.data())),
- other.size());
+ storage_.Initialize(IteratorValueAdapter<A, MoveIterator<A>>(
+ MoveIterator<A>(other.data())),
+ other.size());
}
}
@@ -442,7 +450,7 @@ class InlinedVector {
// `InlinedVector::get_allocator()`
//
// Returns a copy of the inlined vector's allocator.
- allocator_type get_allocator() const { return *storage_.GetAllocPtr(); }
+ allocator_type get_allocator() const { return storage_.GetAllocator(); }
// ---------------------------------------------------------------------------
// InlinedVector Member Mutators
@@ -476,16 +484,16 @@ class InlinedVector {
// unspecified state.
InlinedVector& operator=(InlinedVector&& other) {
if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
- if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) {
- inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
- size());
+ if (IsMemcpyOk<A>::value || other.storage_.GetIsAllocated()) {
+ inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+ storage_.GetAllocator(), data(), size());
storage_.DeallocateIfAllocated();
storage_.MemcpyFrom(other.storage_);
other.storage_.SetInlinedSize(0);
} else {
- storage_.Assign(IteratorValueAdapter<MoveIterator>(
- MoveIterator(other.storage_.GetInlinedData())),
+ storage_.Assign(IteratorValueAdapter<A, MoveIterator<A>>(
+ MoveIterator<A>(other.storage_.GetInlinedData())),
other.size());
}
}
@@ -497,7 +505,7 @@ class InlinedVector {
//
// Replaces the contents of the inlined vector with `n` copies of `v`.
void assign(size_type n, const_reference v) {
- storage_.Assign(CopyValueAdapter(v), n);
+ storage_.Assign(CopyValueAdapter<A>(std::addressof(v)), n);
}
// Overload of `InlinedVector::assign(...)` that replaces the contents of the
@@ -511,10 +519,10 @@ class InlinedVector {
//
// NOTE: this overload is for iterators that are "forward" category or better.
template <typename ForwardIterator,
- EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+ EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
void assign(ForwardIterator first, ForwardIterator last) {
- storage_.Assign(IteratorValueAdapter<ForwardIterator>(first),
- std::distance(first, last));
+ storage_.Assign(IteratorValueAdapter<A, ForwardIterator>(first),
+ static_cast<size_t>(std::distance(first, last)));
}
// Overload of `InlinedVector::assign(...)` to replace the contents of the
@@ -522,7 +530,7 @@ class InlinedVector {
//
// NOTE: this overload is for iterators that are "input" category.
template <typename InputIterator,
- DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+ DisableIfAtLeastForwardIterator<InputIterator> = 0>
void assign(InputIterator first, InputIterator last) {
size_type i = 0;
for (; i < size() && first != last; ++i, static_cast<void>(++first)) {
@@ -541,7 +549,7 @@ class InlinedVector {
// is larger than `size()`, new elements are value-initialized.
void resize(size_type n) {
ABSL_HARDENING_ASSERT(n <= max_size());
- storage_.Resize(DefaultValueAdapter(), n);
+ storage_.Resize(DefaultValueAdapter<A>(), n);
}
// Overload of `InlinedVector::resize(...)` that resizes the inlined vector to
@@ -551,7 +559,7 @@ class InlinedVector {
// is larger than `size()`, new elements are copied-constructed from `v`.
void resize(size_type n, const_reference v) {
ABSL_HARDENING_ASSERT(n <= max_size());
- storage_.Resize(CopyValueAdapter(v), n);
+ storage_.Resize(CopyValueAdapter<A>(std::addressof(v)), n);
}
// `InlinedVector::insert(...)`
@@ -564,7 +572,7 @@ class InlinedVector {
// Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using
// move semantics, returning an `iterator` to the newly inserted element.
- iterator insert(const_iterator pos, RValueReference v) {
+ iterator insert(const_iterator pos, value_type&& v) {
return emplace(pos, std::move(v));
}
@@ -577,7 +585,20 @@ class InlinedVector {
if (ABSL_PREDICT_TRUE(n != 0)) {
value_type dealias = v;
- return storage_.Insert(pos, CopyValueAdapter(dealias), n);
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2
+ // It appears that GCC thinks that since `pos` is a const pointer and may
+ // point to uninitialized memory at this point, a warning should be
+ // issued. But `pos` is actually only used to compute an array index to
+ // write to.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+ return storage_.Insert(pos, CopyValueAdapter<A>(std::addressof(dealias)),
+ n);
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
} else {
return const_cast<iterator>(pos);
}
@@ -596,14 +617,15 @@ class InlinedVector {
//
// NOTE: this overload is for iterators that are "forward" category or better.
template <typename ForwardIterator,
- EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+ EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
iterator insert(const_iterator pos, ForwardIterator first,
ForwardIterator last) {
ABSL_HARDENING_ASSERT(pos >= begin());
ABSL_HARDENING_ASSERT(pos <= end());
if (ABSL_PREDICT_TRUE(first != last)) {
- return storage_.Insert(pos, IteratorValueAdapter<ForwardIterator>(first),
+ return storage_.Insert(pos,
+ IteratorValueAdapter<A, ForwardIterator>(first),
std::distance(first, last));
} else {
return const_cast<iterator>(pos);
@@ -616,7 +638,7 @@ class InlinedVector {
//
// NOTE: this overload is for iterators that are "input" category.
template <typename InputIterator,
- DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+ DisableIfAtLeastForwardIterator<InputIterator> = 0>
iterator insert(const_iterator pos, InputIterator first, InputIterator last) {
ABSL_HARDENING_ASSERT(pos >= begin());
ABSL_HARDENING_ASSERT(pos <= end());
@@ -640,8 +662,8 @@ class InlinedVector {
value_type dealias(std::forward<Args>(args)...);
return storage_.Insert(pos,
- IteratorValueAdapter<MoveIterator>(
- MoveIterator(std::addressof(dealias))),
+ IteratorValueAdapter<A, MoveIterator<A>>(
+ MoveIterator<A>(std::addressof(dealias))),
1);
}
@@ -661,7 +683,7 @@ class InlinedVector {
// Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()`
// using move semantics.
- void push_back(RValueReference v) {
+ void push_back(value_type&& v) {
static_cast<void>(emplace_back(std::move(v)));
}
@@ -671,7 +693,7 @@ class InlinedVector {
void pop_back() noexcept {
ABSL_HARDENING_ASSERT(!empty());
- AllocatorTraits::destroy(*storage_.GetAllocPtr(), data() + (size() - 1));
+ AllocatorTraits<A>::destroy(storage_.GetAllocator(), data() + (size() - 1));
storage_.SubtractSize(1);
}
@@ -710,8 +732,8 @@ class InlinedVector {
// Destroys all elements in the inlined vector, setting the size to `0` and
// deallocating any held memory.
void clear() noexcept {
- inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
- size());
+ inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+ storage_.GetAllocator(), data(), size());
storage_.DeallocateIfAllocated();
storage_.SetInlinedSize(0);
@@ -724,15 +746,12 @@ class InlinedVector {
// `InlinedVector::shrink_to_fit()`
//
- // Reduces memory usage by freeing unused memory. After being called, calls to
- // `capacity()` will be equal to `max(N, size())`.
- //
- // If `size() <= N` and the inlined vector contains allocated memory, the
- // elements will all be moved to the inlined space and the allocated memory
- // will be deallocated.
+ // Attempts to reduce memory usage by moving elements to (or keeping elements
+ // in) the smallest available buffer sufficient for containing `size()`
+ // elements.
//
- // If `size() > N` and `size() < capacity()`, the elements will be moved to a
- // smaller allocation.
+ // If `size()` is sufficiently small, the elements will be moved into (or kept
+ // in) the inlined space.
void shrink_to_fit() {
if (storage_.GetIsAllocated()) {
storage_.ShrinkToFit();
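A sketch of the documented behavior (illustrative sizes; under the wording above, whether the inlined buffer is used depends on how `size()` compares to `N`):

#include "absl/container/inlined_vector.h"

void ShrinkDemo() {
  absl::InlinedVector<int, 8> v(100);  // 100 > 8: storage is heap-allocated
  v.resize(4);                         // size() now fits the inlined space
  v.shrink_to_fit();                   // elements can move back inline and
                                       // the heap allocation be released
}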
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 98aff334..4c1ba04a 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -1545,17 +1545,18 @@ TYPED_TEST_P(InstanceTest, InitializerListAssign) {
}
}
-REGISTER_TYPED_TEST_CASE_P(InstanceTest, Swap, CountConstructorsDestructors,
- CountConstructorsDestructorsOnCopyConstruction,
- CountConstructorsDestructorsOnMoveConstruction,
- CountConstructorsDestructorsOnAssignment,
- CountConstructorsDestructorsOnMoveAssignment,
- CountElemAssignInlineBacking, RangedConstructor,
- RangedAssign, InitializerListAssign);
+REGISTER_TYPED_TEST_SUITE_P(InstanceTest, Swap, CountConstructorsDestructors,
+ CountConstructorsDestructorsOnCopyConstruction,
+ CountConstructorsDestructorsOnMoveConstruction,
+ CountConstructorsDestructorsOnAssignment,
+ CountConstructorsDestructorsOnMoveAssignment,
+ CountElemAssignInlineBacking, RangedConstructor,
+ RangedAssign, InitializerListAssign);
using InstanceTypes =
::testing::Types<CopyableOnlyInstance, CopyableMovableInstance>;
-INSTANTIATE_TYPED_TEST_CASE_P(InstanceTestOnTypes, InstanceTest, InstanceTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(InstanceTestOnTypes, InstanceTest,
+ InstanceTypes);
TEST(DynamicVec, DynamicVecCompiles) {
DynamicVec v;
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index 0bb38366..01f4e749 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -58,6 +58,7 @@
#include <type_traits>
#include <utility>
+#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
@@ -74,12 +75,24 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+#error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_MEMORY_SANITIZER)
+// When compiled in sanitizer mode, we add generation integers to the nodes and
+// iterators. When iterators are used, we validate that the container has not
+// been mutated since the iterator was constructed.
+#define ABSL_BTREE_ENABLE_GENERATIONS
+#endif
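A self-contained sketch of the mechanism described above (names are illustrative; the real btree stores one generation per node but only consults the root's):

#include <cassert>
#include <cstdint>

struct GenerationTracked {
  uint32_t generation = 0;

  struct iterator {
    const GenerationTracked* container;
    uint32_t birth_generation;
    // Before each use, check that the container was not mutated after this
    // iterator was created.
    void assert_valid() const {
      assert(birth_generation == container->generation &&
             "invalidated iterator");
    }
  };

  iterator make_iterator() const { return {this, generation}; }
  void mutate() { ++generation; }  // any insert/erase invalidates iterators
};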
+
+template <typename Compare, typename T, typename U>
+using compare_result_t = absl::result_of_t<const Compare(const T &, const U &)>;
+
// A helper class that indicates if the Compare parameter is a key-compare-to
// comparator.
template <typename Compare, typename T>
using btree_is_key_compare_to =
- std::is_convertible<absl::result_of_t<Compare(const T &, const T &)>,
- absl::weak_ordering>;
+ std::is_convertible<compare_result_t<Compare, T, T>, absl::weak_ordering>;
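For reference, a comparator satisfies this trait by returning `absl::weak_ordering` rather than `bool`, which lets the btree learn less/equivalent/greater from a single invocation. A minimal sketch (hypothetical comparator):

struct IntCompareTo {
  absl::weak_ordering operator()(int lhs, int rhs) const {
    if (lhs < rhs) return absl::weak_ordering::less;
    if (lhs > rhs) return absl::weak_ordering::greater;
    return absl::weak_ordering::equivalent;
  }
};
static_assert(btree_is_key_compare_to<IntCompareTo, int>::value,
              "IntCompareTo is a key-compare-to comparator");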
struct StringBtreeDefaultLess {
using is_transparent = void;
@@ -88,7 +101,12 @@ struct StringBtreeDefaultLess {
// Compatibility constructor.
StringBtreeDefaultLess(std::less<std::string>) {} // NOLINT
- StringBtreeDefaultLess(std::less<string_view>) {} // NOLINT
+ StringBtreeDefaultLess(std::less<absl::string_view>) {} // NOLINT
+
+ // Allow converting to std::less for use in key_comp()/value_comp().
+ explicit operator std::less<std::string>() const { return {}; }
+ explicit operator std::less<absl::string_view>() const { return {}; }
+ explicit operator std::less<absl::Cord>() const { return {}; }
absl::weak_ordering operator()(absl::string_view lhs,
absl::string_view rhs) const {
@@ -115,7 +133,12 @@ struct StringBtreeDefaultGreater {
StringBtreeDefaultGreater() = default;
StringBtreeDefaultGreater(std::greater<std::string>) {} // NOLINT
- StringBtreeDefaultGreater(std::greater<string_view>) {} // NOLINT
+ StringBtreeDefaultGreater(std::greater<absl::string_view>) {} // NOLINT
+
+ // Allow converting to std::greater for use in key_comp()/value_comp().
+ explicit operator std::greater<std::string>() const { return {}; }
+ explicit operator std::greater<absl::string_view>() const { return {}; }
+ explicit operator std::greater<absl::Cord>() const { return {}; }
absl::weak_ordering operator()(absl::string_view lhs,
absl::string_view rhs) const {
@@ -136,49 +159,140 @@ struct StringBtreeDefaultGreater {
}
};
-// A helper class to convert a boolean comparison into a three-way "compare-to"
-// comparison that returns an `absl::weak_ordering`. This helper
-// class is specialized for less<std::string>, greater<std::string>,
-// less<string_view>, greater<string_view>, less<absl::Cord>, and
-// greater<absl::Cord>.
-//
-// key_compare_to_adapter is provided so that btree users
-// automatically get the more efficient compare-to code when using common
-// Abseil string types with common comparison functors.
-// These string-like specializations also turn on heterogeneous lookup by
-// default.
+// See the comments below for checked_compare.
+template <typename Compare, bool is_class = std::is_class<Compare>::value>
+struct checked_compare_base : Compare {
+ using Compare::Compare;
+ explicit checked_compare_base(Compare c) : Compare(std::move(c)) {}
+ const Compare &comp() const { return *this; }
+};
template <typename Compare>
-struct key_compare_to_adapter {
- using type = Compare;
+struct checked_compare_base<Compare, false> {
+ explicit checked_compare_base(Compare c) : compare(std::move(c)) {}
+ const Compare &comp() const { return compare; }
+ Compare compare;
+};
+
+// A mechanism for opting out of checked_compare for use only in btree_test.cc.
+struct BtreeTestOnlyCheckedCompareOptOutBase {};
+
+// A helper class to adapt the specified comparator for two use cases:
+// (1) When using common Abseil string types with common comparison functors,
+// convert a boolean comparison into a three-way comparison that returns an
+// `absl::weak_ordering`. This helper class is specialized for
+// less<std::string>, greater<std::string>, less<string_view>,
+// greater<string_view>, less<absl::Cord>, and greater<absl::Cord>.
+// (2) Adapt the comparator to diagnose cases of non-strict-weak-ordering (see
+// https://en.cppreference.com/w/cpp/named_req/Compare) in debug mode. Whenever
+// a comparison is made, we will make assertions to verify that the comparator
+// is valid.
+template <typename Compare, typename Key>
+struct key_compare_adapter {
+ // Inherit from checked_compare_base to support function pointers and also
+ // keep empty-base-optimization (EBO) support for classes.
+ // Note: we can't use CompressedTuple here because that would interfere
+ // with the EBO for `btree::rightmost_`. `btree::rightmost_` is itself a
+ // CompressedTuple and nested `CompressedTuple`s don't support EBO.
+ // TODO(b/214288561): use CompressedTuple instead once it supports EBO for
+ // nested `CompressedTuple`s.
+ struct checked_compare : checked_compare_base<Compare> {
+ private:
+ using Base = typename checked_compare::checked_compare_base;
+ using Base::comp;
+
+  // If possible, returns whether `k` is equivalent to itself. We can only do
+ // this for `Key`s because we can't be sure that it's safe to call
+ // `comp()(k, k)` otherwise. Even if SFINAE allows it, there could be a
+ // compilation failure inside the implementation of the comparison operator.
+ bool is_self_equivalent(const Key &k) const {
+ // Note: this works for both boolean and three-way comparators.
+ return comp()(k, k) == 0;
+ }
+ // If we can't compare `t` with itself, returns true unconditionally.
+ template <typename T>
+ bool is_self_equivalent(const T &) const {
+ return true;
+ }
+
+ public:
+ using Base::Base;
+ checked_compare(Compare comp) : Base(std::move(comp)) {} // NOLINT
+
+ // Allow converting to Compare for use in key_comp()/value_comp().
+ explicit operator Compare() const { return comp(); }
+
+ template <typename T, typename U,
+ absl::enable_if_t<
+ std::is_same<bool, compare_result_t<Compare, T, U>>::value,
+ int> = 0>
+ bool operator()(const T &lhs, const U &rhs) const {
+ // NOTE: if any of these assertions fail, then the comparator does not
+ // establish a strict-weak-ordering (see
+ // https://en.cppreference.com/w/cpp/named_req/Compare).
+ assert(is_self_equivalent(lhs));
+ assert(is_self_equivalent(rhs));
+ const bool lhs_comp_rhs = comp()(lhs, rhs);
+ assert(!lhs_comp_rhs || !comp()(rhs, lhs));
+ return lhs_comp_rhs;
+ }
+
+ template <
+ typename T, typename U,
+ absl::enable_if_t<std::is_convertible<compare_result_t<Compare, T, U>,
+ absl::weak_ordering>::value,
+ int> = 0>
+ absl::weak_ordering operator()(const T &lhs, const U &rhs) const {
+ // NOTE: if any of these assertions fail, then the comparator does not
+ // establish a strict-weak-ordering (see
+ // https://en.cppreference.com/w/cpp/named_req/Compare).
+ assert(is_self_equivalent(lhs));
+ assert(is_self_equivalent(rhs));
+ const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs);
+#ifndef NDEBUG
+ const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs);
+ if (lhs_comp_rhs > 0) {
+ assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0");
+ } else if (lhs_comp_rhs == 0) {
+ assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0");
+ } else {
+ assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
+ }
+#endif
+ return lhs_comp_rhs;
+ }
+ };
+ using type = absl::conditional_t<
+ std::is_base_of<BtreeTestOnlyCheckedCompareOptOutBase, Compare>::value,
+ Compare, checked_compare>;
};
template <>
-struct key_compare_to_adapter<std::less<std::string>> {
+struct key_compare_adapter<std::less<std::string>, std::string> {
using type = StringBtreeDefaultLess;
};
template <>
-struct key_compare_to_adapter<std::greater<std::string>> {
+struct key_compare_adapter<std::greater<std::string>, std::string> {
using type = StringBtreeDefaultGreater;
};
template <>
-struct key_compare_to_adapter<std::less<absl::string_view>> {
+struct key_compare_adapter<std::less<absl::string_view>, absl::string_view> {
using type = StringBtreeDefaultLess;
};
template <>
-struct key_compare_to_adapter<std::greater<absl::string_view>> {
+struct key_compare_adapter<std::greater<absl::string_view>, absl::string_view> {
using type = StringBtreeDefaultGreater;
};
template <>
-struct key_compare_to_adapter<std::less<absl::Cord>> {
+struct key_compare_adapter<std::less<absl::Cord>, absl::Cord> {
using type = StringBtreeDefaultLess;
};
template <>
-struct key_compare_to_adapter<std::greater<absl::Cord>> {
+struct key_compare_adapter<std::greater<absl::Cord>, absl::Cord> {
using type = StringBtreeDefaultGreater;
};
@@ -214,19 +328,70 @@ struct prefers_linear_node_search<
T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
: T::absl_btree_prefer_linear_node_search {};
+template <typename Compare, typename Key>
+constexpr bool compare_has_valid_result_type() {
+ using compare_result_type = compare_result_t<Compare, Key, Key>;
+ return std::is_same<compare_result_type, bool>::value ||
+ std::is_convertible<compare_result_type, absl::weak_ordering>::value;
+}
+
+template <typename original_key_compare, typename value_type>
+class map_value_compare {
+ template <typename Params>
+ friend class btree;
+
+ // Note: this `protected` is part of the API of std::map::value_compare. See
+ // https://en.cppreference.com/w/cpp/container/map/value_compare.
+ protected:
+ explicit map_value_compare(original_key_compare c) : comp(std::move(c)) {}
+
+ original_key_compare comp; // NOLINT
+
+ public:
+ auto operator()(const value_type &lhs, const value_type &rhs) const
+ -> decltype(comp(lhs.first, rhs.first)) {
+ return comp(lhs.first, rhs.first);
+ }
+};
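Usage sketch (hypothetical helper): a map's `value_comp()` exposes this adapter, so whole `value_type` pairs can be ordered by key alone, mirroring `std::map::value_compare`.

// Returns whether `a` is ordered before `b` in map `m`, comparing only the
// keys (`.first`).
template <typename Map>
bool ValueOrderedBefore(const Map& m, const typename Map::value_type& a,
                        const typename Map::value_type& b) {
  return m.value_comp()(a, b);
}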
+
template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
- bool Multi, typename SlotPolicy>
+ bool IsMulti, bool IsMap, typename SlotPolicy>
struct common_params {
+ using original_key_compare = Compare;
+
// If Compare is a common comparator for a string-like type, then we adapt it
// to use heterogeneous lookup and to be a key-compare-to comparator.
- using key_compare = typename key_compare_to_adapter<Compare>::type;
+ // We also adapt the comparator to diagnose invalid comparators in debug mode.
+ // We disable this when `Compare` is invalid in a way that will cause
+  // adaptation to fail (having an invalid return type) so that we can give a
+ // better compilation failure in static_assert_validation. If we don't do
+ // this, then there will be cascading compilation failures that are confusing
+ // for users.
+ using key_compare =
+ absl::conditional_t<!compare_has_valid_result_type<Compare, Key>(),
+ Compare,
+ typename key_compare_adapter<Compare, Key>::type>;
+
+ static constexpr bool kIsKeyCompareStringAdapted =
+ std::is_same<key_compare, StringBtreeDefaultLess>::value ||
+ std::is_same<key_compare, StringBtreeDefaultGreater>::value;
+ static constexpr bool kIsKeyCompareTransparent =
+ IsTransparent<original_key_compare>::value ||
+ kIsKeyCompareStringAdapted;
+ static constexpr bool kEnableGenerations =
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ true;
+#else
+ false;
+#endif
+
// A type which indicates if we have a key-compare-to functor or a plain old
// key-compare functor.
using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
using allocator_type = Alloc;
using key_type = Key;
- using size_type = std::make_signed<size_t>::type;
+ using size_type = size_t;
using difference_type = ptrdiff_t;
using slot_policy = SlotPolicy;
@@ -238,6 +403,12 @@ struct common_params {
using reference = value_type &;
using const_reference = const value_type &;
+ using value_compare =
+ absl::conditional_t<IsMap,
+ map_value_compare<original_key_compare, value_type>,
+ original_key_compare>;
+ using is_map_container = std::integral_constant<bool, IsMap>;
+
// For the given lookup key type, returns whether we can have multiple
// equivalent keys in the btree. If this is a multi-container, then we can.
// Otherwise, we can have multiple equivalent keys only if all of the
@@ -248,27 +419,25 @@ struct common_params {
// that we know has the same equivalence classes for all lookup types.
template <typename LookupKey>
constexpr static bool can_have_multiple_equivalent_keys() {
- return Multi ||
- (IsTransparent<key_compare>::value &&
- !std::is_same<LookupKey, Key>::value &&
- !std::is_same<key_compare, StringBtreeDefaultLess>::value &&
- !std::is_same<key_compare, StringBtreeDefaultGreater>::value);
+ return IsMulti || (IsTransparent<key_compare>::value &&
+ !std::is_same<LookupKey, Key>::value &&
+ !kIsKeyCompareStringAdapted);
}
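To make the conditions above concrete, here is a sketch (illustrative types, not library code) of a transparent comparator whose equivalence classes for a lookup key are coarser than for the stored key, which is exactly when a non-multi btree can see multiple matches:

#include <utility>

struct PairFirstComp {
  using is_transparent = void;
  // Stored keys: full lexicographic order, so {1, 2} and {1, 3} differ.
  bool operator()(std::pair<int, int> a, std::pair<int, int> b) const {
    return a < b;
  }
  // Lookup by int compares the first element only, so the distinct stored
  // keys {1, 2} and {1, 3} are both equivalent to the lookup key 1.
  bool operator()(std::pair<int, int> a, int b) const { return a.first < b; }
  bool operator()(int a, std::pair<int, int> b) const { return a < b.first; }
};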
enum {
kTargetNodeSize = TargetNodeSize,
- // Upper bound for the available space for values. This is largest for leaf
+ // Upper bound for the available space for slots. This is largest for leaf
// nodes, which have overhead of at least a pointer + 4 bytes (for storing
// 3 field_types and an enum).
- kNodeValueSpace =
+ kNodeSlotSpace =
TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4),
};
- // This is an integral type large enough to hold as many
- // ValueSize-values as will fit a node of TargetNodeSize bytes.
+  // This is an integral type large enough to hold as many slots as will fit in
+  // a node of TargetNodeSize bytes.
using node_count_type =
- absl::conditional_t<(kNodeValueSpace / sizeof(value_type) >
+ absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) >
(std::numeric_limits<uint8_t>::max)()),
uint16_t, uint8_t>; // NOLINT
@@ -291,116 +460,10 @@ struct common_params {
slot_policy::destroy(alloc, slot);
}
static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) {
- construct(alloc, new_slot, old_slot);
- destroy(alloc, old_slot);
- }
- static void swap(Alloc *alloc, slot_type *a, slot_type *b) {
- slot_policy::swap(alloc, a, b);
- }
- static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
- slot_policy::move(alloc, src, dest);
- }
-};
-
-// A parameters structure for holding the type parameters for a btree_map.
-// Compare and Alloc should be nothrow copy-constructible.
-template <typename Key, typename Data, typename Compare, typename Alloc,
- int TargetNodeSize, bool Multi>
-struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
- map_slot_policy<Key, Data>> {
- using super_type = typename map_params::common_params;
- using mapped_type = Data;
- // This type allows us to move keys when it is safe to do so. It is safe
- // for maps in which value_type and mutable_value_type are layout compatible.
- using slot_policy = typename super_type::slot_policy;
- using slot_type = typename super_type::slot_type;
- using value_type = typename super_type::value_type;
- using init_type = typename super_type::init_type;
-
- using key_compare = typename super_type::key_compare;
- // Inherit from key_compare for empty base class optimization.
- struct value_compare : private key_compare {
- value_compare() = default;
- explicit value_compare(const key_compare &cmp) : key_compare(cmp) {}
-
- template <typename T, typename U>
- auto operator()(const T &left, const U &right) const
- -> decltype(std::declval<key_compare>()(left.first, right.first)) {
- return key_compare::operator()(left.first, right.first);
- }
- };
- using is_map_container = std::true_type;
-
- template <typename V>
- static auto key(const V &value) -> decltype(value.first) {
- return value.first;
- }
- static const Key &key(const slot_type *s) { return slot_policy::key(s); }
- static const Key &key(slot_type *s) { return slot_policy::key(s); }
- // For use in node handle.
- static auto mutable_key(slot_type *s)
- -> decltype(slot_policy::mutable_key(s)) {
- return slot_policy::mutable_key(s);
- }
- static mapped_type &value(value_type *value) { return value->second; }
-};
-
-// This type implements the necessary functions from the
-// absl::container_internal::slot_type interface.
-template <typename Key>
-struct set_slot_policy {
- using slot_type = Key;
- using value_type = Key;
- using mutable_value_type = Key;
-
- static value_type &element(slot_type *slot) { return *slot; }
- static const value_type &element(const slot_type *slot) { return *slot; }
-
- template <typename Alloc, class... Args>
- static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
- absl::allocator_traits<Alloc>::construct(*alloc, slot,
- std::forward<Args>(args)...);
- }
-
- template <typename Alloc>
- static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
- absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other));
- }
-
- template <typename Alloc>
- static void destroy(Alloc *alloc, slot_type *slot) {
- absl::allocator_traits<Alloc>::destroy(*alloc, slot);
- }
-
- template <typename Alloc>
- static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) {
- using std::swap;
- swap(*a, *b);
- }
-
- template <typename Alloc>
- static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) {
- *dest = std::move(*src);
+ slot_policy::transfer(alloc, new_slot, old_slot);
}
};
-// A parameters structure for holding the type parameters for a btree_set.
-// Compare and Alloc should be nothrow copy-constructible.
-template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
- bool Multi>
-struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
- set_slot_policy<Key>> {
- using value_type = Key;
- using slot_type = typename set_params::common_params::slot_type;
- using value_compare = typename set_params::common_params::key_compare;
- using is_map_container = std::false_type;
-
- template <typename V>
- static const V &key(const V &value) { return value; }
- static const Key &key(const slot_type *slot) { return *slot; }
- static const Key &key(slot_type *slot) { return *slot; }
-};
-
// An adapter class that converts a lower-bound compare into an upper-bound
// compare. Note: there is no need to make a version of this adapter specialized
// for key-compare-to functors because the upper-bound (the first value greater
@@ -435,8 +498,8 @@ struct SearchResult {
template <typename V>
struct SearchResult<V, false> {
SearchResult() {}
- explicit SearchResult(V value) : value(value) {}
- SearchResult(V value, MatchKind /*match*/) : value(value) {}
+ explicit SearchResult(V v) : value(v) {}
+ SearchResult(V v, MatchKind /*match*/) : value(v) {}
V value;
@@ -453,6 +516,7 @@ class btree_node {
using field_type = typename Params::node_count_type;
using allocator_type = typename Params::allocator_type;
using slot_type = typename Params::slot_type;
+ using original_key_compare = typename Params::original_key_compare;
public:
using params_type = Params;
@@ -474,21 +538,28 @@ class btree_node {
// - Otherwise, choose binary.
// TODO(ezb): Might make sense to add condition(s) based on node-size.
using use_linear_search = std::integral_constant<
- bool,
- has_linear_node_search_preference<key_compare>::value
- ? prefers_linear_node_search<key_compare>::value
- : has_linear_node_search_preference<key_type>::value
+ bool, has_linear_node_search_preference<original_key_compare>::value
+ ? prefers_linear_node_search<original_key_compare>::value
+ : has_linear_node_search_preference<key_type>::value
? prefers_linear_node_search<key_type>::value
: std::is_arithmetic<key_type>::value &&
- (std::is_same<std::less<key_type>, key_compare>::value ||
+ (std::is_same<std::less<key_type>,
+ original_key_compare>::value ||
std::is_same<std::greater<key_type>,
- key_compare>::value)>;
+ original_key_compare>::value)>;
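For reference, the comparator-side opt-in that the first branch detects is a member type tag (documented in btree_map.h/btree_set.h); a hedged sketch of a comparator that opts in:

#include <string>
#include <type_traits>

// A comparator that requests linear node search via the documented tag.
// Whether linear beats binary depends on key type and node size, so this
// should be a measured choice, not a default.
struct CheapStringLess {
  using absl_btree_prefer_linear_node_search = std::true_type;
  bool operator()(const std::string &a, const std::string &b) const {
    return a < b;
  }
};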
- // This class is organized by gtl::Layout as if it had the following
- // structure:
+ // This class is organized by absl::container_internal::Layout as if it had
+ // the following structure:
// // A pointer to the node's parent.
// btree_node *parent;
//
+ // // When ABSL_BTREE_ENABLE_GENERATIONS is defined, we also have a
+ // // generation integer in order to check that when iterators are
+ // // used, they haven't been invalidated already. Only the generation on
+ // // the root is used, but we have one on each node because whether a node
+ // // is root or not can change.
+ // uint32_t generation;
+ //
// // The position of the node in the node's parent.
// field_type position;
// // The index of the first populated value in `values`.
@@ -535,23 +606,27 @@ class btree_node {
btree_node() = default;
private:
- using layout_type = absl::container_internal::Layout<btree_node *, field_type,
- slot_type, btree_node *>;
+ using layout_type =
+ absl::container_internal::Layout<btree_node *, uint32_t, field_type,
+ slot_type, btree_node *>;
constexpr static size_type SizeWithNSlots(size_type n) {
- return layout_type(/*parent*/ 1,
- /*position, start, finish, max_count*/ 4,
- /*slots*/ n,
- /*children*/ 0)
+ return layout_type(
+ /*parent*/ 1,
+ /*generation*/ params_type::kEnableGenerations ? 1 : 0,
+ /*position, start, finish, max_count*/ 4,
+ /*slots*/ n,
+ /*children*/ 0)
.AllocSize();
}
- // A lower bound for the overhead of fields other than values in a leaf node.
+ // A lower bound for the overhead of fields other than slots in a leaf node.
constexpr static size_type MinimumOverhead() {
- return SizeWithNSlots(1) - sizeof(value_type);
+ return SizeWithNSlots(1) - sizeof(slot_type);
}
// Compute how many values we can fit onto a leaf node taking into account
// padding.
- constexpr static size_type NodeTargetSlots(const int begin, const int end) {
+ constexpr static size_type NodeTargetSlots(const size_type begin,
+ const size_type end) {
return begin == end ? begin
: SizeWithNSlots((begin + end) / 2 + 1) >
params_type::kTargetNodeSize
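The recursion above is a compile-time binary search for the largest slot count whose node size still fits the target. The same shape, with a hypothetical linear cost model so the result can be checked by hand:

// Hypothetical cost model: 32 bytes of header plus 24 bytes per slot,
// searching for the largest n with SizeWithN(n) <= 256.
constexpr int SizeWithN(int n) { return 32 + 24 * n; }
constexpr int TargetSlotsSketch(int begin, int end) {
  return begin == end ? begin
         : SizeWithN((begin + end) / 2 + 1) > 256
             ? TargetSlotsSketch(begin, (begin + end) / 2)
             : TargetSlotsSketch((begin + end) / 2 + 1, end);
}
static_assert(TargetSlotsSketch(0, 100) == 9, "32 + 24 * 9 == 248 <= 256");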
@@ -580,16 +655,20 @@ class btree_node {
// Leaves can have fewer than kNodeSlots values.
constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
- return layout_type(/*parent*/ 1,
- /*position, start, finish, max_count*/ 4,
- /*slots*/ slot_count,
- /*children*/ 0);
+ return layout_type(
+ /*parent*/ 1,
+ /*generation*/ params_type::kEnableGenerations ? 1 : 0,
+ /*position, start, finish, max_count*/ 4,
+ /*slots*/ slot_count,
+ /*children*/ 0);
}
constexpr static layout_type InternalLayout() {
- return layout_type(/*parent*/ 1,
- /*position, start, finish, max_count*/ 4,
- /*slots*/ kNodeSlots,
- /*children*/ kNodeSlots + 1);
+ return layout_type(
+ /*parent*/ 1,
+ /*generation*/ params_type::kEnableGenerations ? 1 : 0,
+ /*position, start, finish, max_count*/ 4,
+ /*slots*/ kNodeSlots,
+ /*children*/ kNodeSlots + 1);
}
constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
return LeafLayout(slot_count).AllocSize();
@@ -603,44 +682,47 @@ class btree_node {
template <size_type N>
inline typename layout_type::template ElementType<N> *GetField() {
// We assert that we don't read from values that aren't there.
- assert(N < 3 || !leaf());
+ assert(N < 4 || is_internal());
return InternalLayout().template Pointer<N>(reinterpret_cast<char *>(this));
}
template <size_type N>
inline const typename layout_type::template ElementType<N> *GetField() const {
- assert(N < 3 || !leaf());
+ assert(N < 4 || is_internal());
return InternalLayout().template Pointer<N>(
reinterpret_cast<const char *>(this));
}
void set_parent(btree_node *p) { *GetField<0>() = p; }
- field_type &mutable_finish() { return GetField<1>()[2]; }
- slot_type *slot(int i) { return &GetField<2>()[i]; }
+ field_type &mutable_finish() { return GetField<2>()[2]; }
+ slot_type *slot(int i) { return &GetField<3>()[i]; }
slot_type *start_slot() { return slot(start()); }
slot_type *finish_slot() { return slot(finish()); }
- const slot_type *slot(int i) const { return &GetField<2>()[i]; }
- void set_position(field_type v) { GetField<1>()[0] = v; }
- void set_start(field_type v) { GetField<1>()[1] = v; }
- void set_finish(field_type v) { GetField<1>()[2] = v; }
+ const slot_type *slot(int i) const { return &GetField<3>()[i]; }
+ void set_position(field_type v) { GetField<2>()[0] = v; }
+ void set_start(field_type v) { GetField<2>()[1] = v; }
+ void set_finish(field_type v) { GetField<2>()[2] = v; }
// This method is only called by the node init methods.
- void set_max_count(field_type v) { GetField<1>()[3] = v; }
+ void set_max_count(field_type v) { GetField<2>()[3] = v; }
public:
// Whether this is a leaf node or not. This value doesn't change after the
// node is created.
- bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; }
+ bool is_leaf() const { return GetField<2>()[3] != kInternalNodeMaxCount; }
+ // Whether this is an internal node or not. This value doesn't change after
+ // the node is created.
+ bool is_internal() const { return !is_leaf(); }
// Getter for the position of this node in its parent.
- field_type position() const { return GetField<1>()[0]; }
+ field_type position() const { return GetField<2>()[0]; }
// Getter for the offset of the first value in the `values` array.
field_type start() const {
- // TODO(ezb): when floating storage is implemented, return GetField<1>()[1];
- assert(GetField<1>()[1] == 0);
+ // TODO(ezb): when floating storage is implemented, return GetField<2>()[1];
+ assert(GetField<2>()[1] == 0);
return 0;
}
// Getter for the offset after the last value in the `values` array.
- field_type finish() const { return GetField<1>()[2]; }
+ field_type finish() const { return GetField<2>()[2]; }
// Getters for the number of values stored in this node.
field_type count() const {
@@ -650,7 +732,7 @@ class btree_node {
field_type max_count() const {
// Internal nodes have max_count==kInternalNodeMaxCount.
// Leaf nodes have max_count in [1, kNodeSlots].
- const field_type max_count = GetField<1>()[3];
+ const field_type max_count = GetField<2>()[3];
return max_count == field_type{kInternalNodeMaxCount}
? field_type{kNodeSlots}
: max_count;
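The max_count field thus does double duty: a reserved sentinel value marks internal nodes, so no separate flag byte is needed. A toy sketch of the encoding, with hypothetical constants standing in for kInternalNodeMaxCount and kNodeSlots:

#include <cassert>
#include <cstdint>

constexpr uint8_t kSentinelInternal = 255;  // plays kInternalNodeMaxCount
constexpr uint8_t kLeafSlots = 62;          // plays kNodeSlots

struct NodeHeaderSketch {
  uint8_t max_count_field;
  bool is_leaf() const { return max_count_field != kSentinelInternal; }
  uint8_t max_count() const {
    return max_count_field == kSentinelInternal ? kLeafSlots : max_count_field;
  }
};

int main() {
  NodeHeaderSketch leaf{8}, internal{kSentinelInternal};
  assert(leaf.is_leaf() && leaf.max_count() == 8);
  assert(!internal.is_leaf() && internal.max_count() == kLeafSlots);
}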
@@ -661,21 +743,44 @@ class btree_node {
// Getter for whether the node is the root of the tree. The parent of the
// root of the tree is the leftmost node in the tree which is guaranteed to
// be a leaf.
- bool is_root() const { return parent()->leaf(); }
+ bool is_root() const { return parent()->is_leaf(); }
void make_root() {
assert(parent()->is_root());
+ set_generation(parent()->generation());
set_parent(parent()->parent());
}
+ // Gets the root node's generation integer, which is the one used by the tree.
+ uint32_t *get_root_generation() const {
+ assert(params_type::kEnableGenerations);
+ const btree_node *curr = this;
+ for (; !curr->is_root(); curr = curr->parent()) continue;
+ return const_cast<uint32_t *>(&curr->GetField<1>()[0]);
+ }
+
+ // Returns the generation for iterator validation.
+ uint32_t generation() const {
+ return params_type::kEnableGenerations ? *get_root_generation() : 0;
+ }
+ // Updates generation. Should only be called on a root node or during node
+ // initialization.
+ void set_generation(uint32_t generation) {
+ if (params_type::kEnableGenerations) GetField<1>()[0] = generation;
+ }
+ // Updates the generation. We do this whenever the node is mutated.
+ void next_generation() {
+ if (params_type::kEnableGenerations) ++*get_root_generation();
+ }
+
// Getters for the key/value at position i in the node.
const key_type &key(int i) const { return params_type::key(slot(i)); }
reference value(int i) { return params_type::element(slot(i)); }
const_reference value(int i) const { return params_type::element(slot(i)); }
// Getters/setter for the child at position i in the node.
- btree_node *child(int i) const { return GetField<3>()[i]; }
+ btree_node *child(int i) const { return GetField<4>()[i]; }
btree_node *start_child() const { return child(start()); }
- btree_node *&mutable_child(int i) { return GetField<3>()[i]; }
+ btree_node *&mutable_child(int i) { return GetField<4>()[i]; }
void clear_child(int i) {
absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
}
@@ -832,7 +937,8 @@ class btree_node {
void merge(btree_node *src, allocator_type *alloc);
// Node allocation/deletion routines.
- void init_leaf(btree_node *parent, int max_count) {
+ void init_leaf(int max_count, btree_node *parent) {
+ set_generation(0);
set_parent(parent);
set_position(0);
set_start(0);
@@ -842,7 +948,7 @@ class btree_node {
start_slot(), max_count * sizeof(slot_type));
}
void init_internal(btree_node *parent) {
- init_leaf(parent, kNodeSlots);
+ init_leaf(kNodeSlots, parent);
// Set `max_count` to a sentinel value to indicate that this node is
// internal.
set_max_count(kInternalNodeMaxCount);
@@ -861,15 +967,18 @@ class btree_node {
private:
template <typename... Args>
void value_init(const field_type i, allocator_type *alloc, Args &&... args) {
+ next_generation();
absl::container_internal::SanitizerUnpoisonObject(slot(i));
params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
}
void value_destroy(const field_type i, allocator_type *alloc) {
+ next_generation();
params_type::destroy(alloc, slot(i));
absl::container_internal::SanitizerPoisonObject(slot(i));
}
void value_destroy_n(const field_type i, const field_type n,
allocator_type *alloc) {
+ next_generation();
for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
params_type::destroy(alloc, s);
absl::container_internal::SanitizerPoisonObject(s);
@@ -885,6 +994,7 @@ class btree_node {
// Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`.
void transfer(const size_type dest_i, const size_type src_i,
btree_node *src_node, allocator_type *alloc) {
+ next_generation();
transfer(slot(dest_i), src_node->slot(src_i), alloc);
}
@@ -893,6 +1003,7 @@ class btree_node {
void transfer_n(const size_type n, const size_type dest_i,
const size_type src_i, btree_node *src_node,
allocator_type *alloc) {
+ next_generation();
for (slot_type *src = src_node->slot(src_i), *end = src + n,
*dest = slot(dest_i);
src != end; ++src, ++dest) {
@@ -905,6 +1016,7 @@ class btree_node {
void transfer_n_backward(const size_type n, const size_type dest_i,
const size_type src_i, btree_node *src_node,
allocator_type *alloc) {
+ next_generation();
for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n,
*dest = slot(dest_i + n - 1);
src != end; --src, --dest) {
@@ -915,13 +1027,13 @@ class btree_node {
template <typename P>
friend class btree;
template <typename N, typename R, typename P>
- friend struct btree_iterator;
+ friend class btree_iterator;
friend class BtreeNodePeer;
+ friend struct btree_access;
};
template <typename Node, typename Reference, typename Pointer>
-struct btree_iterator {
- private:
+class btree_iterator {
using key_type = typename Node::key_type;
using size_type = typename Node::size_type;
using params_type = typename Node::params_type;
@@ -949,9 +1061,15 @@ struct btree_iterator {
using reference = Reference;
using iterator_category = std::bidirectional_iterator_tag;
- btree_iterator() : node(nullptr), position(-1) {}
- explicit btree_iterator(Node *n) : node(n), position(n->start()) {}
- btree_iterator(Node *n, int p) : node(n), position(p) {}
+ btree_iterator() : btree_iterator(nullptr, -1) {}
+ explicit btree_iterator(Node *n) : btree_iterator(n, n->start()) {}
+ btree_iterator(Node *n, int p) : node_(n), position_(p) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ // Use `~uint32_t{}` as a sentinel value for iterator generations so it
+ // doesn't match the initial value for the actual generation.
+ generation_ = n != nullptr ? n->generation() : ~uint32_t{};
+#endif
+ }
// NOTE: this SFINAE allows for implicit conversions from iterator to
// const_iterator, but it specifically avoids hiding the copy constructor so
@@ -962,58 +1080,32 @@ struct btree_iterator {
std::is_same<btree_iterator, const_iterator>::value,
int> = 0>
btree_iterator(const btree_iterator<N, R, P> other) // NOLINT
- : node(other.node), position(other.position) {}
-
- private:
- // This SFINAE allows explicit conversions from const_iterator to
- // iterator, but also avoids hiding the copy constructor.
- // NOTE: the const_cast is safe because this constructor is only called by
- // non-const methods and the container owns the nodes.
- template <typename N, typename R, typename P,
- absl::enable_if_t<
- std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
- std::is_same<btree_iterator, iterator>::value,
- int> = 0>
- explicit btree_iterator(const btree_iterator<N, R, P> other)
- : node(const_cast<node_type *>(other.node)), position(other.position) {}
-
- // Increment/decrement the iterator.
- void increment() {
- if (node->leaf() && ++position < node->finish()) {
- return;
- }
- increment_slow();
- }
- void increment_slow();
-
- void decrement() {
- if (node->leaf() && --position >= node->start()) {
- return;
- }
- decrement_slow();
+ : node_(other.node_), position_(other.position_) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ generation_ = other.generation_;
+#endif
}
- void decrement_slow();
- public:
bool operator==(const iterator &other) const {
- return node == other.node && position == other.position;
+ return node_ == other.node_ && position_ == other.position_;
}
bool operator==(const const_iterator &other) const {
- return node == other.node && position == other.position;
+ return node_ == other.node_ && position_ == other.position_;
}
bool operator!=(const iterator &other) const {
- return node != other.node || position != other.position;
+ return node_ != other.node_ || position_ != other.position_;
}
bool operator!=(const const_iterator &other) const {
- return node != other.node || position != other.position;
+ return node_ != other.node_ || position_ != other.position_;
}
// Accessors for the key/value the iterator is pointing at.
reference operator*() const {
- ABSL_HARDENING_ASSERT(node != nullptr);
- ABSL_HARDENING_ASSERT(node->start() <= position);
- ABSL_HARDENING_ASSERT(node->finish() > position);
- return node->value(position);
+ ABSL_HARDENING_ASSERT(node_ != nullptr);
+ ABSL_HARDENING_ASSERT(node_->start() <= position_);
+ ABSL_HARDENING_ASSERT(node_->finish() > position_);
+ assert_valid_generation();
+ return node_->value(position_);
}
pointer operator->() const { return &operator*(); }
@@ -1051,23 +1143,84 @@ struct btree_iterator {
friend class btree_multiset_container;
template <typename TreeType, typename CheckerType>
friend class base_checker;
+ friend struct btree_access;
- const key_type &key() const { return node->key(position); }
- slot_type *slot() { return node->slot(position); }
+ // This SFINAE allows explicit conversions from const_iterator to
+ // iterator, but also avoids hiding the copy constructor.
+ // NOTE: the const_cast is safe because this constructor is only called by
+ // non-const methods and the container owns the nodes.
+ template <typename N, typename R, typename P,
+ absl::enable_if_t<
+ std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
+ std::is_same<btree_iterator, iterator>::value,
+ int> = 0>
+ explicit btree_iterator(const btree_iterator<N, R, P> other)
+ : node_(const_cast<node_type *>(other.node_)),
+ position_(other.position_) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ generation_ = other.generation_;
+#endif
+ }
+
+ // Increment/decrement the iterator.
+ void increment() {
+ assert_valid_generation();
+ if (node_->is_leaf() && ++position_ < node_->finish()) {
+ return;
+ }
+ increment_slow();
+ }
+ void increment_slow();
+
+ void decrement() {
+ assert_valid_generation();
+ if (node_->is_leaf() && --position_ >= node_->start()) {
+ return;
+ }
+ decrement_slow();
+ }
+ void decrement_slow();
+
+ // Updates the generation. For use internally right before we return an
+ // iterator to the user.
+ void update_generation() {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ if (node_ != nullptr) generation_ = node_->generation();
+#endif
+ }
+
+ const key_type &key() const { return node_->key(position_); }
+ decltype(std::declval<Node *>()->slot(0)) slot() {
+ return node_->slot(position_);
+ }
+
+ void assert_valid_generation() const {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ if (node_ != nullptr && node_->generation() != generation_) {
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ "Attempting to use an invalidated iterator. The corresponding b-tree "
+ "container has been mutated since this iterator was constructed.");
+ }
+#endif
+ }
// The node in the tree the iterator is pointing at.
- Node *node;
+ Node *node_;
// The position within the node of the tree the iterator is pointing at.
// NOTE: this is an int rather than a field_type because iterators can point
// to invalid positions (such as -1) in certain circumstances.
- int position;
+ int position_;
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ // Used to check that the iterator hasn't been invalidated.
+ uint32_t generation_;
+#endif
};
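Stripped of the b-tree specifics, the generation machinery is the classic mutation-counter pattern; a toy container showing the same contract (iterators snapshot a counter at creation and assert it is unchanged before each use):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

class CheckedVecSketch {
 public:
  struct iterator {
    CheckedVecSketch *owner;
    size_t pos;
    uint32_t generation;  // snapshot taken when the iterator was handed out
    int &operator*() const {
      // Fails loudly if the container mutated after this iterator was made.
      assert(generation == owner->generation_ && "invalidated iterator");
      return owner->data_[pos];
    }
  };
  iterator begin() { return {this, 0, generation_}; }
  void push_back(int v) {
    ++generation_;  // every mutation invalidates outstanding iterators
    data_.push_back(v);
  }

 private:
  std::vector<int> data_;
  uint32_t generation_ = 0;
};

int main() {
  CheckedVecSketch v;
  v.push_back(1);
  auto it = v.begin();
  v.push_back(2);  // bumps the generation
  // *it;          // would now trip the assert
  (void)it;
}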
template <typename Params>
class btree {
using node_type = btree_node<Params>;
using is_key_compare_to = typename Params::is_key_compare_to;
- using init_type = typename Params::init_type;
using field_type = typename node_type::field_type;
// We use a static empty node for the root/leftmost/rightmost of empty btrees
@@ -1075,6 +1228,9 @@ class btree {
struct alignas(node_type::Alignment()) EmptyNodeType : node_type {
using field_type = typename node_type::field_type;
node_type *parent;
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ uint32_t generation = 0;
+#endif
field_type position = 0;
field_type start = 0;
field_type finish = 0;
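This is the shared-sentinel pattern: one immutable static node serves as root/leftmost/rightmost for every empty tree, so the hot paths never branch on a null root. A minimal sketch of the idea (names hypothetical):

struct SentinelNodeSketch {
  SentinelNodeSketch *parent;
  int start = 0, finish = 0;
};

inline SentinelNodeSketch *SharedEmptyNode() {
  // Self-parented and never written to; all empty containers point here.
  static SentinelNodeSketch empty = {&empty};
  return &empty;
}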
@@ -1129,6 +1285,7 @@ class btree {
using size_type = typename Params::size_type;
using difference_type = typename Params::difference_type;
using key_compare = typename Params::key_compare;
+ using original_key_compare = typename Params::original_key_compare;
using value_compare = typename Params::value_compare;
using allocator_type = typename Params::allocator_type;
using reference = typename Params::reference;
@@ -1147,14 +1304,6 @@ class btree {
using slot_type = typename Params::slot_type;
private:
- // For use in copy_or_move_values_in_order.
- const value_type &maybe_move_from_iterator(const_iterator it) { return *it; }
- value_type &&maybe_move_from_iterator(iterator it) {
- // This is a destructive operation on the other container so it's safe for
- // us to const_cast and move from the keys here even if it's a set.
- return std::move(const_cast<value_type &>(*it));
- }
-
// Copies or moves (depending on the template parameter) the values in
// other into this btree in their order in other. This btree must be empty
// before this method is called. This method is used in copy construction,
@@ -1167,7 +1316,7 @@ class btree {
public:
btree(const key_compare &comp, const allocator_type &alloc)
- : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
+ : root_(EmptyNode()), rightmost_(comp, alloc, EmptyNode()), size_(0) {}
btree(const btree &other) : btree(other, other.allocator()) {}
btree(const btree &other, const allocator_type &alloc)
@@ -1175,10 +1324,10 @@ class btree {
copy_or_move_values_in_order(other);
}
btree(btree &&other) noexcept
- : root_(std::move(other.root_)),
- rightmost_(absl::exchange(other.rightmost_, EmptyNode())),
+ : root_(absl::exchange(other.root_, EmptyNode())),
+ rightmost_(std::move(other.rightmost_)),
size_(absl::exchange(other.size_, 0)) {
- other.mutable_root() = EmptyNode();
+ other.mutable_rightmost() = EmptyNode();
}
btree(btree &&other, const allocator_type &alloc)
: btree(other.key_comp(), alloc) {
@@ -1203,9 +1352,9 @@ class btree {
iterator begin() { return iterator(leftmost()); }
const_iterator begin() const { return const_iterator(leftmost()); }
- iterator end() { return iterator(rightmost_, rightmost_->finish()); }
+ iterator end() { return iterator(rightmost(), rightmost()->finish()); }
const_iterator end() const {
- return const_iterator(rightmost_, rightmost_->finish());
+ return const_iterator(rightmost(), rightmost()->finish());
}
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const {
@@ -1331,14 +1480,16 @@ class btree {
void swap(btree &other);
const key_compare &key_comp() const noexcept {
- return root_.template get<0>();
+ return rightmost_.template get<0>();
}
template <typename K1, typename K2>
bool compare_keys(const K1 &a, const K2 &b) const {
return compare_internal::compare_result_as_less_than(key_comp()(a, b));
}
- value_compare value_comp() const { return value_compare(key_comp()); }
+ value_compare value_comp() const {
+ return value_compare(original_key_compare(key_comp()));
+ }
// Verifies the structure of the btree.
void verify() const;
@@ -1376,6 +1527,7 @@ class btree {
}
// The total number of bytes used by the btree.
+ // TODO(b/169338300): update to support node_btree_*.
size_type bytes_used() const {
node_stats stats = internal_stats(root());
if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) {
@@ -1419,11 +1571,20 @@ class btree {
allocator_type get_allocator() const { return allocator(); }
private:
+ friend struct btree_access;
+
// Internal accessor routines.
- node_type *root() { return root_.template get<2>(); }
- const node_type *root() const { return root_.template get<2>(); }
- node_type *&mutable_root() noexcept { return root_.template get<2>(); }
- key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); }
+ node_type *root() { return root_; }
+ const node_type *root() const { return root_; }
+ node_type *&mutable_root() noexcept { return root_; }
+ node_type *rightmost() { return rightmost_.template get<2>(); }
+ const node_type *rightmost() const { return rightmost_.template get<2>(); }
+ node_type *&mutable_rightmost() noexcept {
+ return rightmost_.template get<2>();
+ }
+ key_compare *mutable_key_comp() noexcept {
+ return &rightmost_.template get<0>();
+ }
// The leftmost node is stored as the parent of the root node.
node_type *leftmost() { return root()->parent(); }
@@ -1431,10 +1592,10 @@ class btree {
// Allocator routines.
allocator_type *mutable_allocator() noexcept {
- return &root_.template get<1>();
+ return &rightmost_.template get<1>();
}
const allocator_type &allocator() const noexcept {
- return root_.template get<1>();
+ return rightmost_.template get<1>();
}
// Allocates a correctly aligned node of at least size bytes using the
@@ -1453,12 +1614,12 @@ class btree {
}
node_type *new_leaf_node(node_type *parent) {
node_type *n = allocate(node_type::LeafSize());
- n->init_leaf(parent, kNodeSlots);
+ n->init_leaf(kNodeSlots, parent);
return n;
}
node_type *new_leaf_root_node(const int max_count) {
node_type *n = allocate(node_type::LeafSize(max_count));
- n->init_leaf(/*parent=*/n, max_count);
+ n->init_leaf(max_count, /*parent=*/n);
return n;
}
@@ -1482,10 +1643,10 @@ class btree {
void try_shrink();
iterator internal_end(iterator iter) {
- return iter.node != nullptr ? iter : end();
+ return iter.node_ != nullptr ? iter : end();
}
const_iterator internal_end(const_iterator iter) const {
- return iter.node != nullptr ? iter : end();
+ return iter.node_ != nullptr ? iter : end();
}
// Emplaces a value into the btree immediately before iter. Requires that
@@ -1495,9 +1656,8 @@ class btree {
// Returns an iterator pointing to the first value >= the value "iter" is
// pointing at. Note that "iter" might be pointing to an invalid location such
- // as iter.position == iter.node->finish(). This routine simply moves iter up
- // in the tree to a valid location.
- // Requires: iter.node is non-null.
+ // as iter.position_ == iter.node_->finish(). This routine simply moves iter
+ // up in the tree to a valid location. Requires: iter.node_ is non-null.
template <typename IterType>
static IterType internal_last(IterType iter);
@@ -1533,7 +1693,7 @@ class btree {
if (node == nullptr || (node == root() && empty())) {
return node_stats(0, 0);
}
- if (node->leaf()) {
+ if (node->is_leaf()) {
return node_stats(1, 0);
}
node_stats res(0, 1);
@@ -1543,15 +1703,14 @@ class btree {
return res;
}
- // We use compressed tuple in order to save space because key_compare and
- // allocator_type are usually empty.
- absl::container_internal::CompressedTuple<key_compare, allocator_type,
- node_type *>
- root_;
+ node_type *root_;
// A pointer to the rightmost node. Note that the leftmost node is stored as
- // the root's parent.
- node_type *rightmost_;
+ // the root's parent. We use compressed tuple in order to save space because
+ // key_compare and allocator_type are usually empty.
+ absl::container_internal::CompressedTuple<key_compare, allocator_type,
+ node_type *>
+ rightmost_;
// Number of values.
size_type size_;
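The CompressedTuple trick relies on the empty base optimization: std::less and std::allocator are empty classes, so packing them alongside the pointer keeps the header at one word instead of three. A minimal sketch of the mechanism:

#include <functional>

// EBO: an empty private base contributes no storage in a standard-layout
// class, so the comparator rides along with the pointer for free.
template <typename Compare, typename T>
struct CompressedPairSketch : private Compare {
  T value;
  const Compare &comp() const { return *this; }
};

static_assert(sizeof(CompressedPairSketch<std::less<int>, void *>) ==
                  sizeof(void *),
              "empty comparator shares storage with the pointer");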
@@ -1575,8 +1734,8 @@ inline void btree_node<P>::emplace_value(const size_type i,
value_init(i, alloc, std::forward<Args>(args)...);
set_finish(finish() + 1);
- if (!leaf() && finish() > i + 1) {
- for (int j = finish(); j > i + 1; --j) {
+ if (is_internal() && finish() > i + 1) {
+ for (field_type j = finish(); j > i + 1; --j) {
set_child(j, child(j - 1));
}
clear_child(i + 1);
@@ -1593,7 +1752,7 @@ inline void btree_node<P>::remove_values(const field_type i,
const field_type src_i = i + to_erase;
transfer_n(orig_finish - src_i, i, src_i, this, alloc);
- if (!leaf()) {
+ if (is_internal()) {
// Delete all children between begin and end.
for (int j = 0; j < to_erase; ++j) {
clear_and_delete(child(i + j + 1), alloc);
@@ -1630,7 +1789,7 @@ void btree_node<P>::rebalance_right_to_left(const int to_move,
right->transfer_n(right->count() - to_move, right->start(),
right->start() + to_move, right, alloc);
- if (!leaf()) {
+ if (is_internal()) {
// Move the child pointers from the right to the left node.
for (int i = 0; i < to_move; ++i) {
init_child(finish() + i + 1, right->child(i));
@@ -1677,7 +1836,7 @@ void btree_node<P>::rebalance_left_to_right(const int to_move,
// 4) Move the new delimiting value to the parent from the left node.
parent()->transfer(position(), finish() - to_move, this, alloc);
- if (!leaf()) {
+ if (is_internal()) {
// Move the child pointers from the left to the right node.
for (int i = right->finish(); i >= right->start(); --i) {
right->init_child(i + to_move, right->child(i));
@@ -1723,7 +1882,7 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
value_destroy(finish(), alloc);
parent()->init_child(position() + 1, dest);
- if (!leaf()) {
+ if (is_internal()) {
for (int i = dest->start(), j = finish() + 1; i <= dest->finish();
++i, ++j) {
assert(child(j) != nullptr);
@@ -1744,7 +1903,7 @@ void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
// Move the values from the right to the left node.
transfer_n(src->count(), finish() + 1, src->start(), src, alloc);
- if (!leaf()) {
+ if (is_internal()) {
// Move the child pointers from the right to the left node.
for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) {
init_child(j, src->child(i));
@@ -1762,7 +1921,7 @@ void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
template <typename P>
void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
- if (node->leaf()) {
+ if (node->is_leaf()) {
node->value_destroy_n(node->start(), node->count(), alloc);
deallocate(LeafSize(node->max_count()), node, alloc);
return;
@@ -1776,7 +1935,15 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
btree_node *delete_root_parent = node->parent();
// Navigate to the leftmost leaf under node, and then delete upwards.
- while (!node->leaf()) node = node->start_child();
+ while (node->is_internal()) node = node->start_child();
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ // When generations are enabled, we delete the leftmost leaf last in case it's
+ // the parent of the root and we need to check whether it's a leaf before we
+ // can update the root's generation.
+ // TODO(ezb): if we change btree_node::is_root to check a bool inside the node
+ // instead of checking whether the parent is a leaf, we can remove this logic.
+ btree_node *leftmost_leaf = node;
+#endif
// Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
// isn't guaranteed to be a valid `field_type`.
int pos = node->position();
@@ -1786,14 +1953,17 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
assert(pos <= parent->finish());
do {
node = parent->child(pos);
- if (!node->leaf()) {
+ if (node->is_internal()) {
// Navigate to the leftmost leaf under node.
- while (!node->leaf()) node = node->start_child();
+ while (node->is_internal()) node = node->start_child();
pos = node->position();
parent = node->parent();
}
node->value_destroy_n(node->start(), node->count(), alloc);
- deallocate(LeafSize(node->max_count()), node, alloc);
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ if (leftmost_leaf != node)
+#endif
+ deallocate(LeafSize(node->max_count()), node, alloc);
++pos;
} while (pos <= parent->finish());
@@ -1805,7 +1975,12 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
parent = node->parent();
node->value_destroy_n(node->start(), node->count(), alloc);
deallocate(InternalSize(), node, alloc);
- if (parent == delete_root_parent) return;
+ if (parent == delete_root_parent) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc);
+#endif
+ return;
+ }
++pos;
} while (pos > parent->finish());
}
@@ -1815,49 +1990,49 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
// btree_iterator methods
template <typename N, typename R, typename P>
void btree_iterator<N, R, P>::increment_slow() {
- if (node->leaf()) {
- assert(position >= node->finish());
+ if (node_->is_leaf()) {
+ assert(position_ >= node_->finish());
btree_iterator save(*this);
- while (position == node->finish() && !node->is_root()) {
- assert(node->parent()->child(node->position()) == node);
- position = node->position();
- node = node->parent();
+ while (position_ == node_->finish() && !node_->is_root()) {
+ assert(node_->parent()->child(node_->position()) == node_);
+ position_ = node_->position();
+ node_ = node_->parent();
}
// TODO(ezb): assert we aren't incrementing end() instead of handling.
- if (position == node->finish()) {
+ if (position_ == node_->finish()) {
*this = save;
}
} else {
- assert(position < node->finish());
- node = node->child(position + 1);
- while (!node->leaf()) {
- node = node->start_child();
+ assert(position_ < node_->finish());
+ node_ = node_->child(position_ + 1);
+ while (node_->is_internal()) {
+ node_ = node_->start_child();
}
- position = node->start();
+ position_ = node_->start();
}
}
template <typename N, typename R, typename P>
void btree_iterator<N, R, P>::decrement_slow() {
- if (node->leaf()) {
- assert(position <= -1);
+ if (node_->is_leaf()) {
+ assert(position_ <= -1);
btree_iterator save(*this);
- while (position < node->start() && !node->is_root()) {
- assert(node->parent()->child(node->position()) == node);
- position = node->position() - 1;
- node = node->parent();
+ while (position_ < node_->start() && !node_->is_root()) {
+ assert(node_->parent()->child(node_->position()) == node_);
+ position_ = node_->position() - 1;
+ node_ = node_->parent();
}
// TODO(ezb): assert we aren't decrementing begin() instead of handling.
- if (position < node->start()) {
+ if (position_ < node_->start()) {
*this = save;
}
} else {
- assert(position >= node->start());
- node = node->child(position);
- while (!node->leaf()) {
- node = node->child(node->finish());
+ assert(position_ >= node_->start());
+ node_ = node_->child(position_);
+ while (node_->is_internal()) {
+ node_ = node_->child(node_->finish());
}
- position = node->finish() - 1;
+ position_ = node_->finish() - 1;
}
}
@@ -1875,12 +2050,12 @@ void btree<P>::copy_or_move_values_in_order(Btree &other) {
// values is the same order we'll store them in.
auto iter = other.begin();
if (iter == other.end()) return;
- insert_multi(maybe_move_from_iterator(iter));
+ insert_multi(iter.slot());
++iter;
for (; iter != other.end(); ++iter) {
// If the btree is not empty, we can just insert the new value at the end
// of the tree.
- internal_emplace(end(), maybe_move_from_iterator(iter));
+ internal_emplace(end(), iter.slot());
}
}
@@ -1900,15 +2075,12 @@ constexpr bool btree<P>::static_assert_validation() {
"target node size too large");
// Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
- using compare_result_type =
- absl::result_of_t<key_compare(key_type, key_type)>;
static_assert(
- std::is_same<compare_result_type, bool>::value ||
- std::is_convertible<compare_result_type, absl::weak_ordering>::value,
+ compare_has_valid_result_type<key_compare, key_type>(),
"key comparison function must return absl::{weak,strong}_ordering or "
"bool.");
- // Test the assumption made in setting kNodeValueSpace.
+ // Test the assumption made in setting kNodeSlotSpace.
static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4,
"node space assumption incorrect");
@@ -1962,7 +2134,7 @@ template <typename K, typename... Args>
auto btree<P>::insert_unique(const K &key, Args &&... args)
-> std::pair<iterator, bool> {
if (empty()) {
- mutable_root() = rightmost_ = new_leaf_root_node(1);
+ mutable_root() = mutable_rightmost() = new_leaf_root_node(1);
}
SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
@@ -1975,7 +2147,7 @@ auto btree<P>::insert_unique(const K &key, Args &&... args)
}
} else {
iterator last = internal_last(iter);
- if (last.node && !compare_keys(key, last.key())) {
+ if (last.node_ && !compare_keys(key, last.key())) {
// The key already exists in the tree, do nothing.
return {last, false};
}
@@ -2020,8 +2192,11 @@ template <typename P>
template <typename InputIterator>
void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
for (; b != e; ++b) {
- init_type value(*b);
- insert_hint_unique(end(), params_type::key(value), std::move(value));
+ // Use a node handle to manage a temp slot.
+ auto node_handle =
+ CommonAccess::Construct<node_handle_type>(get_allocator(), *b);
+ slot_type *slot = CommonAccess::GetSlot(node_handle);
+ insert_hint_unique(end(), params_type::key(slot), slot);
}
}
@@ -2029,11 +2204,11 @@ template <typename P>
template <typename ValueType>
auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
if (empty()) {
- mutable_root() = rightmost_ = new_leaf_root_node(1);
+ mutable_root() = mutable_rightmost() = new_leaf_root_node(1);
}
iterator iter = internal_upper_bound(key);
- if (iter.node == nullptr) {
+ if (iter.node_ == nullptr) {
iter = end();
}
return internal_emplace(iter, std::forward<ValueType>(v));
@@ -2093,15 +2268,15 @@ auto btree<P>::operator=(btree &&other) noexcept -> btree & {
using std::swap;
if (absl::allocator_traits<
allocator_type>::propagate_on_container_copy_assignment::value) {
- // Note: `root_` also contains the allocator and the key comparator.
swap(root_, other.root_);
+ // Note: `rightmost_` also contains the allocator and the key comparator.
swap(rightmost_, other.rightmost_);
swap(size_, other.size_);
} else {
if (allocator() == other.allocator()) {
swap(mutable_root(), other.mutable_root());
swap(*mutable_key_comp(), *other.mutable_key_comp());
- swap(rightmost_, other.rightmost_);
+ swap(mutable_rightmost(), other.mutable_rightmost());
swap(size_, other.size_);
} else {
// We aren't allowed to propagate the allocator and the allocator is
@@ -2119,22 +2294,29 @@ auto btree<P>::operator=(btree &&other) noexcept -> btree & {
template <typename P>
auto btree<P>::erase(iterator iter) -> iterator {
- bool internal_delete = false;
- if (!iter.node->leaf()) {
- // Deletion of a value on an internal node. First, move the largest value
- // from our left child here, then delete that position (in remove_values()
- // below). We can get to the largest value from our left child by
- // decrementing iter.
+ iter.node_->value_destroy(iter.position_, mutable_allocator());
+ iter.update_generation();
+
+ const bool internal_delete = iter.node_->is_internal();
+ if (internal_delete) {
+ // Deletion of a value on an internal node. First, transfer the largest
+ // value from our left child here, then erase/rebalance from that position.
+ // We can get to the largest value from our left child by decrementing iter.
iterator internal_iter(iter);
--iter;
- assert(iter.node->leaf());
- params_type::move(mutable_allocator(), iter.node->slot(iter.position),
- internal_iter.node->slot(internal_iter.position));
- internal_delete = true;
- }
-
- // Delete the key from the leaf.
- iter.node->remove_values(iter.position, /*to_erase=*/1, mutable_allocator());
+ assert(iter.node_->is_leaf());
+ internal_iter.node_->transfer(internal_iter.position_, iter.position_,
+ iter.node_, mutable_allocator());
+ } else {
+ // Shift values after erased position in leaf. In the internal case, we
+ // don't need to do this because the leaf position is the end of the node.
+ const field_type transfer_from = iter.position_ + 1;
+ const field_type num_to_transfer = iter.node_->finish() - transfer_from;
+ iter.node_->transfer_n(num_to_transfer, iter.position_, transfer_from,
+ iter.node_, mutable_allocator());
+ }
+ // Update node finish and container size.
+ iter.node_->set_finish(iter.node_->finish() - 1);
--size_;
// We want to return the next value after the one we just erased. If we
@@ -2142,7 +2324,7 @@ auto btree<P>::erase(iterator iter) -> iterator {
// value is ++(++iter). If we erased from a leaf node (internal_delete ==
// false) then the next value is ++iter. Note that ++iter may point to an
// internal node and the value in the internal node may move to a leaf node
- // (iter.node) when rebalancing is performed at the leaf level.
+ // (iter.node_) when rebalancing is performed at the leaf level.
iterator res = rebalance_after_delete(iter);
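At the public API level this is why erase returns an iterator to the next value; the usual erase-while-iterating loop works unchanged:

#include <iostream>
#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> s = {1, 2, 3, 4, 5, 6};
  for (auto it = s.begin(); it != s.end();) {
    if (*it % 2 == 0) {
      it = s.erase(it);  // erase returns the iterator to the next value
    } else {
      ++it;
    }
  }
  for (int v : s) std::cout << v << ' ';  // prints: 1 3 5
}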
@@ -2159,14 +2341,14 @@ auto btree<P>::rebalance_after_delete(iterator iter) -> iterator {
iterator res(iter);
bool first_iteration = true;
for (;;) {
- if (iter.node == root()) {
+ if (iter.node_ == root()) {
try_shrink();
if (empty()) {
return end();
}
break;
}
- if (iter.node->count() >= kMinNodeValues) {
+ if (iter.node_->count() >= kMinNodeValues) {
break;
}
bool merged = try_merge_or_rebalance(&iter);
@@ -2179,14 +2361,15 @@ auto btree<P>::rebalance_after_delete(iterator iter) -> iterator {
if (!merged) {
break;
}
- iter.position = iter.node->position();
- iter.node = iter.node->parent();
+ iter.position_ = iter.node_->position();
+ iter.node_ = iter.node_->parent();
}
+ res.update_generation();
// Adjust our return value. If we're pointing at the end of a node, advance
// the iterator.
- if (res.position == res.node->finish()) {
- res.position = res.node->finish() - 1;
+ if (res.position_ == res.node_->finish()) {
+ res.position_ = res.node_->finish() - 1;
++res;
}
@@ -2203,33 +2386,36 @@ auto btree<P>::erase_range(iterator begin, iterator end)
return {0, begin};
}
- if (count == size_) {
+ if (static_cast<size_type>(count) == size_) {
clear();
return {count, this->end()};
}
- if (begin.node == end.node) {
- assert(end.position > begin.position);
- begin.node->remove_values(begin.position, end.position - begin.position,
- mutable_allocator());
+ if (begin.node_ == end.node_) {
+ assert(end.position_ > begin.position_);
+ begin.node_->remove_values(begin.position_, end.position_ - begin.position_,
+ mutable_allocator());
size_ -= count;
return {count, rebalance_after_delete(begin)};
}
const size_type target_size = size_ - count;
while (size_ > target_size) {
- if (begin.node->leaf()) {
+ if (begin.node_->is_leaf()) {
const size_type remaining_to_erase = size_ - target_size;
- const size_type remaining_in_node = begin.node->finish() - begin.position;
+ const size_type remaining_in_node =
+ begin.node_->finish() - begin.position_;
const size_type to_erase =
(std::min)(remaining_to_erase, remaining_in_node);
- begin.node->remove_values(begin.position, to_erase, mutable_allocator());
+ begin.node_->remove_values(begin.position_, to_erase,
+ mutable_allocator());
size_ -= to_erase;
begin = rebalance_after_delete(begin);
} else {
begin = erase(begin);
}
}
+ begin.update_generation();
return {count, begin};
}
@@ -2238,8 +2424,7 @@ void btree<P>::clear() {
if (!empty()) {
node_type::clear_and_delete(root(), mutable_allocator());
}
- mutable_root() = EmptyNode();
- rightmost_ = EmptyNode();
+ mutable_root() = mutable_rightmost() = EmptyNode();
size_ = 0;
}
@@ -2248,15 +2433,15 @@ void btree<P>::swap(btree &other) {
using std::swap;
if (absl::allocator_traits<
allocator_type>::propagate_on_container_swap::value) {
- // Note: `root_` also contains the allocator and the key comparator.
- swap(root_, other.root_);
+ // Note: `rightmost_` also contains the allocator and the key comparator.
+ swap(rightmost_, other.rightmost_);
} else {
// It's undefined behavior if the allocators are unequal here.
assert(allocator() == other.allocator());
- swap(mutable_root(), other.mutable_root());
+ swap(mutable_rightmost(), other.mutable_rightmost());
swap(*mutable_key_comp(), *other.mutable_key_comp());
}
- swap(rightmost_, other.rightmost_);
+ swap(mutable_root(), other.mutable_root());
swap(size_, other.size_);
}
@@ -2264,18 +2449,18 @@ template <typename P>
void btree<P>::verify() const {
assert(root() != nullptr);
assert(leftmost() != nullptr);
- assert(rightmost_ != nullptr);
+ assert(rightmost() != nullptr);
assert(empty() || size() == internal_verify(root(), nullptr, nullptr));
- assert(leftmost() == (++const_iterator(root(), -1)).node);
- assert(rightmost_ == (--const_iterator(root(), root()->finish())).node);
- assert(leftmost()->leaf());
- assert(rightmost_->leaf());
+ assert(leftmost() == (++const_iterator(root(), -1)).node_);
+ assert(rightmost() == (--const_iterator(root(), root()->finish())).node_);
+ assert(leftmost()->is_leaf());
+ assert(rightmost()->is_leaf());
}
template <typename P>
void btree<P>::rebalance_or_split(iterator *iter) {
- node_type *&node = iter->node;
- int &insert_position = iter->position;
+ node_type *&node = iter->node_;
+ int &insert_position = iter->position_;
assert(node->count() == node->max_count());
assert(kNodeSlots == node->max_count());
@@ -2350,19 +2535,20 @@ void btree<P>::rebalance_or_split(iterator *iter) {
// Create a new root node and set the current root node as the child of the
// new root.
parent = new_internal_node(parent);
+ parent->set_generation(root()->generation());
parent->init_child(parent->start(), root());
mutable_root() = parent;
// If the former root was a leaf node, then it's now the rightmost node.
- assert(!parent->start_child()->leaf() ||
- parent->start_child() == rightmost_);
+ assert(parent->start_child()->is_internal() ||
+ parent->start_child() == rightmost());
}
// Split the node.
node_type *split_node;
- if (node->leaf()) {
+ if (node->is_leaf()) {
split_node = new_leaf_node(parent);
node->split(insert_position, split_node, mutable_allocator());
- if (rightmost_ == node) rightmost_ = split_node;
+ if (rightmost() == node) mutable_rightmost() = split_node;
} else {
split_node = new_internal_node(parent);
node->split(insert_position, split_node, mutable_allocator());
@@ -2377,55 +2563,56 @@ void btree<P>::rebalance_or_split(iterator *iter) {
template <typename P>
void btree<P>::merge_nodes(node_type *left, node_type *right) {
left->merge(right, mutable_allocator());
- if (rightmost_ == right) rightmost_ = left;
+ if (rightmost() == right) mutable_rightmost() = left;
}
template <typename P>
bool btree<P>::try_merge_or_rebalance(iterator *iter) {
- node_type *parent = iter->node->parent();
- if (iter->node->position() > parent->start()) {
+ node_type *parent = iter->node_->parent();
+ if (iter->node_->position() > parent->start()) {
// Try merging with our left sibling.
- node_type *left = parent->child(iter->node->position() - 1);
+ node_type *left = parent->child(iter->node_->position() - 1);
assert(left->max_count() == kNodeSlots);
- if (1U + left->count() + iter->node->count() <= kNodeSlots) {
- iter->position += 1 + left->count();
- merge_nodes(left, iter->node);
- iter->node = left;
+ if (1U + left->count() + iter->node_->count() <= kNodeSlots) {
+ iter->position_ += 1 + left->count();
+ merge_nodes(left, iter->node_);
+ iter->node_ = left;
return true;
}
}
- if (iter->node->position() < parent->finish()) {
+ if (iter->node_->position() < parent->finish()) {
// Try merging with our right sibling.
- node_type *right = parent->child(iter->node->position() + 1);
+ node_type *right = parent->child(iter->node_->position() + 1);
assert(right->max_count() == kNodeSlots);
- if (1U + iter->node->count() + right->count() <= kNodeSlots) {
- merge_nodes(iter->node, right);
+ if (1U + iter->node_->count() + right->count() <= kNodeSlots) {
+ merge_nodes(iter->node_, right);
return true;
}
// Try rebalancing with our right sibling. We don't perform rebalancing if
- // we deleted the first element from iter->node and the node is not
+ // we deleted the first element from iter->node_ and the node is not
// empty. This is a small optimization for the common pattern of deleting
// from the front of the tree.
if (right->count() > kMinNodeValues &&
- (iter->node->count() == 0 || iter->position > iter->node->start())) {
- int to_move = (right->count() - iter->node->count()) / 2;
+ (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) {
+ int to_move = (right->count() - iter->node_->count()) / 2;
to_move = (std::min)(to_move, right->count() - 1);
- iter->node->rebalance_right_to_left(to_move, right, mutable_allocator());
+ iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator());
return false;
}
}
- if (iter->node->position() > parent->start()) {
+ if (iter->node_->position() > parent->start()) {
// Try rebalancing with our left sibling. We don't perform rebalancing if
- // we deleted the last element from iter->node and the node is not
+ // we deleted the last element from iter->node_ and the node is not
// empty. This is a small optimization for the common pattern of deleting
// from the back of the tree.
- node_type *left = parent->child(iter->node->position() - 1);
+ node_type *left = parent->child(iter->node_->position() - 1);
if (left->count() > kMinNodeValues &&
- (iter->node->count() == 0 || iter->position < iter->node->finish())) {
- int to_move = (left->count() - iter->node->count()) / 2;
+ (iter->node_->count() == 0 ||
+ iter->position_ < iter->node_->finish())) {
+ int to_move = (left->count() - iter->node_->count()) / 2;
to_move = (std::min)(to_move, left->count() - 1);
- left->rebalance_left_to_right(to_move, iter->node, mutable_allocator());
- iter->position += to_move;
+ left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator());
+ iter->position_ += to_move;
return false;
}
}
@@ -2439,9 +2626,9 @@ void btree<P>::try_shrink() {
return;
}
// Deleted the last item on the root node, shrink the height of the tree.
- if (orig_root->leaf()) {
+ if (orig_root->is_leaf()) {
assert(size() == 0);
- mutable_root() = rightmost_ = EmptyNode();
+ mutable_root() = mutable_rightmost() = EmptyNode();
} else {
node_type *child = orig_root->start_child();
child->make_root();
@@ -2453,15 +2640,16 @@ void btree<P>::try_shrink() {
template <typename P>
template <typename IterType>
inline IterType btree<P>::internal_last(IterType iter) {
- assert(iter.node != nullptr);
- while (iter.position == iter.node->finish()) {
- iter.position = iter.node->position();
- iter.node = iter.node->parent();
- if (iter.node->leaf()) {
- iter.node = nullptr;
+ assert(iter.node_ != nullptr);
+ while (iter.position_ == iter.node_->finish()) {
+ iter.position_ = iter.node_->position();
+ iter.node_ = iter.node_->parent();
+ if (iter.node_->is_leaf()) {
+ iter.node_ = nullptr;
break;
}
}
+ iter.update_generation();
return iter;
}
@@ -2469,37 +2657,39 @@ template <typename P>
template <typename... Args>
inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
-> iterator {
- if (!iter.node->leaf()) {
+ if (iter.node_->is_internal()) {
// We can't insert on an internal node. Instead, we'll insert after the
// previous value which is guaranteed to be on a leaf node.
--iter;
- ++iter.position;
+ ++iter.position_;
}
- const field_type max_count = iter.node->max_count();
+ const field_type max_count = iter.node_->max_count();
allocator_type *alloc = mutable_allocator();
- if (iter.node->count() == max_count) {
+ if (iter.node_->count() == max_count) {
// Make room in the leaf for the new item.
if (max_count < kNodeSlots) {
// Insertion into the root where the root is smaller than the full node
// size. Simply grow the size of the root node.
- assert(iter.node == root());
- iter.node =
+ assert(iter.node_ == root());
+ iter.node_ =
new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
// Transfer the values from the old root to the new root.
node_type *old_root = root();
- node_type *new_root = iter.node;
+ node_type *new_root = iter.node_;
new_root->transfer_n(old_root->count(), new_root->start(),
old_root->start(), old_root, alloc);
new_root->set_finish(old_root->finish());
old_root->set_finish(old_root->start());
+ new_root->set_generation(old_root->generation());
node_type::clear_and_delete(old_root, alloc);
- mutable_root() = rightmost_ = new_root;
+ mutable_root() = mutable_rightmost() = new_root;
} else {
rebalance_or_split(&iter);
}
}
- iter.node->emplace_value(iter.position, alloc, std::forward<Args>(args)...);
+ iter.node_->emplace_value(iter.position_, alloc, std::forward<Args>(args)...);
++size_;
+ iter.update_generation();
return iter;
}
@@ -2510,8 +2700,8 @@ inline auto btree<P>::internal_locate(const K &key) const
iterator iter(const_cast<node_type *>(root()));
for (;;) {
SearchResult<int, is_key_compare_to::value> res =
- iter.node->lower_bound(key, key_comp());
- iter.position = res.value;
+ iter.node_->lower_bound(key, key_comp());
+ iter.position_ = res.value;
if (res.IsEq()) {
return {iter, MatchKind::kEq};
}
@@ -2519,10 +2709,10 @@ inline auto btree<P>::internal_locate(const K &key) const
// down the tree if the keys are equal, but determining equality would
// require doing an extra comparison on each node on the way down, and we
// will need to go all the way to the leaf node in the expected case.
- if (iter.node->leaf()) {
+ if (iter.node_->is_leaf()) {
break;
}
- iter.node = iter.node->child(iter.position);
+ iter.node_ = iter.node_->child(iter.position_);
}
// Note: in the non-key-compare-to case, the key may actually be equivalent
// here (and the MatchKind::kNe is ignored).
@@ -2542,13 +2732,13 @@ auto btree<P>::internal_lower_bound(const K &key) const
SearchResult<int, is_key_compare_to::value> res;
bool seen_eq = false;
for (;;) {
- res = iter.node->lower_bound(key, key_comp());
- iter.position = res.value;
- if (iter.node->leaf()) {
+ res = iter.node_->lower_bound(key, key_comp());
+ iter.position_ = res.value;
+ if (iter.node_->is_leaf()) {
break;
}
seen_eq = seen_eq || res.IsEq();
- iter.node = iter.node->child(iter.position);
+ iter.node_ = iter.node_->child(iter.position_);
}
if (res.IsEq()) return {iter, MatchKind::kEq};
return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
@@ -2559,11 +2749,11 @@ template <typename K>
auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
iterator iter(const_cast<node_type *>(root()));
for (;;) {
- iter.position = iter.node->upper_bound(key, key_comp());
- if (iter.node->leaf()) {
+ iter.position_ = iter.node_->upper_bound(key, key_comp());
+ if (iter.node_->is_leaf()) {
break;
}
- iter.node = iter.node->child(iter.position);
+ iter.node_ = iter.node_->child(iter.position_);
}
return internal_last(iter);
}
@@ -2578,7 +2768,7 @@ auto btree<P>::internal_find(const K &key) const -> iterator {
}
} else {
const iterator iter = internal_last(res.value);
- if (iter.node != nullptr && !compare_keys(key, iter.key())) {
+ if (iter.node_ != nullptr && !compare_keys(key, iter.key())) {
return iter;
}
}
@@ -2600,7 +2790,7 @@ int btree<P>::internal_verify(const node_type *node, const key_type *lo,
assert(!compare_keys(node->key(i), node->key(i - 1)));
}
int count = node->count();
- if (!node->leaf()) {
+ if (node->is_internal()) {
for (int i = node->start(); i <= node->finish(); ++i) {
assert(node->child(i) != nullptr);
assert(node->child(i)->parent() == node);
@@ -2613,6 +2803,50 @@ int btree<P>::internal_verify(const node_type *node, const key_type *lo,
return count;
}
+struct btree_access {
+ template <typename BtreeContainer, typename Pred>
+ static auto erase_if(BtreeContainer &container, Pred pred)
+ -> typename BtreeContainer::size_type {
+ const auto initial_size = container.size();
+ auto &tree = container.tree_;
+ auto *alloc = tree.mutable_allocator();
+ for (auto it = container.begin(); it != container.end();) {
+ if (!pred(*it)) {
+ ++it;
+ continue;
+ }
+ auto *node = it.node_;
+ if (node->is_internal()) {
+ // Handle internal nodes normally.
+ it = container.erase(it);
+ continue;
+ }
+ // If this is a leaf node, then we do all the erases from this node
+ // at once before doing rebalancing.
+
+ // The current position to transfer slots to.
+ int to_pos = it.position_;
+ node->value_destroy(it.position_, alloc);
+ while (++it.position_ < node->finish()) {
+ it.update_generation();
+ if (pred(*it)) {
+ node->value_destroy(it.position_, alloc);
+ } else {
+ node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc);
+ }
+ }
+ const int num_deleted = node->finish() - to_pos;
+ tree.size_ -= num_deleted;
+ node->set_finish(to_pos);
+ it.position_ = to_pos;
+ it = tree.rebalance_after_delete(it);
+ }
+ return initial_size - container.size();
+ }
+};
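Callers reach this through the absl::erase_if overloads in btree_set.h/btree_map.h, which (as of this change) forward here and return the number of erased elements:

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, int> m = {{1, 10}, {2, 20}, {3, 30}, {4, 40}};
  auto erased =
      absl::erase_if(m, [](const auto &kv) { return kv.first % 2 == 0; });
  // erased == 2; m now holds {1, 10} and {3, 30}.
  (void)erased;
}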
+
+#undef ABSL_BTREE_ENABLE_GENERATIONS
+
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index 03be708e..fc2f740a 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -20,6 +20,7 @@
#include <iterator>
#include <utility>
+#include "absl/base/attributes.h"
#include "absl/base/internal/throw_delegate.h"
#include "absl/container/internal/btree.h" // IWYU pragma: export
#include "absl/container/internal/common.h"
@@ -43,15 +44,15 @@ class btree_container {
// transparent case.
template <class K>
using key_arg =
- typename KeyArg<IsTransparent<typename Tree::key_compare>::value>::
- template type<K, typename Tree::key_type>;
+ typename KeyArg<params_type::kIsKeyCompareTransparent>::template type<
+ K, typename Tree::key_type>;
public:
using key_type = typename Tree::key_type;
using value_type = typename Tree::value_type;
using size_type = typename Tree::size_type;
using difference_type = typename Tree::difference_type;
- using key_compare = typename Tree::key_compare;
+ using key_compare = typename Tree::original_key_compare;
using value_compare = typename Tree::value_compare;
using allocator_type = typename Tree::allocator_type;
using reference = typename Tree::reference;
@@ -165,9 +166,10 @@ class btree_container {
// Extract routines.
node_type extract(iterator position) {
- // Use Move instead of Transfer, because the rebalancing code expects to
- // have a valid object to scribble metadata bits on top of.
- auto node = CommonAccess::Move<node_type>(get_allocator(), position.slot());
+ // Use Construct instead of Transfer because the rebalancing code will
+ // destroy the slot later.
+ auto node =
+ CommonAccess::Construct<node_type>(get_allocator(), position.slot());
erase(position);
return node;
}
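Usage-wise, extract hands the element to the caller inside a node handle without copying; it can be mutated there and moved back in:

#include <string>
#include <utility>
#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<std::string> s = {"a", "b", "c"};
  auto node = s.extract(s.find("b"));  // "b" leaves the tree, no copy
  node.value() = "bee";                // mutate the extracted element
  s.insert(std::move(node));           // hand it back to the tree
}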
@@ -176,7 +178,7 @@ class btree_container {
}
// Utility routines.
- void clear() { tree_.clear(); }
+ ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); }
void swap(btree_container &other) { tree_.swap(other.tree_); }
void verify() const { tree_.verify(); }
@@ -214,7 +216,7 @@ class btree_container {
allocator_type get_allocator() const { return tree_.get_allocator(); }
// The key comparator used by the btree.
- key_compare key_comp() const { return tree_.key_comp(); }
+ key_compare key_comp() const { return key_compare(tree_.key_comp()); }
value_compare value_comp() const { return tree_.value_comp(); }
// Support absl::Hash.
@@ -227,6 +229,7 @@ class btree_container {
}
protected:
+ friend struct btree_access;
Tree tree_;
};
@@ -247,7 +250,7 @@ class btree_set_container : public btree_container<Tree> {
using key_type = typename Tree::key_type;
using value_type = typename Tree::value_type;
using size_type = typename Tree::size_type;
- using key_compare = typename Tree::key_compare;
+ using key_compare = typename Tree::original_key_compare;
using allocator_type = typename Tree::allocator_type;
using iterator = typename Tree::iterator;
using const_iterator = typename Tree::const_iterator;
@@ -289,8 +292,11 @@ class btree_set_container : public btree_container<Tree> {
}
template <typename... Args>
std::pair<iterator, bool> emplace(Args &&... args) {
- init_type v(std::forward<Args>(args)...);
- return this->tree_.insert_unique(params_type::key(v), std::move(v));
+ // Use a node handle to manage a temp slot.
+ auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
+ std::forward<Args>(args)...);
+ auto *slot = CommonAccess::GetSlot(node);
+ return this->tree_.insert_unique(params_type::key(slot), slot);
}
iterator insert(const_iterator hint, const value_type &v) {
return this->tree_
@@ -304,9 +310,12 @@ class btree_set_container : public btree_container<Tree> {
}
template <typename... Args>
iterator emplace_hint(const_iterator hint, Args &&... args) {
- init_type v(std::forward<Args>(args)...);
+ // Use a node handle to manage a temp slot.
+ auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
+ std::forward<Args>(args)...);
+ auto *slot = CommonAccess::GetSlot(node);
return this->tree_
- .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
+ .insert_hint_unique(iterator(hint), params_type::key(slot), slot)
.first;
}
template <typename InputIterator>
@@ -398,7 +407,7 @@ class btree_map_container : public btree_set_container<Tree> {
using key_type = typename Tree::key_type;
using mapped_type = typename params_type::mapped_type;
using value_type = typename Tree::value_type;
- using key_compare = typename Tree::key_compare;
+ using key_compare = typename Tree::original_key_compare;
using allocator_type = typename Tree::allocator_type;
using iterator = typename Tree::iterator;
using const_iterator = typename Tree::const_iterator;
@@ -535,6 +544,7 @@ class btree_multiset_container : public btree_container<Tree> {
using params_type = typename Tree::params_type;
using init_type = typename params_type::init_type;
using is_key_compare_to = typename params_type::is_key_compare_to;
+ friend class BtreeNodePeer;
template <class K>
using key_arg = typename super_type::template key_arg<K>;
@@ -543,7 +553,7 @@ class btree_multiset_container : public btree_container<Tree> {
using key_type = typename Tree::key_type;
using value_type = typename Tree::value_type;
using size_type = typename Tree::size_type;
- using key_compare = typename Tree::key_compare;
+ using key_compare = typename Tree::original_key_compare;
using allocator_type = typename Tree::allocator_type;
using iterator = typename Tree::iterator;
using const_iterator = typename Tree::const_iterator;
@@ -595,12 +605,18 @@ class btree_multiset_container : public btree_container<Tree> {
}
template <typename... Args>
iterator emplace(Args &&... args) {
- return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
+ // Use a node handle to manage a temp slot.
+ auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
+ std::forward<Args>(args)...);
+ return this->tree_.insert_multi(CommonAccess::GetSlot(node));
}
template <typename... Args>
iterator emplace_hint(const_iterator hint, Args &&... args) {
- return this->tree_.insert_hint_multi(
- iterator(hint), init_type(std::forward<Args>(args)...));
+ // Use a node handle to manage a temp slot.
+ auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
+ std::forward<Args>(args)...);
+ return this->tree_.insert_hint_multi(iterator(hint),
+ CommonAccess::GetSlot(node));
}
iterator insert(node_type &&node) {
if (!node) return this->end();
@@ -666,6 +682,7 @@ template <typename Tree>
class btree_multimap_container : public btree_multiset_container<Tree> {
using super_type = btree_multiset_container<Tree>;
using params_type = typename Tree::params_type;
+ friend class BtreeNodePeer;
public:
using mapped_type = typename params_type::mapped_type;
diff --git a/absl/container/internal/common.h b/absl/container/internal/common.h
index 030e9d4a..416d9aa3 100644
--- a/absl/container/internal/common.h
+++ b/absl/container/internal/common.h
@@ -84,10 +84,11 @@ class node_handle_base {
PolicyTraits::transfer(alloc(), slot(), s);
}
- struct move_tag_t {};
- node_handle_base(move_tag_t, const allocator_type& a, slot_type* s)
+ struct construct_tag_t {};
+ template <typename... Args>
+ node_handle_base(construct_tag_t, const allocator_type& a, Args&&... args)
: alloc_(a) {
- PolicyTraits::construct(alloc(), slot(), s);
+ PolicyTraits::construct(alloc(), slot(), std::forward<Args>(args)...);
}
void destroy() {
@@ -186,8 +187,8 @@ struct CommonAccess {
}
template <typename T, typename... Args>
- static T Move(Args&&... args) {
- return T(typename T::move_tag_t{}, std::forward<Args>(args)...);
+ static T Construct(Args&&... args) {
+ return T(typename T::construct_tag_t{}, std::forward<Args>(args)...);
}
};
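
With construct_tag_t, the handle can construct its slot from arbitrary forwarded arguments rather than only from another slot, which is what lets the b-tree emplace paths above build values directly into a temporary node handle. A standalone sketch of the tag-dispatch idiom, with illustrative names rather than the Abseil types:

#include <utility>

class Handle {
 public:
  Handle() = default;
  int value() const { return value_; }

 private:
  friend struct Access;
  struct construct_tag_t {};
  // Only code that can name the private tag (i.e. the friend) can call
  // this constructor; arguments are perfectly forwarded to the payload.
  template <typename... Args>
  explicit Handle(construct_tag_t, Args&&... args)
      : value_(std::forward<Args>(args)...) {}

  int value_ = 0;
};

struct Access {
  template <typename T, typename... Args>
  static T Construct(Args&&... args) {
    return T(typename T::construct_tag_t{}, std::forward<Args>(args)...);
  }
};

// Usage: Handle h = Access::Construct<Handle>(42);
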
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
index 62a7483e..74111f97 100644
--- a/absl/container/internal/compressed_tuple_test.cc
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -403,6 +403,16 @@ TEST(CompressedTupleTest, EmptyFinalClass) {
}
#endif
+// TODO(b/214288561): enable this test.
+TEST(CompressedTupleTest, DISABLED_NestedEbo) {
+ struct Empty1 {};
+ struct Empty2 {};
+ CompressedTuple<Empty1, CompressedTuple<Empty2>, int> x;
+ CompressedTuple<Empty1, Empty2, int> y;
+ // Currently fails with sizeof(x) == 8, sizeof(y) == 4.
+ EXPECT_EQ(sizeof(x), sizeof(y));
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
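
The disabled test above documents a known limitation: nesting one CompressedTuple inside another currently defeats the empty base optimization (EBO). For reference, a standalone illustration of the size effect the test expects, in plain standard C++ with no Abseil types:

#include <cstdio>

struct Empty {};

struct WithMember {
  Empty e;  // as a member, Empty occupies at least one byte plus padding
  int i;
};

struct WithBase : Empty {  // as an empty base, Empty contributes zero bytes
  int i;
};

int main() {
  // Typically prints 8 and 4 on common ABIs.
  std::printf("%zu %zu\n", sizeof(WithMember), sizeof(WithBase));
}
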
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index e67529ec..00e9f6d7 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -174,7 +174,7 @@ decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
//
// 2. auto a = PairArgs(args...);
// std::pair<F, S> p(std::piecewise_construct,
-// std::move(p.first), std::move(p.second));
+// std::move(a.first), std::move(a.second));
inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
template <class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
@@ -402,6 +402,15 @@ struct map_slot_policy {
}
}
+ // Construct this slot by copying from another slot.
+ template <class Allocator>
+ static void construct(Allocator* alloc, slot_type* slot,
+ const slot_type* other) {
+ emplace(slot);
+ absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+ other->value);
+ }
+
template <class Allocator>
static void destroy(Allocator* alloc, slot_type* slot) {
if (kMutableKeys::value) {
@@ -424,33 +433,6 @@ struct map_slot_policy {
}
destroy(alloc, old_slot);
}
-
- template <class Allocator>
- static void swap(Allocator* alloc, slot_type* a, slot_type* b) {
- if (kMutableKeys::value) {
- using std::swap;
- swap(a->mutable_value, b->mutable_value);
- } else {
- value_type tmp = std::move(a->value);
- absl::allocator_traits<Allocator>::destroy(*alloc, &a->value);
- absl::allocator_traits<Allocator>::construct(*alloc, &a->value,
- std::move(b->value));
- absl::allocator_traits<Allocator>::destroy(*alloc, &b->value);
- absl::allocator_traits<Allocator>::construct(*alloc, &b->value,
- std::move(tmp));
- }
- }
-
- template <class Allocator>
- static void move(Allocator* alloc, slot_type* src, slot_type* dest) {
- if (kMutableKeys::value) {
- dest->mutable_value = std::move(src->mutable_value);
- } else {
- absl::allocator_traits<Allocator>::destroy(*alloc, &dest->value);
- absl::allocator_traits<Allocator>::construct(*alloc, &dest->value,
- std::move(src->value));
- }
- }
};
} // namespace container_internal
diff --git a/absl/container/internal/counting_allocator.h b/absl/container/internal/counting_allocator.h
index 927cf082..66068a5a 100644
--- a/absl/container/internal/counting_allocator.h
+++ b/absl/container/internal/counting_allocator.h
@@ -80,7 +80,15 @@ class CountingAllocator {
template <typename U>
void destroy(U* p) {
Allocator allocator;
+  // Suppress a spurious GCC 12 -Wuse-after-free warning (compiler bug).
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wuse-after-free"
+#endif
AllocatorTraits::destroy(allocator, p);
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
if (instance_count_ != nullptr) {
*instance_count_ -= 1;
}
diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h
index 0683422a..250e662c 100644
--- a/absl/container/internal/hash_function_defaults.h
+++ b/absl/container/internal/hash_function_defaults.h
@@ -78,24 +78,26 @@ struct StringHash {
}
};
+struct StringEq {
+ using is_transparent = void;
+ bool operator()(absl::string_view lhs, absl::string_view rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
+ return lhs == rhs;
+ }
+};
+
// Supports heterogeneous lookup for string-like elements.
struct StringHashEq {
using Hash = StringHash;
- struct Eq {
- using is_transparent = void;
- bool operator()(absl::string_view lhs, absl::string_view rhs) const {
- return lhs == rhs;
- }
- bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
- return lhs == rhs;
- }
- bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
- return lhs == rhs;
- }
- bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
- return lhs == rhs;
- }
- };
+ using Eq = StringEq;
};
template <>
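
Extracting the equality functor as a standalone StringEq does not change behavior: hash and equality both stay transparent, so string-keyed tables keep heterogeneous lookup across std::string, absl::string_view, and absl::Cord. A brief sketch of what that transparency buys (illustrative usage, not code from the patch):

#include <string>

#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"

bool HasKey(const absl::flat_hash_set<std::string>& set,
            absl::string_view key) {
  // The transparent hash/eq defaults above let this lookup run without
  // constructing a temporary std::string from `key`.
  return set.contains(key);
}
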
diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc
index 59576b8e..9f0a4c72 100644
--- a/absl/container/internal/hash_function_defaults_test.cc
+++ b/absl/container/internal/hash_function_defaults_test.cc
@@ -310,7 +310,7 @@ struct StringLikeTest : public ::testing::Test {
hash_default_hash<typename T::first_type> hash;
};
-TYPED_TEST_CASE_P(StringLikeTest);
+TYPED_TEST_SUITE_P(StringLikeTest);
TYPED_TEST_P(StringLikeTest, Eq) {
EXPECT_TRUE(this->eq(this->a1, this->b1));
diff --git a/absl/container/internal/hash_generator_testing.h b/absl/container/internal/hash_generator_testing.h
index 6869fe45..f1f555a5 100644
--- a/absl/container/internal/hash_generator_testing.h
+++ b/absl/container/internal/hash_generator_testing.h
@@ -21,11 +21,13 @@
#include <stdint.h>
#include <algorithm>
+#include <cassert>
#include <iosfwd>
#include <random>
#include <tuple>
#include <type_traits>
#include <utility>
+#include <vector>
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/memory/memory.h"
@@ -153,6 +155,25 @@ using GeneratedType = decltype(
typename Container::value_type,
typename Container::key_type>::type>&>()());
+// Naive wrapper that performs a linear search of previous values.
+// Beware this is O(kMaxValues^2), which is reasonable for small kMaxValues.
+template <class T, size_t kMaxValues = 64, class E = void>
+struct UniqueGenerator {
+ Generator<T, E> gen;
+ std::vector<T> values;
+
+ T operator()() {
+ assert(values.size() < kMaxValues);
+ for (;;) {
+ T value = gen();
+ if (std::find(values.begin(), values.end(), value) == values.end()) {
+ values.push_back(value);
+ return value;
+ }
+ }
+ }
+};
+
} // namespace hash_internal
} // namespace container_internal
ABSL_NAMESPACE_END
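
A sketch of how a test can use UniqueGenerator in place of the plain Generator when distinct keys are required; this is illustrative usage, not code from the patch:

using absl::container_internal::hash_internal::UniqueGenerator;

// Each call retries the underlying Generator until it produces a value
// not seen before, so a, b, and c are pairwise distinct.
void ThreeDistinctInts() {
  UniqueGenerator<int> gen;
  const int a = gen();
  const int b = gen();
  const int c = gen();
  (void)a;
  (void)b;
  (void)c;
}
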
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 5a29bed7..efc1be58 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -21,49 +21,55 @@
#include <limits>
#include "absl/base/attributes.h"
-#include "absl/base/internal/exponential_biased.h"
-#include "absl/container/internal/have_sse.h"
+#include "absl/base/config.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
+#include "absl/profiling/internal/exponential_biased.h"
+#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr int HashtablezInfo::kMaxStackDepth;
+#endif
namespace {
ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
false
};
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
-ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20};
+std::atomic<HashtablezConfigListener> g_hashtablez_config_listener{nullptr};
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased
+ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
g_exponential_biased_generator;
#endif
+void TriggerHashtablezConfigListener() {
+ auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire);
+ if (listener != nullptr) listener();
+}
+
} // namespace
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0};
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-HashtablezSampler& HashtablezSampler::Global() {
+HashtablezSampler& GlobalHashtablezSampler() {
static auto* sampler = new HashtablezSampler();
return *sampler;
}
-HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback(
- DisposeCallback f) {
- return dispose_.exchange(f, std::memory_order_relaxed);
-}
-
-HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::HashtablezInfo() = default;
HashtablezInfo::~HashtablezInfo() = default;
-void HashtablezInfo::PrepareForSampling() {
+void HashtablezInfo::PrepareForSampling(int64_t stride,
+ size_t inline_element_size_value) {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
@@ -73,100 +79,16 @@ void HashtablezInfo::PrepareForSampling() {
hashes_bitwise_or.store(0, std::memory_order_relaxed);
hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
hashes_bitwise_xor.store(0, std::memory_order_relaxed);
+ max_reserve.store(0, std::memory_order_relaxed);
create_time = absl::Now();
+ weight = stride;
// The inliner makes hardcoded skip_count difficult (especially when combined
// with LTO). We use the ability to exclude stacks by regex when encoding
// instead.
depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
/* skip_count= */ 0);
- dead = nullptr;
-}
-
-HashtablezSampler::HashtablezSampler()
- : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
- absl::MutexLock l(&graveyard_.init_mu);
- graveyard_.dead = &graveyard_;
-}
-
-HashtablezSampler::~HashtablezSampler() {
- HashtablezInfo* s = all_.load(std::memory_order_acquire);
- while (s != nullptr) {
- HashtablezInfo* next = s->next;
- delete s;
- s = next;
- }
-}
-
-void HashtablezSampler::PushNew(HashtablezInfo* sample) {
- sample->next = all_.load(std::memory_order_relaxed);
- while (!all_.compare_exchange_weak(sample->next, sample,
- std::memory_order_release,
- std::memory_order_relaxed)) {
- }
-}
-
-void HashtablezSampler::PushDead(HashtablezInfo* sample) {
- if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
- dispose(*sample);
- }
-
- absl::MutexLock graveyard_lock(&graveyard_.init_mu);
- absl::MutexLock sample_lock(&sample->init_mu);
- sample->dead = graveyard_.dead;
- graveyard_.dead = sample;
-}
-
-HashtablezInfo* HashtablezSampler::PopDead() {
- absl::MutexLock graveyard_lock(&graveyard_.init_mu);
-
- // The list is circular, so eventually it collapses down to
- // graveyard_.dead == &graveyard_
- // when it is empty.
- HashtablezInfo* sample = graveyard_.dead;
- if (sample == &graveyard_) return nullptr;
-
- absl::MutexLock sample_lock(&sample->init_mu);
- graveyard_.dead = sample->dead;
- sample->PrepareForSampling();
- return sample;
-}
-
-HashtablezInfo* HashtablezSampler::Register() {
- int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
- if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) {
- size_estimate_.fetch_sub(1, std::memory_order_relaxed);
- dropped_samples_.fetch_add(1, std::memory_order_relaxed);
- return nullptr;
- }
-
- HashtablezInfo* sample = PopDead();
- if (sample == nullptr) {
- // Resurrection failed. Hire a new warlock.
- sample = new HashtablezInfo();
- PushNew(sample);
- }
-
- return sample;
-}
-
-void HashtablezSampler::Unregister(HashtablezInfo* sample) {
- PushDead(sample);
- size_estimate_.fetch_sub(1, std::memory_order_relaxed);
-}
-
-int64_t HashtablezSampler::Iterate(
- const std::function<void(const HashtablezInfo& stack)>& f) {
- HashtablezInfo* s = all_.load(std::memory_order_acquire);
- while (s != nullptr) {
- absl::MutexLock l(&s->init_mu);
- if (s->dead == nullptr) {
- f(*s);
- }
- s = s->next;
- }
-
- return dropped_samples_.load(std::memory_order_relaxed);
+ inline_element_size = inline_element_size_value;
}
static bool ShouldForceSampling() {
@@ -189,21 +111,32 @@ static bool ShouldForceSampling() {
return state == kForce;
}
-HashtablezInfo* SampleSlow(int64_t* next_sample) {
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+ size_t inline_element_size) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
- *next_sample = 1;
- return HashtablezSampler::Global().Register();
+ next_sample.next_sample = 1;
+ const int64_t old_stride = exchange(next_sample.sample_stride, 1);
+ HashtablezInfo* result =
+ GlobalHashtablezSampler().Register(old_stride, inline_element_size);
+ return result;
}
#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
- *next_sample = std::numeric_limits<int64_t>::max();
+ next_sample = {
+ std::numeric_limits<int64_t>::max(),
+ std::numeric_limits<int64_t>::max(),
+ };
return nullptr;
#else
- bool first = *next_sample < 0;
- *next_sample = g_exponential_biased_generator.GetStride(
+ bool first = next_sample.next_sample < 0;
+
+ const int64_t next_stride = g_exponential_biased_generator.GetStride(
g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+
+ next_sample.next_sample = next_stride;
+ const int64_t old_stride = exchange(next_sample.sample_stride, next_stride);
// Small values of interval are equivalent to just sampling next time.
- ABSL_ASSERT(*next_sample >= 1);
+ ABSL_ASSERT(next_stride >= 1);
// g_hashtablez_enabled can be dynamically flipped; we need to set a threshold
// low enough that we will start sampling in a reasonable time, so we just use
@@ -213,16 +146,16 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) {
// We will only be negative on our first count, so we should just retry in
// that case.
if (first) {
- if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
- return SampleSlow(next_sample);
+ if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
+ return SampleSlow(next_sample, inline_element_size);
}
- return HashtablezSampler::Global().Register();
+ return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
#endif
}
void UnsampleSlow(HashtablezInfo* info) {
- HashtablezSampler::Global().Unregister(info);
+ GlobalHashtablezSampler().Unregister(info);
}
void RecordInsertSlow(HashtablezInfo* info, size_t hash,
@@ -230,7 +163,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
// SwissTables probe in groups of 16, so scale this to count items probes and
// not offset from desired.
size_t probe_length = distance_from_desired;
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
probe_length /= 16;
#else
probe_length /= 8;
@@ -247,11 +180,33 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
info->size.fetch_add(1, std::memory_order_relaxed);
}
+void SetHashtablezConfigListener(HashtablezConfigListener l) {
+ g_hashtablez_config_listener.store(l, std::memory_order_release);
+}
+
+bool IsHashtablezEnabled() {
+ return g_hashtablez_enabled.load(std::memory_order_acquire);
+}
+
void SetHashtablezEnabled(bool enabled) {
+ SetHashtablezEnabledInternal(enabled);
+ TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezEnabledInternal(bool enabled) {
g_hashtablez_enabled.store(enabled, std::memory_order_release);
}
+int32_t GetHashtablezSampleParameter() {
+ return g_hashtablez_sample_parameter.load(std::memory_order_acquire);
+}
+
void SetHashtablezSampleParameter(int32_t rate) {
+ SetHashtablezSampleParameterInternal(rate);
+ TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezSampleParameterInternal(int32_t rate) {
if (rate > 0) {
g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
} else {
@@ -260,9 +215,18 @@ void SetHashtablezSampleParameter(int32_t rate) {
}
}
+int32_t GetHashtablezMaxSamples() {
+ return GlobalHashtablezSampler().GetMaxSamples();
+}
+
void SetHashtablezMaxSamples(int32_t max) {
+ SetHashtablezMaxSamplesInternal(max);
+ TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezMaxSamplesInternal(int32_t max) {
if (max > 0) {
- g_hashtablez_max_samples.store(max, std::memory_order_release);
+ GlobalHashtablezSampler().SetMaxSamples(max);
} else {
ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
static_cast<long long>(max)); // NOLINT(runtime/int)
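
Threading the stride through to HashtablezInfo::weight is what makes the samples statistically usable: a table selected with stride S stands in for roughly S tables, so aggregators can form an unbiased (Horvitz-Thompson style) estimate by weighting each sample. A minimal sketch of that arithmetic, using an illustrative struct rather than the real sampler types:

#include <cstdint>
#include <vector>

struct SampleLike {
  int64_t weight;  // the sampling stride captured at registration
  uint64_t size;   // any per-table statistic of interest
};

// Each sample represents ~weight tables, so the weighted sum estimates
// the total over the whole (mostly unsampled) population.
double EstimateTotalSize(const std::vector<SampleLike>& samples) {
  double total = 0;
  for (const SampleLike& s : samples) {
    total += static_cast<double>(s.weight) * static_cast<double>(s.size);
  }
  return total;
}
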
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 85685f72..d4016d8a 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -44,9 +44,10 @@
#include <memory>
#include <vector>
+#include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"
-#include "absl/container/internal/have_sse.h"
+#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
#include "absl/utility/utility.h"
@@ -57,7 +58,7 @@ namespace container_internal {
// Stores information about a sampled hashtable. All mutations to this *must*
// be made through `Record*` functions below. All reads from this *must* only
// occur in the callback to `HashtablezSampler::Iterate`.
-struct HashtablezInfo {
+struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// Constructs the object but does not fill in any fields.
HashtablezInfo();
~HashtablezInfo();
@@ -66,7 +67,8 @@ struct HashtablezInfo {
// Puts the object into a clean state, fills in the logically `const` members,
// blocking for any readers that are currently sampling the object.
- void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+ void PrepareForSampling(int64_t stride, size_t inline_element_size_value)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
// These fields are mutated by the various Record* APIs and need to be
// thread-safe.
@@ -79,28 +81,22 @@ struct HashtablezInfo {
std::atomic<size_t> hashes_bitwise_or;
std::atomic<size_t> hashes_bitwise_and;
std::atomic<size_t> hashes_bitwise_xor;
-
- // `HashtablezSampler` maintains intrusive linked lists for all samples. See
- // comments on `HashtablezSampler::all_` for details on these. `init_mu`
- // guards the ability to restore the sample to a pristine state. This
- // prevents races with sampling and resurrecting an object.
- absl::Mutex init_mu;
- HashtablezInfo* next;
- HashtablezInfo* dead ABSL_GUARDED_BY(init_mu);
+ std::atomic<size_t> max_reserve;
// All of the fields below are set by `PrepareForSampling`, they must not be
// mutated in `Record*` functions. They are logically `const` in that sense.
- // These are guarded by init_mu, but that is not externalized to clients, who
- // can only read them during `HashtablezSampler::Iterate` which will hold the
- // lock.
+ // These are guarded by init_mu, but that is not externalized to clients,
+ // which can read them only during `SampleRecorder::Iterate` which will hold
+ // the lock.
static constexpr int kMaxStackDepth = 64;
absl::Time create_time;
int32_t depth;
void* stack[kMaxStackDepth];
+ size_t inline_element_size; // How big is the slot?
};
inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
total_probe_length /= 16;
#else
total_probe_length /= 8;
@@ -114,6 +110,18 @@ inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
std::memory_order_relaxed);
}
+inline void RecordReservationSlow(HashtablezInfo* info,
+ size_t target_capacity) {
+ info->max_reserve.store(
+ (std::max)(info->max_reserve.load(std::memory_order_relaxed),
+ target_capacity),
+ std::memory_order_relaxed);
+}
+
+inline void RecordClearedReservationSlow(HashtablezInfo* info) {
+ info->max_reserve.store(0, std::memory_order_relaxed);
+}
+
inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
size_t capacity) {
info->size.store(size, std::memory_order_relaxed);
@@ -137,7 +145,15 @@ inline void RecordEraseSlow(HashtablezInfo* info) {
std::memory_order_relaxed);
}
-HashtablezInfo* SampleSlow(int64_t* next_sample);
+struct SamplingState {
+ int64_t next_sample;
+ // When we make a sampling decision, we record that distance so we can weight
+ // each sample.
+ int64_t sample_stride;
+};
+
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+ size_t inline_element_size);
void UnsampleSlow(HashtablezInfo* info);
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -177,6 +193,16 @@ class HashtablezInfoHandle {
RecordRehashSlow(info_, total_probe_length);
}
+ inline void RecordReservation(size_t target_capacity) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordReservationSlow(info_, target_capacity);
+ }
+
+ inline void RecordClearedReservation() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordClearedReservationSlow(info_);
+ }
+
inline void RecordInsert(size_t hash, size_t distance_from_desired) {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
RecordInsertSlow(info_, hash, distance_from_desired);
@@ -206,6 +232,8 @@ class HashtablezInfoHandle {
inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
inline void RecordRehash(size_t /*total_probe_length*/) {}
+ inline void RecordReservation(size_t /*target_capacity*/) {}
+ inline void RecordClearedReservation() {}
inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
inline void RecordErase() {}
@@ -215,98 +243,47 @@ class HashtablezInfoHandle {
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.
-inline HashtablezInfoHandle Sample() {
+inline HashtablezInfoHandle Sample(
+ size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
- if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+ if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) {
return HashtablezInfoHandle(nullptr);
}
- return HashtablezInfoHandle(SampleSlow(&global_next_sample));
+ return HashtablezInfoHandle(
+ SampleSlow(global_next_sample, inline_element_size));
#else
return HashtablezInfoHandle(nullptr);
#endif // !ABSL_PER_THREAD_TLS
}
-// Holds samples and their associated stack traces with a soft limit of
-// `SetHashtablezMaxSamples()`.
-//
-// Thread safe.
-class HashtablezSampler {
- public:
- // Returns a global Sampler.
- static HashtablezSampler& Global();
-
- HashtablezSampler();
- ~HashtablezSampler();
+using HashtablezSampler =
+ ::absl::profiling_internal::SampleRecorder<HashtablezInfo>;
- // Registers for sampling. Returns an opaque registration info.
- HashtablezInfo* Register();
+// Returns a global Sampler.
+HashtablezSampler& GlobalHashtablezSampler();
- // Unregisters the sample.
- void Unregister(HashtablezInfo* sample);
-
- // The dispose callback will be called on all samples the moment they are
- // being unregistered. Only affects samples that are unregistered after the
- // callback has been set.
- // Returns the previous callback.
- using DisposeCallback = void (*)(const HashtablezInfo&);
- DisposeCallback SetDisposeCallback(DisposeCallback f);
-
- // Iterates over all the registered `StackInfo`s. Returning the number of
- // samples that have been dropped.
- int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
-
- private:
- void PushNew(HashtablezInfo* sample);
- void PushDead(HashtablezInfo* sample);
- HashtablezInfo* PopDead();
-
- std::atomic<size_t> dropped_samples_;
- std::atomic<size_t> size_estimate_;
-
- // Intrusive lock free linked lists for tracking samples.
- //
- // `all_` records all samples (they are never removed from this list) and is
- // terminated with a `nullptr`.
- //
- // `graveyard_.dead` is a circular linked list. When it is empty,
- // `graveyard_.dead == &graveyard`. The list is circular so that
- // every item on it (even the last) has a non-null dead pointer. This allows
- // `Iterate` to determine if a given sample is live or dead using only
- // information on the sample itself.
- //
- // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
- // looks like this (G is the Graveyard):
- //
- // +---+ +---+ +---+ +---+ +---+
- // all -->| A |--->| B |--->| C |--->| D |--->| E |
- // | | | | | | | | | |
- // +---+ | | +->| |-+ | | +->| |-+ | |
- // | G | +---+ | +---+ | +---+ | +---+ | +---+
- // | | | | | |
- // | | --------+ +--------+ |
- // +---+ |
- // ^ |
- // +--------------------------------------+
- //
- std::atomic<HashtablezInfo*> all_;
- HashtablezInfo graveyard_;
-
- std::atomic<DisposeCallback> dispose_;
-};
+using HashtablezConfigListener = void (*)();
+void SetHashtablezConfigListener(HashtablezConfigListener l);
// Enables or disables sampling for Swiss tables.
+bool IsHashtablezEnabled();
void SetHashtablezEnabled(bool enabled);
+void SetHashtablezEnabledInternal(bool enabled);
// Sets the rate at which Swiss tables will be sampled.
+int32_t GetHashtablezSampleParameter();
void SetHashtablezSampleParameter(int32_t rate);
+void SetHashtablezSampleParameterInternal(int32_t rate);
// Sets a soft max for the number of samples that will be kept.
+int32_t GetHashtablezMaxSamples();
void SetHashtablezMaxSamples(int32_t max);
+void SetHashtablezMaxSamplesInternal(int32_t max);
// Configuration override.
// This allows process-wide sampling without depending on order of
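
The fast path of Sample() above is one thread-local decrement and one predicted-true branch; SampleSlow runs only when the countdown crosses zero. A distilled sketch of that countdown pattern, with a fixed stride standing in for the ExponentialBiased draw:

#include <cstdint>

// Thread-local countdown: the common case is a decrement and a branch.
thread_local int64_t next_sample = 0;

bool ShouldSample() {
  if (--next_sample > 0) return false;  // overwhelmingly common path
  // Slow path: in the real sampler the new stride is drawn from an
  // exponentially biased generator; a constant stands in here.
  next_sample = 1 << 10;
  return true;
}
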
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index 5f4c83b7..665d518f 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -21,7 +21,8 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
-#include "absl/container/internal/have_sse.h"
+#include "absl/base/config.h"
+#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/internal/thread_pool.h"
#include "absl/synchronization/mutex.h"
@@ -29,7 +30,7 @@
#include "absl/time/clock.h"
#include "absl/time/time.h"
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
constexpr int kProbeLength = 16;
#else
constexpr int kProbeLength = 8;
@@ -69,7 +70,9 @@ std::vector<size_t> GetSizes(HashtablezSampler* s) {
}
HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
- auto* info = s->Register();
+ const int64_t test_stride = 123;
+ const size_t test_element_size = 17;
+ auto* info = s->Register(test_stride, test_element_size);
assert(info != nullptr);
info->size.store(size);
return info;
@@ -77,9 +80,11 @@ HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
TEST(HashtablezInfoTest, PrepareForSampling) {
absl::Time test_start = absl::Now();
+ const int64_t test_stride = 123;
+ const size_t test_element_size = 17;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ info.PrepareForSampling(test_stride, test_element_size);
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
@@ -90,7 +95,10 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
+ EXPECT_EQ(info.max_reserve.load(), 0);
EXPECT_GE(info.create_time, test_start);
+ EXPECT_EQ(info.weight, test_stride);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
info.capacity.store(1, std::memory_order_relaxed);
info.size.store(1, std::memory_order_relaxed);
@@ -100,9 +108,10 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
info.hashes_bitwise_or.store(1, std::memory_order_relaxed);
info.hashes_bitwise_and.store(1, std::memory_order_relaxed);
info.hashes_bitwise_xor.store(1, std::memory_order_relaxed);
+ info.max_reserve.store(1, std::memory_order_relaxed);
info.create_time = test_start - absl::Hours(20);
- info.PrepareForSampling();
+ info.PrepareForSampling(test_stride * 2, test_element_size);
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
@@ -112,13 +121,18 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
+ EXPECT_EQ(info.max_reserve.load(), 0);
+ EXPECT_EQ(info.weight, 2 * test_stride);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_GE(info.create_time, test_start);
}
TEST(HashtablezInfoTest, RecordStorageChanged) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ const int64_t test_stride = 21;
+ const size_t test_element_size = 19;
+ info.PrepareForSampling(test_stride, test_element_size);
RecordStorageChangedSlow(&info, 17, 47);
EXPECT_EQ(info.size.load(), 17);
EXPECT_EQ(info.capacity.load(), 47);
@@ -130,7 +144,9 @@ TEST(HashtablezInfoTest, RecordStorageChanged) {
TEST(HashtablezInfoTest, RecordInsert) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ const int64_t test_stride = 25;
+ const size_t test_element_size = 23;
+ info.PrepareForSampling(test_stride, test_element_size);
EXPECT_EQ(info.max_probe_length.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
EXPECT_EQ(info.max_probe_length.load(), 6);
@@ -150,9 +166,11 @@ TEST(HashtablezInfoTest, RecordInsert) {
}
TEST(HashtablezInfoTest, RecordErase) {
+ const int64_t test_stride = 31;
+ const size_t test_element_size = 29;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ info.PrepareForSampling(test_stride, test_element_size);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.size.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -160,12 +178,15 @@ TEST(HashtablezInfoTest, RecordErase) {
RecordEraseSlow(&info);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 1);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
}
TEST(HashtablezInfoTest, RecordRehash) {
+ const int64_t test_stride = 33;
+ const size_t test_element_size = 31;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ info.PrepareForSampling(test_stride, test_element_size);
RecordInsertSlow(&info, 0x1, 0);
RecordInsertSlow(&info, 0x2, kProbeLength);
RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -184,43 +205,67 @@ TEST(HashtablezInfoTest, RecordRehash) {
EXPECT_EQ(info.total_probe_length.load(), 3);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 1);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
+}
+
+TEST(HashtablezInfoTest, RecordReservation) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ const int64_t test_stride = 35;
+ const size_t test_element_size = 33;
+ info.PrepareForSampling(test_stride, test_element_size);
+ RecordReservationSlow(&info, 3);
+ EXPECT_EQ(info.max_reserve.load(), 3);
+
+ RecordReservationSlow(&info, 2);
+ // High watermark does not change
+ EXPECT_EQ(info.max_reserve.load(), 3);
+
+ RecordReservationSlow(&info, 10);
+ // High watermark does change
+ EXPECT_EQ(info.max_reserve.load(), 10);
}
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(HashtablezSamplerTest, SmallSampleParameter) {
+ const size_t test_element_size = 31;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
for (int i = 0; i < 1000; ++i) {
- int64_t next_sample = 0;
- HashtablezInfo* sample = SampleSlow(&next_sample);
- EXPECT_GT(next_sample, 0);
+ SamplingState next_sample = {0, 0};
+ HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+ EXPECT_GT(next_sample.next_sample, 0);
+ EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
}
}
TEST(HashtablezSamplerTest, LargeSampleParameter) {
+ const size_t test_element_size = 31;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
for (int i = 0; i < 1000; ++i) {
- int64_t next_sample = 0;
- HashtablezInfo* sample = SampleSlow(&next_sample);
- EXPECT_GT(next_sample, 0);
+ SamplingState next_sample = {0, 0};
+ HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+ EXPECT_GT(next_sample.next_sample, 0);
+ EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
}
}
TEST(HashtablezSamplerTest, Sample) {
+ const size_t test_element_size = 31;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
int64_t num_sampled = 0;
int64_t total = 0;
double sample_rate = 0.0;
for (int i = 0; i < 1000000; ++i) {
- HashtablezInfoHandle h = Sample();
+ HashtablezInfoHandle h = Sample(test_element_size);
++total;
if (HashtablezInfoHandlePeer::IsSampled(h)) {
++num_sampled;
@@ -232,14 +277,17 @@ TEST(HashtablezSamplerTest, Sample) {
}
TEST(HashtablezSamplerTest, Handle) {
- auto& sampler = HashtablezSampler::Global();
- HashtablezInfoHandle h(sampler.Register());
+ auto& sampler = GlobalHashtablezSampler();
+ const int64_t test_stride = 41;
+ const size_t test_element_size = 39;
+ HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size));
auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
bool found = false;
sampler.Iterate([&](const HashtablezInfo& h) {
if (&h == info) {
+ EXPECT_EQ(h.weight, test_stride);
EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
found = true;
}
@@ -305,18 +353,20 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
ThreadPool pool(10);
for (int i = 0; i < 10; ++i) {
- pool.Schedule([&sampler, &stop]() {
+ const int64_t sampling_stride = 11 + i % 3;
+ const size_t elt_size = 10 + i % 2;
+ pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() {
std::random_device rd;
std::mt19937 gen(rd());
std::vector<HashtablezInfo*> infoz;
while (!stop.HasBeenNotified()) {
if (infoz.empty()) {
- infoz.push_back(sampler.Register());
+ infoz.push_back(sampler.Register(sampling_stride, elt_size));
}
switch (std::uniform_int_distribution<>(0, 2)(gen)) {
case 0: {
- infoz.push_back(sampler.Register());
+ infoz.push_back(sampler.Register(sampling_stride, elt_size));
break;
}
case 1: {
@@ -325,6 +375,7 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
HashtablezInfo* info = infoz[p];
infoz[p] = infoz.back();
infoz.pop_back();
+ EXPECT_EQ(info->weight, sampling_stride);
sampler.Unregister(info);
break;
}
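
The Sample test above relies on simple arithmetic: with a sample parameter of p, the geometric strides have mean roughly p, so the expected fraction of sampled Sample() calls is about 1/p. For the parameter of 100 used in the test:

// Expected sampling rate for the test's configuration.
constexpr double kSampleParameter = 100.0;
constexpr double kExpectedRate = 1.0 / kSampleParameter;  // ~= 0.01 (1%)
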
diff --git a/absl/container/internal/have_sse.h b/absl/container/internal/have_sse.h
deleted file mode 100644
index e75e1a16..00000000
--- a/absl/container/internal/have_sse.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Shared config probing for SSE instructions used in Swiss tables.
-#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-
-#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#if defined(__SSE2__) || \
- (defined(_MSC_VER) && \
- (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
-#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1
-#else
-#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0
-#endif
-#endif
-
-#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
-#ifdef __SSSE3__
-#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1
-#else
-#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0
-#endif
-#endif
-
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \
- !ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#error "Bad configuration!"
-#endif
-
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#include <emmintrin.h>
-#endif
-
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
-#include <tmmintrin.h>
-#endif
-
-#endif // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
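
Deleting have_sse.h also switches conventions: the old macros were always defined with value 0 or 1 and tested with #if, while the replacement ABSL_INTERNAL_HAVE_SSE2 from absl/base/config.h is defined only when the instructions are available and is tested with #ifdef, as the call-site changes elsewhere in this patch show:

// Old convention (the header deleted above): always defined, 0 or 1.
#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
// ... SSE2 code path ...
#endif

// New convention (absl/base/config.h): defined only when available.
#ifdef ABSL_INTERNAL_HAVE_SSE2
// ... SSE2 code path ...
#endif
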
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index b8aec45b..54c92a01 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -21,8 +21,11 @@
#include <iterator>
#include <limits>
#include <memory>
+#include <new>
+#include <type_traits>
#include <utility>
+#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/memory/memory.h"
@@ -36,116 +39,145 @@ namespace inlined_vector_internal {
// GCC does not deal very well with the below code
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
+template <typename A>
+using AllocatorTraits = std::allocator_traits<A>;
+template <typename A>
+using ValueType = typename AllocatorTraits<A>::value_type;
+template <typename A>
+using SizeType = typename AllocatorTraits<A>::size_type;
+template <typename A>
+using Pointer = typename AllocatorTraits<A>::pointer;
+template <typename A>
+using ConstPointer = typename AllocatorTraits<A>::const_pointer;
+template <typename A>
+using DifferenceType = typename AllocatorTraits<A>::difference_type;
+template <typename A>
+using Reference = ValueType<A>&;
+template <typename A>
+using ConstReference = const ValueType<A>&;
+template <typename A>
+using Iterator = Pointer<A>;
+template <typename A>
+using ConstIterator = ConstPointer<A>;
+template <typename A>
+using ReverseIterator = typename std::reverse_iterator<Iterator<A>>;
+template <typename A>
+using ConstReverseIterator = typename std::reverse_iterator<ConstIterator<A>>;
+template <typename A>
+using MoveIterator = typename std::move_iterator<Iterator<A>>;
+
template <typename Iterator>
using IsAtLeastForwardIterator = std::is_convertible<
typename std::iterator_traits<Iterator>::iterator_category,
std::forward_iterator_tag>;
-template <typename AllocatorType,
- typename ValueType =
- typename absl::allocator_traits<AllocatorType>::value_type>
+template <typename A>
using IsMemcpyOk =
- absl::conjunction<std::is_same<AllocatorType, std::allocator<ValueType>>,
- absl::is_trivially_copy_constructible<ValueType>,
- absl::is_trivially_copy_assignable<ValueType>,
- absl::is_trivially_destructible<ValueType>>;
-
-template <typename AllocatorType, typename Pointer, typename SizeType>
-void DestroyElements(AllocatorType* alloc_ptr, Pointer destroy_first,
- SizeType destroy_size) {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
-
- if (destroy_first != nullptr) {
- for (auto i = destroy_size; i != 0;) {
+ absl::conjunction<std::is_same<A, std::allocator<ValueType<A>>>,
+ absl::is_trivially_copy_constructible<ValueType<A>>,
+ absl::is_trivially_copy_assignable<ValueType<A>>,
+ absl::is_trivially_destructible<ValueType<A>>>;
+
+template <typename T>
+struct TypeIdentity {
+ using type = T;
+};
+
+// Used for function arguments in template functions to prevent ADL by forcing
+// callers to explicitly specify the template parameter.
+template <typename T>
+using NoTypeDeduction = typename TypeIdentity<T>::type;
+
+template <typename A, bool IsTriviallyDestructible =
+ absl::is_trivially_destructible<ValueType<A>>::value>
+struct DestroyAdapter;
+
+template <typename A>
+struct DestroyAdapter<A, /* IsTriviallyDestructible */ false> {
+ static void DestroyElements(A& allocator, Pointer<A> destroy_first,
+ SizeType<A> destroy_size) {
+ for (SizeType<A> i = destroy_size; i != 0;) {
--i;
- AllocatorTraits::destroy(*alloc_ptr, destroy_first + i);
+ AllocatorTraits<A>::destroy(allocator, destroy_first + i);
}
+ }
+};
-#if !defined(NDEBUG)
- {
- using ValueType = typename AllocatorTraits::value_type;
-
- // Overwrite unused memory with `0xab` so we can catch uninitialized
- // usage.
- //
- // Cast to `void*` to tell the compiler that we don't care that we might
- // be scribbling on a vtable pointer.
- void* memory_ptr = destroy_first;
- auto memory_size = destroy_size * sizeof(ValueType);
- std::memset(memory_ptr, 0xab, memory_size);
- }
-#endif // !defined(NDEBUG)
+template <typename A>
+struct DestroyAdapter<A, /* IsTriviallyDestructible */ true> {
+ static void DestroyElements(A& allocator, Pointer<A> destroy_first,
+ SizeType<A> destroy_size) {
+ static_cast<void>(allocator);
+ static_cast<void>(destroy_first);
+ static_cast<void>(destroy_size);
}
-}
+};
-// If kUseMemcpy is true, memcpy(dst, src, n); else do nothing.
-// Useful to avoid compiler warnings when memcpy() is used for T values
-// that are not trivially copyable in non-reachable code.
-template <bool kUseMemcpy>
-inline void MemcpyIfAllowed(void* dst, const void* src, size_t n);
+template <typename A>
+struct Allocation {
+ Pointer<A> data;
+ SizeType<A> capacity;
+};
-// memcpy when allowed.
-template <>
-inline void MemcpyIfAllowed<true>(void* dst, const void* src, size_t n) {
- memcpy(dst, src, n);
-}
+template <typename A,
+ bool IsOverAligned =
+ (alignof(ValueType<A>) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)>
+struct MallocAdapter {
+ static Allocation<A> Allocate(A& allocator, SizeType<A> requested_capacity) {
+ return {AllocatorTraits<A>::allocate(allocator, requested_capacity),
+ requested_capacity};
+ }
-// Do nothing for types that are not memcpy-able. This function is only
-// called from non-reachable branches.
-template <>
-inline void MemcpyIfAllowed<false>(void*, const void*, size_t) {}
+ static void Deallocate(A& allocator, Pointer<A> pointer,
+ SizeType<A> capacity) {
+ AllocatorTraits<A>::deallocate(allocator, pointer, capacity);
+ }
+};
-template <typename AllocatorType, typename Pointer, typename ValueAdapter,
- typename SizeType>
-void ConstructElements(AllocatorType* alloc_ptr, Pointer construct_first,
- ValueAdapter* values_ptr, SizeType construct_size) {
- for (SizeType i = 0; i < construct_size; ++i) {
- ABSL_INTERNAL_TRY {
- values_ptr->ConstructNext(alloc_ptr, construct_first + i);
- }
+template <typename A, typename ValueAdapter>
+void ConstructElements(NoTypeDeduction<A>& allocator,
+ Pointer<A> construct_first, ValueAdapter& values,
+ SizeType<A> construct_size) {
+ for (SizeType<A> i = 0; i < construct_size; ++i) {
+ ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); }
ABSL_INTERNAL_CATCH_ANY {
- inlined_vector_internal::DestroyElements(alloc_ptr, construct_first, i);
+ DestroyAdapter<A>::DestroyElements(allocator, construct_first, i);
ABSL_INTERNAL_RETHROW;
}
}
}
-template <typename Pointer, typename ValueAdapter, typename SizeType>
-void AssignElements(Pointer assign_first, ValueAdapter* values_ptr,
- SizeType assign_size) {
- for (SizeType i = 0; i < assign_size; ++i) {
- values_ptr->AssignNext(assign_first + i);
+template <typename A, typename ValueAdapter>
+void AssignElements(Pointer<A> assign_first, ValueAdapter& values,
+ SizeType<A> assign_size) {
+ for (SizeType<A> i = 0; i < assign_size; ++i) {
+ values.AssignNext(assign_first + i);
}
}
-template <typename AllocatorType>
+template <typename A>
struct StorageView {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
- using Pointer = typename AllocatorTraits::pointer;
- using SizeType = typename AllocatorTraits::size_type;
-
- Pointer data;
- SizeType size;
- SizeType capacity;
+ Pointer<A> data;
+ SizeType<A> size;
+ SizeType<A> capacity;
};
-template <typename AllocatorType, typename Iterator>
+template <typename A, typename Iterator>
class IteratorValueAdapter {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
- using Pointer = typename AllocatorTraits::pointer;
-
public:
explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}
- void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
- AllocatorTraits::construct(*alloc_ptr, construct_at, *it_);
+ void ConstructNext(A& allocator, Pointer<A> construct_at) {
+ AllocatorTraits<A>::construct(allocator, construct_at, *it_);
++it_;
}
- void AssignNext(Pointer assign_at) {
+ void AssignNext(Pointer<A> assign_at) {
*assign_at = *it_;
++it_;
}
@@ -154,166 +186,123 @@ class IteratorValueAdapter {
Iterator it_;
};
-template <typename AllocatorType>
+template <typename A>
class CopyValueAdapter {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
- using ValueType = typename AllocatorTraits::value_type;
- using Pointer = typename AllocatorTraits::pointer;
- using ConstPointer = typename AllocatorTraits::const_pointer;
-
public:
- explicit CopyValueAdapter(const ValueType& v) : ptr_(std::addressof(v)) {}
+ explicit CopyValueAdapter(ConstPointer<A> p) : ptr_(p) {}
- void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
- AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_);
+ void ConstructNext(A& allocator, Pointer<A> construct_at) {
+ AllocatorTraits<A>::construct(allocator, construct_at, *ptr_);
}
- void AssignNext(Pointer assign_at) { *assign_at = *ptr_; }
+ void AssignNext(Pointer<A> assign_at) { *assign_at = *ptr_; }
private:
- ConstPointer ptr_;
+ ConstPointer<A> ptr_;
};
-template <typename AllocatorType>
+template <typename A>
class DefaultValueAdapter {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
- using ValueType = typename AllocatorTraits::value_type;
- using Pointer = typename AllocatorTraits::pointer;
-
public:
explicit DefaultValueAdapter() {}
- void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
- AllocatorTraits::construct(*alloc_ptr, construct_at);
+ void ConstructNext(A& allocator, Pointer<A> construct_at) {
+ AllocatorTraits<A>::construct(allocator, construct_at);
}
- void AssignNext(Pointer assign_at) { *assign_at = ValueType(); }
+ void AssignNext(Pointer<A> assign_at) { *assign_at = ValueType<A>(); }
};
-template <typename AllocatorType>
+template <typename A>
class AllocationTransaction {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
- using Pointer = typename AllocatorTraits::pointer;
- using SizeType = typename AllocatorTraits::size_type;
-
public:
- explicit AllocationTransaction(AllocatorType* alloc_ptr)
- : alloc_data_(*alloc_ptr, nullptr) {}
+ explicit AllocationTransaction(A& allocator)
+ : allocator_data_(allocator, nullptr), capacity_(0) {}
~AllocationTransaction() {
if (DidAllocate()) {
- AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity());
+ MallocAdapter<A>::Deallocate(GetAllocator(), GetData(), GetCapacity());
}
}
AllocationTransaction(const AllocationTransaction&) = delete;
void operator=(const AllocationTransaction&) = delete;
- AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
- Pointer& GetData() { return alloc_data_.template get<1>(); }
- SizeType& GetCapacity() { return capacity_; }
+ A& GetAllocator() { return allocator_data_.template get<0>(); }
+ Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
+ SizeType<A>& GetCapacity() { return capacity_; }
bool DidAllocate() { return GetData() != nullptr; }
- Pointer Allocate(SizeType capacity) {
- GetData() = AllocatorTraits::allocate(GetAllocator(), capacity);
- GetCapacity() = capacity;
- return GetData();
+
+ Pointer<A> Allocate(SizeType<A> requested_capacity) {
+ Allocation<A> result =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ GetData() = result.data;
+ GetCapacity() = result.capacity;
+ return result.data;
}
+ ABSL_MUST_USE_RESULT Allocation<A> Release() && {
+ Allocation<A> result = {GetData(), GetCapacity()};
+ Reset();
+ return result;
+ }
+
+ private:
void Reset() {
GetData() = nullptr;
GetCapacity() = 0;
}
- private:
- container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
- SizeType capacity_ = 0;
+ container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
+ SizeType<A> capacity_;
};
-template <typename AllocatorType>
+template <typename A>
class ConstructionTransaction {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
- using Pointer = typename AllocatorTraits::pointer;
- using SizeType = typename AllocatorTraits::size_type;
-
public:
- explicit ConstructionTransaction(AllocatorType* alloc_ptr)
- : alloc_data_(*alloc_ptr, nullptr) {}
+ explicit ConstructionTransaction(A& allocator)
+ : allocator_data_(allocator, nullptr), size_(0) {}
~ConstructionTransaction() {
if (DidConstruct()) {
- inlined_vector_internal::DestroyElements(std::addressof(GetAllocator()),
- GetData(), GetSize());
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), GetData(), GetSize());
}
}
ConstructionTransaction(const ConstructionTransaction&) = delete;
void operator=(const ConstructionTransaction&) = delete;
- AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
- Pointer& GetData() { return alloc_data_.template get<1>(); }
- SizeType& GetSize() { return size_; }
+ A& GetAllocator() { return allocator_data_.template get<0>(); }
+ Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
+ SizeType<A>& GetSize() { return size_; }
bool DidConstruct() { return GetData() != nullptr; }
template <typename ValueAdapter>
- void Construct(Pointer data, ValueAdapter* values_ptr, SizeType size) {
- inlined_vector_internal::ConstructElements(std::addressof(GetAllocator()),
- data, values_ptr, size);
+ void Construct(Pointer<A> data, ValueAdapter& values, SizeType<A> size) {
+ ConstructElements<A>(GetAllocator(), data, values, size);
GetData() = data;
GetSize() = size;
}
- void Commit() {
+ void Commit() && {
GetData() = nullptr;
GetSize() = 0;
}
private:
- container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
- SizeType size_ = 0;
+ container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
+ SizeType<A> size_;
};
template <typename T, size_t N, typename A>
class Storage {
public:
- using AllocatorTraits = absl::allocator_traits<A>;
- using allocator_type = typename AllocatorTraits::allocator_type;
- using value_type = typename AllocatorTraits::value_type;
- using pointer = typename AllocatorTraits::pointer;
- using const_pointer = typename AllocatorTraits::const_pointer;
- using size_type = typename AllocatorTraits::size_type;
- using difference_type = typename AllocatorTraits::difference_type;
-
- using reference = value_type&;
- using const_reference = const value_type&;
- using RValueReference = value_type&&;
- using iterator = pointer;
- using const_iterator = const_pointer;
- using reverse_iterator = std::reverse_iterator<iterator>;
- using const_reverse_iterator = std::reverse_iterator<const_iterator>;
- using MoveIterator = std::move_iterator<iterator>;
- using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<allocator_type>;
-
- using StorageView = inlined_vector_internal::StorageView<allocator_type>;
-
- template <typename Iterator>
- using IteratorValueAdapter =
- inlined_vector_internal::IteratorValueAdapter<allocator_type, Iterator>;
- using CopyValueAdapter =
- inlined_vector_internal::CopyValueAdapter<allocator_type>;
- using DefaultValueAdapter =
- inlined_vector_internal::DefaultValueAdapter<allocator_type>;
-
- using AllocationTransaction =
- inlined_vector_internal::AllocationTransaction<allocator_type>;
- using ConstructionTransaction =
- inlined_vector_internal::ConstructionTransaction<allocator_type>;
-
- static size_type NextCapacity(size_type current_capacity) {
+ static SizeType<A> NextCapacity(SizeType<A> current_capacity) {
return current_capacity * 2;
}
- static size_type ComputeCapacity(size_type current_capacity,
- size_type requested_capacity) {
+ static SizeType<A> ComputeCapacity(SizeType<A> current_capacity,
+ SizeType<A> requested_capacity) {
return (std::max)(NextCapacity(current_capacity), requested_capacity);
}
@@ -321,15 +310,15 @@ class Storage {
// Storage Constructors and Destructor
// ---------------------------------------------------------------------------
- Storage() : metadata_(allocator_type(), /* size and is_allocated */ 0) {}
+ Storage() : metadata_(A(), /* size and is_allocated */ 0u) {}
- explicit Storage(const allocator_type& alloc)
- : metadata_(alloc, /* size and is_allocated */ 0) {}
+ explicit Storage(const A& allocator)
+ : metadata_(allocator, /* size and is_allocated */ 0u) {}
~Storage() {
if (GetSizeAndIsAllocated() == 0) {
// Empty and not allocated; nothing to do.
- } else if (IsMemcpyOk::value) {
+ } else if (IsMemcpyOk<A>::value) {
// No destructors need to be run; just deallocate if necessary.
DeallocateIfAllocated();
} else {
@@ -341,52 +330,48 @@ class Storage {
// Storage Member Accessors
// ---------------------------------------------------------------------------
- size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
+ SizeType<A>& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
- const size_type& GetSizeAndIsAllocated() const {
+ const SizeType<A>& GetSizeAndIsAllocated() const {
return metadata_.template get<1>();
}
- size_type GetSize() const { return GetSizeAndIsAllocated() >> 1; }
+ SizeType<A> GetSize() const { return GetSizeAndIsAllocated() >> 1; }
bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
- pointer GetAllocatedData() { return data_.allocated.allocated_data; }
+ Pointer<A> GetAllocatedData() { return data_.allocated.allocated_data; }
- const_pointer GetAllocatedData() const {
+ ConstPointer<A> GetAllocatedData() const {
return data_.allocated.allocated_data;
}
- pointer GetInlinedData() {
- return reinterpret_cast<pointer>(
+ Pointer<A> GetInlinedData() {
+ return reinterpret_cast<Pointer<A>>(
std::addressof(data_.inlined.inlined_data[0]));
}
- const_pointer GetInlinedData() const {
- return reinterpret_cast<const_pointer>(
+ ConstPointer<A> GetInlinedData() const {
+ return reinterpret_cast<ConstPointer<A>>(
std::addressof(data_.inlined.inlined_data[0]));
}
- size_type GetAllocatedCapacity() const {
+ SizeType<A> GetAllocatedCapacity() const {
return data_.allocated.allocated_capacity;
}
- size_type GetInlinedCapacity() const { return static_cast<size_type>(N); }
+ SizeType<A> GetInlinedCapacity() const { return static_cast<SizeType<A>>(N); }
- StorageView MakeStorageView() {
- return GetIsAllocated()
- ? StorageView{GetAllocatedData(), GetSize(),
- GetAllocatedCapacity()}
- : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()};
+ StorageView<A> MakeStorageView() {
+ return GetIsAllocated() ? StorageView<A>{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()}
+ : StorageView<A>{GetInlinedData(), GetSize(),
+ GetInlinedCapacity()};
}
- allocator_type* GetAllocPtr() {
- return std::addressof(metadata_.template get<0>());
- }
+ A& GetAllocator() { return metadata_.template get<0>(); }
- const allocator_type* GetAllocPtr() const {
- return std::addressof(metadata_.template get<0>());
- }
+ const A& GetAllocator() const { return metadata_.template get<0>(); }
// ---------------------------------------------------------------------------
// Storage Member Mutators
@@ -395,74 +380,68 @@ class Storage {
ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
template <typename ValueAdapter>
- void Initialize(ValueAdapter values, size_type new_size);
+ void Initialize(ValueAdapter values, SizeType<A> new_size);
template <typename ValueAdapter>
- void Assign(ValueAdapter values, size_type new_size);
+ void Assign(ValueAdapter values, SizeType<A> new_size);
template <typename ValueAdapter>
- void Resize(ValueAdapter values, size_type new_size);
+ void Resize(ValueAdapter values, SizeType<A> new_size);
template <typename ValueAdapter>
- iterator Insert(const_iterator pos, ValueAdapter values,
- size_type insert_count);
+ Iterator<A> Insert(ConstIterator<A> pos, ValueAdapter values,
+ SizeType<A> insert_count);
template <typename... Args>
- reference EmplaceBack(Args&&... args);
+ Reference<A> EmplaceBack(Args&&... args);
- iterator Erase(const_iterator from, const_iterator to);
+ Iterator<A> Erase(ConstIterator<A> from, ConstIterator<A> to);
- void Reserve(size_type requested_capacity);
+ void Reserve(SizeType<A> requested_capacity);
void ShrinkToFit();
void Swap(Storage* other_storage_ptr);
void SetIsAllocated() {
- GetSizeAndIsAllocated() |= static_cast<size_type>(1);
+ GetSizeAndIsAllocated() |= static_cast<SizeType<A>>(1);
}
void UnsetIsAllocated() {
- GetSizeAndIsAllocated() &= ((std::numeric_limits<size_type>::max)() - 1);
+ GetSizeAndIsAllocated() &= ((std::numeric_limits<SizeType<A>>::max)() - 1);
}
- void SetSize(size_type size) {
+ void SetSize(SizeType<A> size) {
GetSizeAndIsAllocated() =
- (size << 1) | static_cast<size_type>(GetIsAllocated());
+ (size << 1) | static_cast<SizeType<A>>(GetIsAllocated());
}
- void SetAllocatedSize(size_type size) {
- GetSizeAndIsAllocated() = (size << 1) | static_cast<size_type>(1);
+ void SetAllocatedSize(SizeType<A> size) {
+ GetSizeAndIsAllocated() = (size << 1) | static_cast<SizeType<A>>(1);
}
- void SetInlinedSize(size_type size) {
- GetSizeAndIsAllocated() = size << static_cast<size_type>(1);
+ void SetInlinedSize(SizeType<A> size) {
+ GetSizeAndIsAllocated() = size << static_cast<SizeType<A>>(1);
}
- void AddSize(size_type count) {
- GetSizeAndIsAllocated() += count << static_cast<size_type>(1);
+ void AddSize(SizeType<A> count) {
+ GetSizeAndIsAllocated() += count << static_cast<SizeType<A>>(1);
}
- void SubtractSize(size_type count) {
- assert(count <= GetSize());
+ void SubtractSize(SizeType<A> count) {
+ ABSL_HARDENING_ASSERT(count <= GetSize());
- GetSizeAndIsAllocated() -= count << static_cast<size_type>(1);
+ GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
}
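
The accessors and mutators above all operate on one packed integer: the low bit is the is-allocated flag and the remaining bits hold the size, so a size `s` is stored as `s << 1`. A small standalone illustration of that encoding (not Abseil code):

    #include <cassert>
    #include <cstddef>

    int main() {
      // Packed as (size << 1) | is_allocated, as in GetSizeAndIsAllocated().
      std::size_t size_and_is_allocated =
          (std::size_t{5} << 1) | 1u;               // SetAllocatedSize(5)
      assert((size_and_is_allocated >> 1) == 5);    // GetSize()
      assert((size_and_is_allocated & 1) == 1);     // GetIsAllocated()

      // AddSize(2): adding (count << 1) cannot disturb the low flag bit.
      size_and_is_allocated += std::size_t{2} << 1;
      assert((size_and_is_allocated >> 1) == 7);
      assert((size_and_is_allocated & 1) == 1);
    }
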
- void SetAllocatedData(pointer data, size_type capacity) {
- data_.allocated.allocated_data = data;
- data_.allocated.allocated_capacity = capacity;
- }
-
- void AcquireAllocatedData(AllocationTransaction* allocation_tx_ptr) {
- SetAllocatedData(allocation_tx_ptr->GetData(),
- allocation_tx_ptr->GetCapacity());
-
- allocation_tx_ptr->Reset();
+ void SetAllocation(Allocation<A> allocation) {
+ data_.allocated.allocated_data = allocation.data;
+ data_.allocated.allocated_capacity = allocation.capacity;
}
void MemcpyFrom(const Storage& other_storage) {
- assert(IsMemcpyOk::value || other_storage.GetIsAllocated());
+ ABSL_HARDENING_ASSERT(IsMemcpyOk<A>::value ||
+ other_storage.GetIsAllocated());
GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
data_ = other_storage.data_;
@@ -470,24 +449,23 @@ class Storage {
void DeallocateIfAllocated() {
if (GetIsAllocated()) {
- AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(),
- GetAllocatedCapacity());
+ MallocAdapter<A>::Deallocate(GetAllocator(), GetAllocatedData(),
+ GetAllocatedCapacity());
}
}
private:
ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
- using Metadata =
- container_internal::CompressedTuple<allocator_type, size_type>;
+ using Metadata = container_internal::CompressedTuple<A, SizeType<A>>;
struct Allocated {
- pointer allocated_data;
- size_type allocated_capacity;
+ Pointer<A> allocated_data;
+ SizeType<A> allocated_capacity;
};
struct Inlined {
- alignas(value_type) char inlined_data[sizeof(value_type[N])];
+ alignas(ValueType<A>) char inlined_data[sizeof(ValueType<A>[N])];
};
union Data {
@@ -496,7 +474,7 @@ class Storage {
};
template <typename... Args>
- ABSL_ATTRIBUTE_NOINLINE reference EmplaceBackSlow(Args&&... args);
+ ABSL_ATTRIBUTE_NOINLINE Reference<A> EmplaceBackSlow(Args&&... args);
Metadata metadata_;
Data data_;
@@ -504,17 +482,17 @@ class Storage {
template <typename T, size_t N, typename A>
void Storage<T, N, A>::DestroyContents() {
- pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
- inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize());
+ Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), data, GetSize());
DeallocateIfAllocated();
}
template <typename T, size_t N, typename A>
void Storage<T, N, A>::InitFrom(const Storage& other) {
- const auto n = other.GetSize();
- assert(n > 0); // Empty sources handled handled in caller.
- const_pointer src;
- pointer dst;
+ const SizeType<A> n = other.GetSize();
+ ABSL_HARDENING_ASSERT(n > 0); // Empty sources handled in caller.
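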
+ ConstPointer<A> src;
+ Pointer<A> dst;
if (!other.GetIsAllocated()) {
dst = GetInlinedData();
src = other.GetInlinedData();
@@ -522,43 +500,48 @@ void Storage<T, N, A>::InitFrom(const Storage& other) {
// Because this is only called from the `InlinedVector` constructors, it's
// safe to take on the allocation with size `0`. If `ConstructElements(...)`
// throws, deallocation will be automatically handled by `~Storage()`.
- size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), n);
- dst = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
- SetAllocatedData(dst, new_capacity);
+ SizeType<A> requested_capacity = ComputeCapacity(GetInlinedCapacity(), n);
+ Allocation<A> allocation =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ SetAllocation(allocation);
+ dst = allocation.data;
src = other.GetAllocatedData();
}
- if (IsMemcpyOk::value) {
- MemcpyIfAllowed<IsMemcpyOk::value>(dst, src, sizeof(dst[0]) * n);
+ if (IsMemcpyOk<A>::value) {
+ std::memcpy(reinterpret_cast<char*>(dst),
+ reinterpret_cast<const char*>(src), n * sizeof(ValueType<A>));
} else {
- auto values = IteratorValueAdapter<const_pointer>(src);
- inlined_vector_internal::ConstructElements(GetAllocPtr(), dst, &values, n);
+ auto values = IteratorValueAdapter<A, ConstPointer<A>>(src);
+ ConstructElements<A>(GetAllocator(), dst, values, n);
}
GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
}
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
+auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
-> void {
// Only callable from constructors!
- assert(!GetIsAllocated());
- assert(GetSize() == 0);
+ ABSL_HARDENING_ASSERT(!GetIsAllocated());
+ ABSL_HARDENING_ASSERT(GetSize() == 0);
- pointer construct_data;
+ Pointer<A> construct_data;
if (new_size > GetInlinedCapacity()) {
// Because this is only called from the `InlinedVector` constructors, it's
// safe to take on the allocation with size `0`. If `ConstructElements(...)`
// throws, deallocation will be automatically handled by `~Storage()`.
- size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size);
- construct_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
- SetAllocatedData(construct_data, new_capacity);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(GetInlinedCapacity(), new_size);
+ Allocation<A> allocation =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ construct_data = allocation.data;
+ SetAllocation(allocation);
SetIsAllocated();
} else {
construct_data = GetInlinedData();
}
- inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
- &values, new_size);
+ ConstructElements<A>(GetAllocator(), construct_data, values, new_size);
// Since the initial size was guaranteed to be `0` and the allocated bit is
// already correct for either case, *adding* `new_size` gives us the correct
@@ -568,18 +551,20 @@ auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
- StorageView storage_view = MakeStorageView();
+auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
+ -> void {
+ StorageView<A> storage_view = MakeStorageView();
- AllocationTransaction allocation_tx(GetAllocPtr());
+ AllocationTransaction<A> allocation_tx(GetAllocator());
- absl::Span<value_type> assign_loop;
- absl::Span<value_type> construct_loop;
- absl::Span<value_type> destroy_loop;
+ absl::Span<ValueType<A>> assign_loop;
+ absl::Span<ValueType<A>> construct_loop;
+ absl::Span<ValueType<A>> destroy_loop;
if (new_size > storage_view.capacity) {
- size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
- construct_loop = {allocation_tx.Allocate(new_capacity), new_size};
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ construct_loop = {allocation_tx.Allocate(requested_capacity), new_size};
destroy_loop = {storage_view.data, storage_view.size};
} else if (new_size > storage_view.size) {
assign_loop = {storage_view.data, storage_view.size};
@@ -590,18 +575,17 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
}
- inlined_vector_internal::AssignElements(assign_loop.data(), &values,
- assign_loop.size());
+ AssignElements<A>(assign_loop.data(), values, assign_loop.size());
- inlined_vector_internal::ConstructElements(
- GetAllocPtr(), construct_loop.data(), &values, construct_loop.size());
+ ConstructElements<A>(GetAllocator(), construct_loop.data(), values,
+ construct_loop.size());
- inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
- destroy_loop.size());
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), destroy_loop.data(),
+ destroy_loop.size());
if (allocation_tx.DidAllocate()) {
DeallocateIfAllocated();
- AcquireAllocatedData(&allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
@@ -610,42 +594,42 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
- StorageView storage_view = MakeStorageView();
- auto* const base = storage_view.data;
- const size_type size = storage_view.size;
- auto* alloc = GetAllocPtr();
+auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
+ -> void {
+ StorageView<A> storage_view = MakeStorageView();
+ Pointer<A> const base = storage_view.data;
+ const SizeType<A> size = storage_view.size;
+ A& alloc = GetAllocator();
if (new_size <= size) {
// Destroy extra old elements.
- inlined_vector_internal::DestroyElements(alloc, base + new_size,
- size - new_size);
+ DestroyAdapter<A>::DestroyElements(alloc, base + new_size, size - new_size);
} else if (new_size <= storage_view.capacity) {
// Construct new elements in place.
- inlined_vector_internal::ConstructElements(alloc, base + size, &values,
- new_size - size);
+ ConstructElements<A>(alloc, base + size, values, new_size - size);
} else {
// Steps:
// a. Allocate new backing store.
// b. Construct new elements in new backing store.
- // c. Move existing elements from old backing store to now.
+ // c. Move existing elements from old backing store to new backing store.
// d. Destroy all elements in old backing store.
// Use transactional wrappers for the first two steps so we can roll
// back if necessary due to exceptions.
- AllocationTransaction allocation_tx(alloc);
- size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
- pointer new_data = allocation_tx.Allocate(new_capacity);
+ AllocationTransaction<A> allocation_tx(alloc);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
- ConstructionTransaction construction_tx(alloc);
- construction_tx.Construct(new_data + size, &values, new_size - size);
+ ConstructionTransaction<A> construction_tx(alloc);
+ construction_tx.Construct(new_data + size, values, new_size - size);
- IteratorValueAdapter<MoveIterator> move_values((MoveIterator(base)));
- inlined_vector_internal::ConstructElements(alloc, new_data, &move_values,
- size);
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ (MoveIterator<A>(base)));
+ ConstructElements<A>(alloc, new_data, move_values, size);
- inlined_vector_internal::DestroyElements(alloc, base, size);
- construction_tx.Commit();
+ DestroyAdapter<A>::DestroyElements(alloc, base, size);
+ std::move(construction_tx).Commit();
DeallocateIfAllocated();
- AcquireAllocatedData(&allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
SetSize(new_size);
@@ -653,76 +637,77 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Insert(const_iterator pos, ValueAdapter values,
- size_type insert_count) -> iterator {
- StorageView storage_view = MakeStorageView();
+auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
+ SizeType<A> insert_count) -> Iterator<A> {
+ StorageView<A> storage_view = MakeStorageView();
- size_type insert_index =
- std::distance(const_iterator(storage_view.data), pos);
- size_type insert_end_index = insert_index + insert_count;
- size_type new_size = storage_view.size + insert_count;
+ SizeType<A> insert_index =
+ std::distance(ConstIterator<A>(storage_view.data), pos);
+ SizeType<A> insert_end_index = insert_index + insert_count;
+ SizeType<A> new_size = storage_view.size + insert_count;
if (new_size > storage_view.capacity) {
- AllocationTransaction allocation_tx(GetAllocPtr());
- ConstructionTransaction construction_tx(GetAllocPtr());
- ConstructionTransaction move_construciton_tx(GetAllocPtr());
+ AllocationTransaction<A> allocation_tx(GetAllocator());
+ ConstructionTransaction<A> construction_tx(GetAllocator());
+ ConstructionTransaction<A> move_construction_tx(GetAllocator());
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(storage_view.data));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
- size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
- pointer new_data = allocation_tx.Allocate(new_capacity);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
- construction_tx.Construct(new_data + insert_index, &values, insert_count);
+ construction_tx.Construct(new_data + insert_index, values, insert_count);
- move_construciton_tx.Construct(new_data, &move_values, insert_index);
+ move_construction_tx.Construct(new_data, move_values, insert_index);
- inlined_vector_internal::ConstructElements(
- GetAllocPtr(), new_data + insert_end_index, &move_values,
- storage_view.size - insert_index);
+ ConstructElements<A>(GetAllocator(), new_data + insert_end_index,
+ move_values, storage_view.size - insert_index);
- inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
- storage_view.size);
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+ storage_view.size);
- construction_tx.Commit();
- move_construciton_tx.Commit();
+ std::move(construction_tx).Commit();
+ std::move(move_construction_tx).Commit();
DeallocateIfAllocated();
- AcquireAllocatedData(&allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetAllocatedSize(new_size);
- return iterator(new_data + insert_index);
+ return Iterator<A>(new_data + insert_index);
} else {
- size_type move_construction_destination_index =
+ SizeType<A> move_construction_destination_index =
(std::max)(insert_end_index, storage_view.size);
- ConstructionTransaction move_construction_tx(GetAllocPtr());
+ ConstructionTransaction<A> move_construction_tx(GetAllocator());
- IteratorValueAdapter<MoveIterator> move_construction_values(
- MoveIterator(storage_view.data +
- (move_construction_destination_index - insert_count)));
- absl::Span<value_type> move_construction = {
+ IteratorValueAdapter<A, MoveIterator<A>> move_construction_values(
+ MoveIterator<A>(storage_view.data +
+ (move_construction_destination_index - insert_count)));
+ absl::Span<ValueType<A>> move_construction = {
storage_view.data + move_construction_destination_index,
new_size - move_construction_destination_index};
- pointer move_assignment_values = storage_view.data + insert_index;
- absl::Span<value_type> move_assignment = {
+ Pointer<A> move_assignment_values = storage_view.data + insert_index;
+ absl::Span<ValueType<A>> move_assignment = {
storage_view.data + insert_end_index,
move_construction_destination_index - insert_end_index};
- absl::Span<value_type> insert_assignment = {move_assignment_values,
- move_construction.size()};
+ absl::Span<ValueType<A>> insert_assignment = {move_assignment_values,
+ move_construction.size()};
- absl::Span<value_type> insert_construction = {
+ absl::Span<ValueType<A>> insert_construction = {
insert_assignment.data() + insert_assignment.size(),
insert_count - insert_assignment.size()};
move_construction_tx.Construct(move_construction.data(),
- &move_construction_values,
+ move_construction_values,
move_construction.size());
- for (pointer destination = move_assignment.data() + move_assignment.size(),
- last_destination = move_assignment.data(),
- source = move_assignment_values + move_assignment.size();
+ for (Pointer<A>
+ destination = move_assignment.data() + move_assignment.size(),
+ last_destination = move_assignment.data(),
+ source = move_assignment_values + move_assignment.size();
;) {
--destination;
--source;
@@ -730,30 +715,29 @@ auto Storage<T, N, A>::Insert(const_iterator pos, ValueAdapter values,
*destination = std::move(*source);
}
- inlined_vector_internal::AssignElements(insert_assignment.data(), &values,
- insert_assignment.size());
+ AssignElements<A>(insert_assignment.data(), values,
+ insert_assignment.size());
- inlined_vector_internal::ConstructElements(
- GetAllocPtr(), insert_construction.data(), &values,
- insert_construction.size());
+ ConstructElements<A>(GetAllocator(), insert_construction.data(), values,
+ insert_construction.size());
- move_construction_tx.Commit();
+ std::move(move_construction_tx).Commit();
AddSize(insert_count);
- return iterator(storage_view.data + insert_index);
+ return Iterator<A>(storage_view.data + insert_index);
}
}
template <typename T, size_t N, typename A>
template <typename... Args>
-auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
- StorageView storage_view = MakeStorageView();
- const auto n = storage_view.size;
+auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
+ StorageView<A> storage_view = MakeStorageView();
+ const SizeType<A> n = storage_view.size;
if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
// Fast path; new element fits.
- pointer last_ptr = storage_view.data + n;
- AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
- std::forward<Args>(args)...);
+ Pointer<A> last_ptr = storage_view.data + n;
+ AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
+ std::forward<Args>(args)...);
AddSize(1);
return *last_ptr;
}
@@ -763,130 +747,132 @@ auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
template <typename T, size_t N, typename A>
template <typename... Args>
-auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> reference {
- StorageView storage_view = MakeStorageView();
- AllocationTransaction allocation_tx(GetAllocPtr());
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(storage_view.data));
- size_type new_capacity = NextCapacity(storage_view.capacity);
- pointer construct_data = allocation_tx.Allocate(new_capacity);
- pointer last_ptr = construct_data + storage_view.size;
+auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
+ StorageView<A> storage_view = MakeStorageView();
+ AllocationTransaction<A> allocation_tx(GetAllocator());
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
+ SizeType<A> requested_capacity = NextCapacity(storage_view.capacity);
+ Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
+ Pointer<A> last_ptr = construct_data + storage_view.size;
// Construct new element.
- AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
- std::forward<Args>(args)...);
+ AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
+ std::forward<Args>(args)...);
// Move elements from old backing store to new backing store.
ABSL_INTERNAL_TRY {
- inlined_vector_internal::ConstructElements(
- GetAllocPtr(), allocation_tx.GetData(), &move_values,
- storage_view.size);
+ ConstructElements<A>(GetAllocator(), allocation_tx.GetData(), move_values,
+ storage_view.size);
}
ABSL_INTERNAL_CATCH_ANY {
- AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
+ AllocatorTraits<A>::destroy(GetAllocator(), last_ptr);
ABSL_INTERNAL_RETHROW;
}
// Destroy elements in old backing store.
- inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
- storage_view.size);
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+ storage_view.size);
DeallocateIfAllocated();
- AcquireAllocatedData(&allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
AddSize(1);
return *last_ptr;
}
template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::Erase(const_iterator from, const_iterator to)
- -> iterator {
- StorageView storage_view = MakeStorageView();
+auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
+ -> Iterator<A> {
+ StorageView<A> storage_view = MakeStorageView();
- size_type erase_size = std::distance(from, to);
- size_type erase_index =
- std::distance(const_iterator(storage_view.data), from);
- size_type erase_end_index = erase_index + erase_size;
+ SizeType<A> erase_size = std::distance(from, to);
+ SizeType<A> erase_index =
+ std::distance(ConstIterator<A>(storage_view.data), from);
+ SizeType<A> erase_end_index = erase_index + erase_size;
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(storage_view.data + erase_end_index));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data + erase_end_index));
- inlined_vector_internal::AssignElements(storage_view.data + erase_index,
- &move_values,
- storage_view.size - erase_end_index);
+ AssignElements<A>(storage_view.data + erase_index, move_values,
+ storage_view.size - erase_end_index);
- inlined_vector_internal::DestroyElements(
- GetAllocPtr(), storage_view.data + (storage_view.size - erase_size),
+ DestroyAdapter<A>::DestroyElements(
+ GetAllocator(), storage_view.data + (storage_view.size - erase_size),
erase_size);
SubtractSize(erase_size);
- return iterator(storage_view.data + erase_index);
+ return Iterator<A>(storage_view.data + erase_index);
}
template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
- StorageView storage_view = MakeStorageView();
+auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
+ StorageView<A> storage_view = MakeStorageView();
if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;
- AllocationTransaction allocation_tx(GetAllocPtr());
+ AllocationTransaction<A> allocation_tx(GetAllocator());
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(storage_view.data));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
- size_type new_capacity =
+ SizeType<A> new_requested_capacity =
ComputeCapacity(storage_view.capacity, requested_capacity);
- pointer new_data = allocation_tx.Allocate(new_capacity);
+ Pointer<A> new_data = allocation_tx.Allocate(new_requested_capacity);
- inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data,
- &move_values, storage_view.size);
+ ConstructElements<A>(GetAllocator(), new_data, move_values,
+ storage_view.size);
- inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
- storage_view.size);
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+ storage_view.size);
DeallocateIfAllocated();
- AcquireAllocatedData(&allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
template <typename T, size_t N, typename A>
auto Storage<T, N, A>::ShrinkToFit() -> void {
// May only be called on allocated instances!
- assert(GetIsAllocated());
+ ABSL_HARDENING_ASSERT(GetIsAllocated());
- StorageView storage_view{GetAllocatedData(), GetSize(),
- GetAllocatedCapacity()};
+ StorageView<A> storage_view{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()};
if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
- AllocationTransaction allocation_tx(GetAllocPtr());
+ AllocationTransaction<A> allocation_tx(GetAllocator());
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(storage_view.data));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
- pointer construct_data;
+ Pointer<A> construct_data;
if (storage_view.size > GetInlinedCapacity()) {
- size_type new_capacity = storage_view.size;
- construct_data = allocation_tx.Allocate(new_capacity);
+ SizeType<A> requested_capacity = storage_view.size;
+ construct_data = allocation_tx.Allocate(requested_capacity);
+ if (allocation_tx.GetCapacity() >= storage_view.capacity) {
+ // Already using the smallest available heap allocation.
+ return;
+ }
} else {
construct_data = GetInlinedData();
}
ABSL_INTERNAL_TRY {
- inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
- &move_values, storage_view.size);
+ ConstructElements<A>(GetAllocator(), construct_data, move_values,
+ storage_view.size);
}
ABSL_INTERNAL_CATCH_ANY {
- SetAllocatedData(storage_view.data, storage_view.capacity);
+ SetAllocation({storage_view.data, storage_view.capacity});
ABSL_INTERNAL_RETHROW;
}
- inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
- storage_view.size);
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+ storage_view.size);
- AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
- storage_view.capacity);
+ MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
+ storage_view.capacity);
if (allocation_tx.DidAllocate()) {
- AcquireAllocatedData(&allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
} else {
UnsetIsAllocated();
}
@@ -895,7 +881,7 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
template <typename T, size_t N, typename A>
auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
using std::swap;
- assert(this != other_storage_ptr);
+ ABSL_HARDENING_ASSERT(this != other_storage_ptr);
if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
swap(data_.allocated, other_storage_ptr->data_.allocated);
@@ -904,20 +890,20 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
Storage* large_ptr = other_storage_ptr;
if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
- for (size_type i = 0; i < small_ptr->GetSize(); ++i) {
+ for (SizeType<A> i = 0; i < small_ptr->GetSize(); ++i) {
swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
}
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize()));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(large_ptr->GetInlinedData() + small_ptr->GetSize()));
- inlined_vector_internal::ConstructElements(
- large_ptr->GetAllocPtr(),
- small_ptr->GetInlinedData() + small_ptr->GetSize(), &move_values,
- large_ptr->GetSize() - small_ptr->GetSize());
+ ConstructElements<A>(large_ptr->GetAllocator(),
+ small_ptr->GetInlinedData() + small_ptr->GetSize(),
+ move_values,
+ large_ptr->GetSize() - small_ptr->GetSize());
- inlined_vector_internal::DestroyElements(
- large_ptr->GetAllocPtr(),
+ DestroyAdapter<A>::DestroyElements(
+ large_ptr->GetAllocator(),
large_ptr->GetInlinedData() + small_ptr->GetSize(),
large_ptr->GetSize() - small_ptr->GetSize());
} else {
@@ -925,37 +911,37 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
Storage* inlined_ptr = other_storage_ptr;
if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
- StorageView allocated_storage_view{allocated_ptr->GetAllocatedData(),
- allocated_ptr->GetSize(),
- allocated_ptr->GetAllocatedCapacity()};
+ StorageView<A> allocated_storage_view{
+ allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(),
+ allocated_ptr->GetAllocatedCapacity()};
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(inlined_ptr->GetInlinedData()));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(inlined_ptr->GetInlinedData()));
ABSL_INTERNAL_TRY {
- inlined_vector_internal::ConstructElements(
- inlined_ptr->GetAllocPtr(), allocated_ptr->GetInlinedData(),
- &move_values, inlined_ptr->GetSize());
+ ConstructElements<A>(inlined_ptr->GetAllocator(),
+ allocated_ptr->GetInlinedData(), move_values,
+ inlined_ptr->GetSize());
}
ABSL_INTERNAL_CATCH_ANY {
- allocated_ptr->SetAllocatedData(allocated_storage_view.data,
- allocated_storage_view.capacity);
+ allocated_ptr->SetAllocation(Allocation<A>{
+ allocated_storage_view.data, allocated_storage_view.capacity});
ABSL_INTERNAL_RETHROW;
}
- inlined_vector_internal::DestroyElements(inlined_ptr->GetAllocPtr(),
- inlined_ptr->GetInlinedData(),
- inlined_ptr->GetSize());
+ DestroyAdapter<A>::DestroyElements(inlined_ptr->GetAllocator(),
+ inlined_ptr->GetInlinedData(),
+ inlined_ptr->GetSize());
- inlined_ptr->SetAllocatedData(allocated_storage_view.data,
- allocated_storage_view.capacity);
+ inlined_ptr->SetAllocation(Allocation<A>{allocated_storage_view.data,
+ allocated_storage_view.capacity});
}
swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
- swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr());
+ swap(GetAllocator(), other_storage_ptr->GetAllocator());
}
-// End ignore "maybe-uninitialized"
+// End ignore "array-bounds"
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc
index 1d7158ff..54e5d5bb 100644
--- a/absl/container/internal/layout_test.cc
+++ b/absl/container/internal/layout_test.cc
@@ -1350,7 +1350,13 @@ TEST(Layout, CustomAlignment) {
TEST(Layout, OverAligned) {
constexpr size_t M = alignof(max_align_t);
constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
+#ifdef __GNUC__
+ // Using __attribute__ ((aligned ())) instead of alignas to bypass a gcc bug:
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89357
+ __attribute__((aligned(2 * M))) unsigned char p[x.AllocSize()];
+#else
alignas(2 * M) unsigned char p[x.AllocSize()];
+#endif
EXPECT_EQ(2 * M + 3, x.AllocSize());
EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M));
}
diff --git a/absl/container/internal/node_hash_policy.h b/absl/container/internal/node_slot_policy.h
index 4617162f..baba5743 100644
--- a/absl/container/internal/node_hash_policy.h
+++ b/absl/container/internal/node_slot_policy.h
@@ -30,8 +30,8 @@
// It may also optionally define `value()` and `apply()`. For documentation on
// these, see hash_policy_traits.h.
-#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
-#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#ifndef ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_
+#define ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_
#include <cassert>
#include <cstddef>
@@ -46,7 +46,7 @@ ABSL_NAMESPACE_BEGIN
namespace container_internal {
template <class Reference, class Policy>
-struct node_hash_policy {
+struct node_slot_policy {
static_assert(std::is_lvalue_reference<Reference>::value, "");
using slot_type = typename std::remove_cv<
@@ -89,4 +89,4 @@ struct node_hash_policy {
ABSL_NAMESPACE_END
} // namespace absl
-#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#endif // ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_
diff --git a/absl/container/internal/node_hash_policy_test.cc b/absl/container/internal/node_slot_policy_test.cc
index 84aabba9..51b7467b 100644
--- a/absl/container/internal/node_hash_policy_test.cc
+++ b/absl/container/internal/node_slot_policy_test.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/node_slot_policy.h"
#include <memory>
@@ -27,7 +27,7 @@ namespace {
using ::testing::Pointee;
-struct Policy : node_hash_policy<int&, Policy> {
+struct Policy : node_slot_policy<int&, Policy> {
using key_type = int;
using init_type = int;
diff --git a/absl/container/internal/raw_hash_map.h b/absl/container/internal/raw_hash_map.h
index 0a02757d..c7df2efc 100644
--- a/absl/container/internal/raw_hash_map.h
+++ b/absl/container/internal/raw_hash_map.h
@@ -51,8 +51,9 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
using key_arg = typename KeyArgImpl::template type<K, key_type>;
static_assert(!std::is_reference<key_type>::value, "");
- // TODO(alkis): remove this assertion and verify that reference mapped_type is
- // supported.
+
+ // TODO(b/187807849): Evaluate whether to support reference mapped_type and
+ // remove this assertion if/when it is supported.
static_assert(!std::is_reference<mapped_type>::value, "");
using iterator = typename raw_hash_map::raw_hash_set::iterator;
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index bfef071f..c63a2e02 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -23,7 +23,17 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+// A single block of empty control bytes for tables without any slots allocated.
+// This enables removing a branch in the hot path of find().
+alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = {
+ ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t Group::kWidth;
+#endif
// Returns "random" seed.
inline size_t RandomSeed() {
@@ -37,24 +47,24 @@ inline size_t RandomSeed() {
return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
}
-bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
+bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
// To avoid problems with weak hashes and single bit tests, we use % 13.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
}
-void ConvertDeletedToEmptyAndFullToDeleted(
- ctrl_t* ctrl, size_t capacity) {
- assert(ctrl[capacity] == kSentinel);
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
+ assert(ctrl[capacity] == ctrl_t::kSentinel);
assert(IsValidCapacity(capacity));
- for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
+ for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
}
// Copy the cloned ctrl bytes.
- std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
- ctrl[capacity] = kSentinel;
+ std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
+ ctrl[capacity] = ctrl_t::kSentinel;
}
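
The `NumClonedBytes()` copy maintains the invariant that the control bytes after the sentinel mirror the first `kWidth - 1` control bytes, so a group loaded near the end of the array still sees valid bytes. A sketch of that invariant, under the assumption that `NumClonedBytes()` equals `Group::kWidth - 1` (here 15):

    #include <cassert>
    #include <cstring>

    int main() {
      constexpr int kWidth = 16;     // assumed group width
      constexpr int kCapacity = 15;  // example table capacity
      // Layout: ctrl[kCapacity], then the sentinel, then kWidth - 1 clones.
      signed char ctrl[kCapacity + 1 + (kWidth - 1)];
      std::memset(ctrl, -128, sizeof(ctrl));  // every byte starts as kEmpty
      ctrl[kCapacity] = -1;                   // kSentinel

      // The memcpy from the code above, with NumClonedBytes() == kWidth - 1.
      std::memcpy(ctrl + kCapacity + 1, ctrl, kWidth - 1);

      for (int i = 0; i != kWidth - 1; ++i) {
        assert(ctrl[kCapacity + 1 + i] == ctrl[i]);  // clones mirror the front
      }
    }
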
-
+// Extern template instantiation for inline function.
+template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 8615de8b..ea912f83 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -53,40 +53,121 @@
//
// IMPLEMENTATION DETAILS
//
-// The table stores elements inline in a slot array. In addition to the slot
-// array the table maintains some control state per slot. The extra state is one
-// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
-// the hash of an occupied slot. The table is split into logical groups of
-// slots, like so:
+// # Table Layout
+//
+// A raw_hash_set's backing array consists of control bytes followed by slots
+// that may or may not contain objects.
+//
+// The layout of the backing array, for `capacity` slots, is thus, as a
+// pseudo-struct:
+//
+// struct BackingArray {
+// // Control bytes for the "real" slots.
+// ctrl_t ctrl[capacity];
+// // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
+// // stop and serves no other purpose.
+// ctrl_t sentinel;
+// // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
+// // that if a probe sequence picks a value near the end of `ctrl`,
+// // `Group` will have valid control bytes to look at.
+// ctrl_t clones[kWidth - 1];
+// // The actual slot data.
+// slot_type slots[capacity];
+// };
+//
+// The length of this array is computed by `AllocSize()` below.
+//
+// Control bytes (`ctrl_t`) are bytes (collected into groups of a
+// platform-specific size) that define the state of the corresponding slot in
+// the slot array. Group manipulation is tightly optimized to be as efficient
+// as possible: SSE and friends on x86, clever bit operations on other arches.
//
// Group 1 Group 2 Group 3
// +---------------+---------------+---------------+
// | | | | | | | | | | | | | | | | | | | | | | | | |
// +---------------+---------------+---------------+
//
-// On lookup the hash is split into two parts:
-// - H2: 7 bits (those stored in the control bytes)
-// - H1: the rest of the bits
-// The groups are probed using H1. For each group the slots are matched to H2 in
-// parallel. Because H2 is 7 bits (128 states) and the number of slots per group
-// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit.
+// Each control byte is either a special value for empty slots, deleted slots
+// (sometimes called *tombstones*), and a special end-of-table marker used by
+// iterators, or, if occupied, seven bits (H2) from the hash of the value in the
+// corresponding slot.
+//
+// Storing control bytes in a separate array also has beneficial cache effects,
+// since more logical slots will fit into a cache line.
+//
+// # Hashing
+//
+// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
+// `H1(hash(x))` is an index into `slots`, and essentially the starting point
+// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
+// objects that cannot possibly be the one we are looking for.
+//
+// # Table operations.
+//
+// The key operations are `insert`, `find`, and `erase`.
+//
+// Since `insert` and `erase` are implemented in terms of `find`, we describe
+// `find` first. To `find` a value `x`, we compute `hash(x)`. From
+// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
+// group of slots in some interesting order.
//
-// On insert, once the right group is found (as in lookup), its slots are
-// filled in order.
+// We now walk through these indices. At each index, we select the entire group
+// starting with that index and extract potential candidates: occupied slots
+// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
+// group, we stop and return an error. Each candidate slot `y` is compared with
+// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
+// next probe index. Tombstones effectively behave like full slots that never
+// match the value we're looking for.
//
-// On erase a slot is cleared. In case the group did not have any empty slots
-// before the erase, the erased slot is marked as deleted.
+// The `H2` bits ensure when we compare a slot to an object with `==`, we are
+// likely to have actually found the object. That is, the chance is low that
+// `==` is called and returns `false`. Thus, when we search for an object, we
+// are unlikely to call `==` many times. This likelihood can be analyzed as
+// follows (assuming that H2 is a random enough hash function).
//
-// Groups without empty slots (but maybe with deleted slots) extend the probe
-// sequence. The probing algorithm is quadratic. Given N the number of groups,
-// the probing function for the i'th probe is:
+// Let's assume that there are `k` "wrong" objects that must be examined in a
+// probe sequence. For example, when doing a `find` on an object that is in the
+// table, `k` is the number of objects between the start of the probe sequence
+// and the final found object (not including the final found object). The
+// expected number of objects with an H2 match is then `k/128`. Measurements
+// and analysis indicate that even at high load factors, `k` is less than 32,
+// meaning that the number of "false positive" comparisons we must perform is
+// less than 1/8 per `find`.
+
+// `insert` is implemented in terms of `unchecked_insert`, which inserts a
+// value presumed to not be in the table (violating this requirement will cause
+// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
+// it, we construct a `probe_seq` once again, and use it to find the first
+// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
+// first such slot in the group and mark it as full with `x`'s H2.
//
-// P(0) = H1 % N
+// To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and
+// perform a `find` to see if it's already present; if it is, we're done. If
+// it's not, we may decide the table is getting overcrowded (i.e. the load
+// factor is greater than 7/8 for big tables; `is_small()` tables use a max load
+// factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
+// each element of the table into the new array (we know that no insertion here
+// will insert an already-present value), and discard the old backing array. At
+// this point, we may `unchecked_insert` the value `x`.
//
-// P(i) = (P(i - 1) + i) % N
+// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
+// presents a viable, initialized slot pointee to the caller.
//
-// This probing function guarantees that after N probes, all the groups of the
-// table will be probed exactly once.
+// `erase` is implemented in terms of `erase_at`, which takes an index to a
+// slot. Given an offset, we simply create a tombstone and destroy its contents.
+// If we can prove that the slot would not appear in a probe sequence, we can
+// mark the slot as empty instead. We can prove this by observing that if a
+// group has any empty slots, it has never been full (assuming we never create
+// an empty slot in a group with no empties, which this heuristic guarantees we
+// never do) and find would stop at this group anyway (since it does not probe
+// beyond groups with empties).
+//
+// `erase` is `erase_at` composed with `find`: if we
+// have a value `x`, we can perform a `find`, and then `erase_at` the resulting
+// slot.
+//
+// To iterate, we simply traverse the array, skipping empty and deleted slots
+// and stopping when we hit a `kSentinel`.
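
A simplified sketch of the H1/H2 split described above; the real `H1` also mixes in a per-table seed derived from the control pointer, which is omitted here:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // H1: everything above the low 7 bits; selects the starting probe group.
    inline std::size_t H1(std::size_t hash) { return hash >> 7; }

    // H2: the low 7 bits; stored in the control byte of a full slot.
    inline std::uint8_t H2(std::size_t hash) {
      return static_cast<std::uint8_t>(hash & 0x7F);
    }

    int main() {
      std::size_t hash = 0x9E3779B97F4A7C15u;        // arbitrary example hash
      assert(H2(hash) < 128);                        // always fits in 7 bits
      assert(((H1(hash) << 7) | H2(hash)) == hash);  // the split is lossless
    }
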
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
@@ -102,7 +183,9 @@
#include <type_traits>
#include <utility>
+#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
+#include "absl/base/internal/prefetch.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/common.h"
@@ -111,13 +194,27 @@
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
#include "absl/container/internal/hashtablez_sampler.h"
-#include "absl/container/internal/have_sse.h"
-#include "absl/container/internal/layout.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
+#include <arm_neon.h>
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
@@ -132,14 +229,40 @@ template <typename AllocType>
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
std::false_type /* propagate_on_container_swap */) {}
+// The state for a probe sequence.
+//
+// Currently, the sequence is a triangular progression of the form
+//
+// p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
+//
+// The use of `Width` ensures that each probe step does not overlap groups;
+// the sequence effectively outputs the addresses of *groups* (although not
+// necessarily aligned to any boundary). The `Group` machinery allows us
+// to check an entire group with minimal branching.
+//
+// Wrapping around at `mask + 1` is important, but not for the obvious reason.
+// As described above, the first few entries of the control byte array
+// are mirrored at the end of the array, which `Group` will find and use
+// for selecting candidates. However, when those candidates' slots are
+// actually inspected, there are no corresponding slots for the cloned bytes,
+// so we need to make sure we've treated those offsets as "wrapping around".
+//
+// It turns out that this probe sequence visits every group exactly once if the
+// number of groups is a power of two, since (i^2+i)/2 is a bijection in
+// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
template <size_t Width>
class probe_seq {
public:
+ // Creates a new probe sequence using `hash` as the initial value of the
+ // sequence and `mask` (usually the capacity of the table) as the mask to
+ // apply to each value in the progression.
probe_seq(size_t hash, size_t mask) {
assert(((mask + 1) & mask) == 0 && "not a mask");
mask_ = mask;
offset_ = hash & mask_;
}
+
+ // The offset within the table, i.e., the value `p(i)` above.
size_t offset() const { return offset_; }
size_t offset(size_t i) const { return (offset_ + i) & mask_; }
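
Under the assumptions stated in the comment (a power-of-two number of groups; here `Width` is 16 and the table has 64 slots), the triangular progression lands on each group start exactly once before repeating. A standalone check of that property:

    #include <cassert>
    #include <cstddef>
    #include <set>

    int main() {
      constexpr std::size_t kWidth = 16;
      const std::size_t mask = 63;  // 64 slots -> 4 groups
      const std::size_t hash = 12345;

      std::size_t offset = hash & mask;  // p(0)
      std::size_t index = 0;
      std::set<std::size_t> seen;

      const std::size_t num_groups = (mask + 1) / kWidth;
      for (std::size_t i = 0; i != num_groups; ++i) {
        seen.insert(offset);
        index += kWidth;                  // as in probe_seq::next()
        offset = (offset + index) & mask;
      }
      assert(seen.size() == num_groups);  // every group visited exactly once
    }
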
@@ -148,7 +271,7 @@ class probe_seq {
offset_ += index_;
offset_ &= mask_;
}
- // 0-based probe index. The i-th probe in the probe sequence.
+ // 0-based probe index, a multiple of `Width`.
size_t index() const { return index_; }
private:
@@ -172,9 +295,9 @@ struct IsDecomposable : std::false_type {};
template <class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable<
- absl::void_t<decltype(
- Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
- std::declval<Ts>()...))>,
+ absl::void_t<decltype(Policy::apply(
+ RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
+ std::declval<Ts>()...))>,
Policy, Hash, Eq, Ts...> : std::true_type {};
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
@@ -190,57 +313,84 @@ constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
template <typename T>
uint32_t TrailingZeros(T x) {
- ABSL_INTERNAL_ASSUME(x != 0);
- return countr_zero(x);
+ ABSL_ASSUME(x != 0);
+ return static_cast<uint32_t>(countr_zero(x));
}
-// An abstraction over a bitmask. It provides an easy way to iterate through the
-// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
-// this is a true bitmask. On non-SSE, platforms the arithematic used to
-// emulate the SSE behavior works in bytes (Shift=3) and leaves each bytes as
-// either 0x00 or 0x80.
+// An abstract bitmask, such as that emitted by a SIMD instruction.
//
-// For example:
-// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
-// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+// Specifically, this type implements a simple bitset whose representation is
+// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
+// of abstract bits in the bitset, while `Shift` is the log-base-two of the
+// width of an abstract bit in the representation.
+// This mask provides operations for any number of real bits set in an abstract
+// bit. To add iteration on top of that, the implementation must guarantee that
+// no more than one real bit is set in each abstract bit.
template <class T, int SignificantBits, int Shift = 0>
-class BitMask {
- static_assert(std::is_unsigned<T>::value, "");
- static_assert(Shift == 0 || Shift == 3, "");
-
+class NonIterableBitMask {
public:
- // These are useful for unit tests (gunit).
- using value_type = int;
- using iterator = BitMask;
- using const_iterator = BitMask;
+ explicit NonIterableBitMask(T mask) : mask_(mask) {}
- explicit BitMask(T mask) : mask_(mask) {}
- BitMask& operator++() {
- mask_ &= (mask_ - 1);
- return *this;
- }
- explicit operator bool() const { return mask_ != 0; }
- int operator*() const { return LowestBitSet(); }
+ explicit operator bool() const { return this->mask_ != 0; }
+
+ // Returns the index of the lowest *abstract* bit set in `self`.
uint32_t LowestBitSet() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
+
+ // Returns the index of the highest *abstract* bit set in `self`.
uint32_t HighestBitSet() const {
return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
}
- BitMask begin() const { return *this; }
- BitMask end() const { return BitMask(0); }
-
+ // Returns the number of trailing zero *abstract* bits.
uint32_t TrailingZeros() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
+ // Returns the number of leading zero *abstract* bits.
uint32_t LeadingZeros() const {
constexpr int total_significant_bits = SignificantBits << Shift;
constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
- return countl_zero(mask_ << extra_bits) >> Shift;
+ return static_cast<uint32_t>(countl_zero(mask_ << extra_bits)) >> Shift;
}
+ T mask_;
+};
+
+// A mask whose set abstract bits can be iterated over.
+//
+// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
+// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
+// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
+// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
+//
+// For example:
+// for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
+// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+template <class T, int SignificantBits, int Shift = 0>
+class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
+ using Base = NonIterableBitMask<T, SignificantBits, Shift>;
+ static_assert(std::is_unsigned<T>::value, "");
+ static_assert(Shift == 0 || Shift == 3, "");
+
+ public:
+ explicit BitMask(T mask) : Base(mask) {}
+ // BitMask is an iterator over the indices of its abstract bits.
+ using value_type = int;
+ using iterator = BitMask;
+ using const_iterator = BitMask;
+
+ BitMask& operator++() {
+ this->mask_ &= (this->mask_ - 1);
+ return *this;
+ }
+
+ uint32_t operator*() const { return Base::LowestBitSet(); }
+
+ BitMask begin() const { return *this; }
+ BitMask end() const { return BitMask(0); }
+
private:
friend bool operator==(const BitMask& a, const BitMask& b) {
return a.mask_ == b.mask_;
@@ -248,75 +398,127 @@ class BitMask {
friend bool operator!=(const BitMask& a, const BitMask& b) {
return a.mask_ != b.mask_;
}
-
- T mask_;
};
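
The two encodings documented above can be exercised with plain integers; a sketch (not using the class itself) that reproduces the comment's iteration results:

    #include <cassert>
    #include <cstdint>

    // Index of the lowest set bit, scaled down by the abstract-bit width 2^Shift.
    template <int Shift, class T>
    int LowestAbstractBit(T mask) {
      int i = 0;
      while (!(mask & 1)) { mask >>= 1; ++i; }
      return i >> Shift;
    }

    int main() {
      // Shift = 0: BitMask<uint32_t, 16>(0b101) yields 0, then 2.
      std::uint32_t m16 = 0b101;
      assert(LowestAbstractBit<0>(m16) == 0);
      m16 &= m16 - 1;  // what operator++ does: clear the lowest set bit
      assert(LowestAbstractBit<0>(m16) == 2);

      // Shift = 3: abstract bits are bytes 0x00/0x80, so
      // BitMask<uint64_t, 8, 3>(0x0000000080800000) yields 2, then 3.
      std::uint64_t m8 = 0x0000000080800000;
      assert(LowestAbstractBit<3>(m8) == 2);
      m8 &= m8 - 1;
      assert(LowestAbstractBit<3>(m8) == 3);
    }
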
-using ctrl_t = signed char;
using h2_t = uint8_t;
// The values here are selected for maximum performance. See the static asserts
// below for details.
-enum Ctrl : ctrl_t {
+
+// A `ctrl_t` is a single control byte, which can have one of four
+// states: empty, deleted, full (which has an associated seven-bit h2_t value)
+// and the sentinel. They have the following bit patterns:
+//
+// empty: 1 0 0 0 0 0 0 0
+// deleted: 1 1 1 1 1 1 1 0
+// full: 0 h h h h h h h // h represents the hash bits.
+// sentinel: 1 1 1 1 1 1 1 1
+//
+// These values are specifically tuned for SSE-flavored SIMD.
+// The static_asserts below detail the source of these choices.
+//
+// We use an enum class so that when strict aliasing is enabled, the compiler
+// knows ctrl_t doesn't alias other types.
+enum class ctrl_t : int8_t {
kEmpty = -128, // 0b10000000
kDeleted = -2, // 0b11111110
kSentinel = -1, // 0b11111111
};
static_assert(
- kEmpty & kDeleted & kSentinel & 0x80,
+ (static_cast<int8_t>(ctrl_t::kEmpty) &
+ static_cast<int8_t>(ctrl_t::kDeleted) &
+ static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
"Special markers need to have the MSB to make checking for them efficient");
-static_assert(kEmpty < kSentinel && kDeleted < kSentinel,
- "kEmpty and kDeleted must be smaller than kSentinel to make the "
- "SIMD test of IsEmptyOrDeleted() efficient");
-static_assert(kSentinel == -1,
- "kSentinel must be -1 to elide loading it from memory into SIMD "
- "registers (pcmpeqd xmm, xmm)");
-static_assert(kEmpty == -128,
- "kEmpty must be -128 to make the SIMD check for its "
+static_assert(
+ ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
+ "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
+ "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
+static_assert(
+ ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
+ "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
+ "registers (pcmpeqd xmm, xmm)");
+static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
+ "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
"existence efficient (psignb xmm, xmm)");
-static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F,
- "kEmpty and kDeleted must share an unset bit that is not shared "
- "by kSentinel to make the scalar test for MatchEmptyOrDeleted() "
- "efficient");
-static_assert(kDeleted == -2,
- "kDeleted must be -2 to make the implementation of "
+static_assert(
+ (~static_cast<int8_t>(ctrl_t::kEmpty) &
+ ~static_cast<int8_t>(ctrl_t::kDeleted) &
+ static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
+ "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
+ "shared by ctrl_t::kSentinel to make the scalar test for "
+ "MaskEmptyOrDeleted() efficient");
+static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
+ "ctrl_t::kDeleted must be -2 to make the implementation of "
"ConvertSpecialToEmptyAndFullToDeleted efficient");
-// A single block of empty control bytes for tables without any slots allocated.
-// This enables removing a branch in the hot path of find().
+ABSL_DLL extern const ctrl_t kEmptyGroup[16];
+
+// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {
- alignas(16) static constexpr ctrl_t empty_group[] = {
- kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
- kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty};
- return const_cast<ctrl_t*>(empty_group);
+ // Const must be cast away here; no uses of this function will actually write
+ // to it, because it is only used for empty tables.
+ return const_cast<ctrl_t*>(kEmptyGroup);
}
// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
// randomize insertion order within groups.
-bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl);
+bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
-// Returns a hash seed.
+// Returns a per-table hash salt, which changes on resize. This gets mixed into
+// H1 to randomize iteration order per-table.
//
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
// non-determinism of iteration order in most cases.
-inline size_t HashSeed(const ctrl_t* ctrl) {
+inline size_t PerTableSalt(const ctrl_t* ctrl) {
// The low bits of the pointer have little or no entropy because of
// alignment. We shift the pointer to try to use higher entropy bits. A
// good number seems to be 12 bits, because that aligns with page size.
return reinterpret_cast<uintptr_t>(ctrl) >> 12;
}
-
+// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
- return (hash >> 7) ^ HashSeed(ctrl);
+ return (hash >> 7) ^ PerTableSalt(ctrl);
}
-inline ctrl_t H2(size_t hash) { return hash & 0x7F; }
-inline bool IsEmpty(ctrl_t c) { return c == kEmpty; }
-inline bool IsFull(ctrl_t c) { return c >= 0; }
-inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
-inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
+// Extracts the H2 portion of a hash: the 7 bits not used for H1.
+//
+// These bits are used as the value of an occupied control byte.
+inline h2_t H2(size_t hash) { return hash & 0x7F; }
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+// Helpers for checking the state of a control byte.
+inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
+inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
+inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
+inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
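To make the H1/H2 split and these predicates concrete, here is a hedged, standalone sketch (the hash value is arbitrary, the per-table salt is omitted, and a 64-bit size_t is assumed):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t hash = 0x9E3779B97F4A7C15ULL;  // arbitrary example hash
      uint64_t h1 = hash >> 7;   // upper 57 bits: drive the probe sequence
      uint8_t h2 = hash & 0x7F;  // lower 7 bits: stored in a full control byte
      std::printf("h1=0x%llx h2=0x%02x\n", (unsigned long long)h1, h2);

      // The predicates above, restated on a raw signed byte.
      signed char c = static_cast<signed char>(h2);
      std::printf("IsFull=%d IsEmptyOrDeleted=%d\n", c >= 0, c < -1);  // 1 0
    }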
+
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+// Quick reference guide for intrinsics used below:
+//
+// * __m128i: An XMM (128-bit) word.
+//
+// * _mm_setzero_si128: Returns a zero vector.
+// * _mm_set1_epi8: Returns a vector with the same i8 in each lane.
+//
+// * _mm_subs_epi8: Saturating-subtracts two i8 vectors.
+// * _mm_and_si128: Ands two i128s together.
+// * _mm_or_si128: Ors two i128s together.
+// * _mm_andnot_si128: And-nots two i128s together.
+//
+// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
+// filling each lane with 0x00 or 0xff.
+// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
+//
+// * _mm_loadu_si128: Performs an unaligned load of an i128.
+// * _mm_storeu_si128: Performs an unaligned store of an i128.
+//
+// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first
+// argument if the corresponding lane of the second
+// argument is positive, negative, or zero, respectively.
+// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
+// bitmask consisting of those bits.
+// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low
+// four bits of each i8 lane in the second argument as
+// indices.
// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
@@ -345,30 +547,32 @@ struct GroupSse2Impl {
BitMask<uint32_t, kWidth> Match(h2_t hash) const {
auto match = _mm_set1_epi8(hash);
return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
+ static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
}
// Returns a bitmask representing the positions of empty slots.
- BitMask<uint32_t, kWidth> MatchEmpty() const {
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
- // This only works because kEmpty is -128.
- return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
+ NonIterableBitMask<uint32_t, kWidth> MaskEmpty() const {
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+ // This only works because ctrl_t::kEmpty is -128.
+ return NonIterableBitMask<uint32_t, kWidth>(
+ static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
#else
- return Match(static_cast<h2_t>(kEmpty));
+ auto match = _mm_set1_epi8(static_cast<h2_t>(ctrl_t::kEmpty));
+ return NonIterableBitMask<uint32_t, kWidth>(
+ static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
#endif
}
// Returns a bitmask representing the positions of empty or deleted slots.
- BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(kSentinel);
- return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
+ NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(static_cast<uint8_t>(ctrl_t::kSentinel));
+ return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
}
  // Returns the number of leading (lowest-indexed) empty or deleted elements
  // in the group.
uint32_t CountLeadingEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(kSentinel);
+ auto special = _mm_set1_epi8(static_cast<uint8_t>(ctrl_t::kSentinel));
return TrailingZeros(static_cast<uint32_t>(
_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
}
@@ -376,7 +580,7 @@ struct GroupSse2Impl {
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
auto msbs = _mm_set1_epi8(static_cast<char>(-128));
auto x126 = _mm_set1_epi8(126);
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
auto zero = _mm_setzero_si128();
@@ -390,6 +594,63 @@ struct GroupSse2Impl {
};
#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
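As a hedged illustration of the movemask idiom these intrinsics enable (requires SSE2, e.g. compile with -msse2; the byte value 0x21 is arbitrary):

    #include <emmintrin.h>
    #include <cstdio>

    int main() {
      unsigned char group[16] = {0x21, 0, 0, 0x21, 0, 0, 0, 0,
                                 0,    0, 0, 0,    0, 0, 0, 0x21};
      __m128i ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(group));
      __m128i match = _mm_set1_epi8(0x21);
      // One bit per byte lane; lanes 0, 3, and 15 match here.
      int mask = _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl));
      std::printf("0x%04x\n", mask);  // prints: 0x8009
    }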
+#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
+struct GroupAArch64Impl {
+ static constexpr size_t kWidth = 8;
+
+ explicit GroupAArch64Impl(const ctrl_t* pos) {
+ ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
+ }
+
+ BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+ uint8x8_t dup = vdup_n_u8(hash);
+ auto mask = vceq_u8(ctrl, dup);
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>(
+ vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs);
+ }
+
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
+ uint64_t mask =
+ vget_lane_u64(vreinterpret_u64_u8(
+ vceq_s8(vdup_n_s8(static_cast<h2_t>(ctrl_t::kEmpty)),
+ vreinterpret_s8_u8(ctrl))),
+ 0);
+ return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
+ }
+
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
+ uint64_t mask =
+ vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
+ vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
+ vreinterpret_s8_u8(ctrl))),
+ 0);
+ return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
+ }
+
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
+    // ctrl | ~(ctrl >> 7) will have the lowest bit of each byte set to zero
+    // for kEmpty and kDeleted. We mask off all other bits and count the
+    // number of trailing zeros.
+ // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
+ // so we should be fine.
+ constexpr uint64_t bits = 0x0101010101010101ULL;
+ return countr_zero((mask | ~(mask >> 7)) & bits) >> 3;
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = mask & msbs;
+ auto res = (~x + (x >> 7)) & ~lsbs;
+ little_endian::Store64(dst, res);
+ }
+
+ uint8x8_t ctrl;
+};
+#endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
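The byte-counting trick in CountLeadingEmptyOrDeleted above can be checked in plain scalar code. A self-contained sketch (assuming a nonzero intermediate value, since the GCC/Clang builtin used here is undefined on zero):

    #include <cstdint>
    #include <cstdio>

    int main() {
      constexpr uint64_t bits = 0x0101010101010101ULL;
      // Little-endian bytes: 0x80 (kEmpty), 0xFE (kDeleted), 0x11 (full), ...
      uint64_t mask = 0x000000000011FE80ULL;
      // Bit 0 of each byte survives only for bytes that are neither kEmpty
      // nor kDeleted, so the first set bit marks the first "stop" slot.
      uint64_t n = __builtin_ctzll((mask | ~(mask >> 7)) & bits) >> 3;
      std::printf("%llu\n", (unsigned long long)n);  // prints: 2
    }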
+
struct GroupPortableImpl {
static constexpr size_t kWidth = 8;
@@ -403,7 +664,7 @@ struct GroupPortableImpl {
//
// Caveat: there are false positives but:
// - they only occur if there is a real match
- // - they never occur on kEmpty, kDeleted, kSentinel
+ // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
// - they will be handled gracefully by subsequent checks in code
//
// Example:
@@ -416,19 +677,23 @@ struct GroupPortableImpl {
return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
}
- BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+ return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) &
+ msbs);
}
- BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+ return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) &
+ msbs);
}
uint32_t CountLeadingEmptyOrDeleted() const {
- constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
- return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
+    // ctrl | ~(ctrl >> 7) will have the lowest bit of each byte set to zero
+    // for kEmpty and kDeleted. We mask off all other bits and count the
+    // number of trailing zeros.
+ constexpr uint64_t bits = 0x0101010101010101ULL;
+ return countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> 3;
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
@@ -442,28 +707,40 @@ struct GroupPortableImpl {
uint64_t ctrl;
};
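A quick scalar check of the two SWAR masks above: per byte, MaskEmpty requires bit 7 set and bit 1 clear (only kEmpty = 0x80 qualifies), while MaskEmptyOrDeleted requires bit 7 set and bit 0 clear (kEmpty and kDeleted = 0xFE qualify; the sentinel 0xFF does not). Illustrative only:

    #include <cstdint>
    #include <cstdio>

    int main() {
      constexpr uint64_t msbs = 0x8080808080808080ULL;
      // Bytes, low to high: kEmpty, kDeleted, kSentinel, full (0x23), zeros.
      uint64_t ctrl = 0x0000000023FFFE80ULL;
      uint64_t empty = (ctrl & (~ctrl << 6)) & msbs;
      uint64_t empty_or_deleted = (ctrl & (~ctrl << 7)) & msbs;
      // MSB set in byte 0 only, then in bytes 0 and 1.
      std::printf("0x%016llx\n", (unsigned long long)empty);
      std::printf("0x%016llx\n", (unsigned long long)empty_or_deleted);
    }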
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
using Group = GroupSse2Impl;
+#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
+using Group = GroupAArch64Impl;
#else
using Group = GroupPortableImpl;
#endif
+// Returns the number of "cloned control bytes".
+//
+// This is the number of control bytes that are present both at the beginning
+// of the control byte array and at the end, such that we can create a
+// `Group::kWidth`-width probe window starting from any control byte.
+constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
+
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
+// Returns whether `n` is a valid capacity (i.e., number of slots).
+//
+// A valid capacity is a non-zero integer `2^m - 1`.
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+// Applies the following mapping to every byte in the control array:
+// * kDeleted -> kEmpty
+// * kEmpty -> kEmpty
+// * _ -> kDeleted
// PRECONDITION:
// IsValidCapacity(capacity)
-// ctrl[capacity] == kSentinel
-// ctrl[i] != kSentinel for all i < capacity
-// Applies mapping for every byte in ctrl:
-// DELETED -> EMPTY
-// EMPTY -> EMPTY
-// FULL -> DELETED
+// ctrl[capacity] == ctrl_t::kSentinel
+// ctrl[i] != ctrl_t::kSentinel for all i < capacity
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
-// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
+// Converts `n` into the next valid capacity, per `IsValidCapacity`.
inline size_t NormalizeCapacity(size_t n) {
return n ? ~size_t{} >> countl_zero(n) : 1;
}
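A hedged sketch of what NormalizeCapacity computes, assuming a 64-bit size_t and with a GCC/Clang builtin standing in for absl's countl_zero:

    #include <cstddef>
    #include <cstdio>

    size_t Normalize(size_t n) {
      // Fill in all bits below the highest set bit, yielding 2^m - 1.
      return n ? ~size_t{} >> __builtin_clzll(n) : 1;
    }

    int main() {
      std::printf("%zu %zu %zu %zu\n", Normalize(0), Normalize(1),
                  Normalize(2), Normalize(100));  // prints: 1 1 3 127
    }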
@@ -476,8 +753,8 @@ inline size_t NormalizeCapacity(size_t n) {
// never need to probe (the whole table fits in one group) so we don't need a
// load factor less than 1.
-// Given `capacity` of the table, returns the size (i.e. number of full slots)
-// at which we should grow the capacity.
+// Given `capacity`, applies the load factor; i.e., it returns the maximum
+// number of values we should put into the table before a resizing rehash.
inline size_t CapacityToGrowth(size_t capacity) {
assert(IsValidCapacity(capacity));
// `capacity*7/8`
@@ -487,8 +764,12 @@ inline size_t CapacityToGrowth(size_t capacity) {
}
return capacity - capacity / 8;
}
-// From desired "growth" to a lowerbound of the necessary capacity.
-// Might not be a valid one and requires NormalizeCapacity().
+
+// Given `growth`, "unapplies" the load factor to find how large the capacity
+// should be to stay within the load factor.
+//
+// This might not be a valid capacity and `NormalizeCapacity()` should be
+// called on this.
inline size_t GrowthToLowerboundCapacity(size_t growth) {
// `growth*8/7`
if (Group::kWidth == 8 && growth == 7) {
@@ -498,16 +779,31 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}
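The two functions above are designed to round-trip: applying the load factor to a valid capacity and then unapplying it lands back on the same capacity. A small check, assuming Group::kWidth == 16 so the growth == 7 special case never triggers:

    #include <cstdint>
    #include <cstdio>

    uint64_t ToGrowth(uint64_t capacity) { return capacity - capacity / 8; }
    uint64_t ToCapacity(uint64_t growth) { return growth + (growth - 1) / 7; }

    int main() {
      for (uint64_t cap : {15, 31, 63, 127}) {
        uint64_t growth = ToGrowth(cap);  // the 7/8 load factor
        std::printf("cap=%llu growth=%llu back=%llu\n",
                    (unsigned long long)cap, (unsigned long long)growth,
                    (unsigned long long)ToCapacity(growth));  // back == cap
      }
    }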
-inline void AssertIsFull(ctrl_t* ctrl) {
- ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
- "Invalid operation on iterator. The element might have "
- "been erased, or the table might have rehashed.");
+template <class InputIter>
+size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
+ size_t bucket_count) {
+ if (bucket_count != 0) {
+ return bucket_count;
+ }
+ using InputIterCategory =
+ typename std::iterator_traits<InputIter>::iterator_category;
+ if (std::is_base_of<std::random_access_iterator_tag,
+ InputIterCategory>::value) {
+ return GrowthToLowerboundCapacity(
+ static_cast<size_t>(std::distance(first, last)));
+ }
+ return 0;
}
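This helper is what lets the range constructor below presize the table from a random-access range. A hedged usage sketch via the public flat_hash_set wrapper (assuming this change is in place, the constructor reserves up front and insert(first, last) does not rehash):

    #include <vector>
    #include "absl/container/flat_hash_set.h"

    int main() {
      std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8};
      // bucket_count == 0: the capacity is derived from std::distance.
      absl::flat_hash_set<int> s(v.begin(), v.end());
      return s.size() == 8 ? 0 : 1;
    }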
+#define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, msg) \
+ ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) && msg)
+
inline void AssertIsValid(ctrl_t* ctrl) {
- ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
- "Invalid operation on iterator. The element might have "
- "been erased, or the table might have rehashed.");
+ ABSL_HARDENING_ASSERT(
+ (ctrl == nullptr || IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, the table might have rehashed, or this may "
+ "be an end() iterator.");
}
struct FindInfo {
@@ -515,42 +811,40 @@ struct FindInfo {
size_t probe_length;
};
-// The representation of the object has two modes:
-// - small: For capacities < kWidth-1
-// - large: For the rest.
+// Whether a table is "small". A small table fits entirely into a probing
+// group, i.e., has a capacity less than `Group::kWidth - 1`.
//
-// Differences:
-// - In small mode we are able to use the whole capacity. The extra control
-// bytes give us at least one "empty" control byte to stop the iteration.
-// This is important to make 1 a valid capacity.
+// In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
//
-// - In small mode only the first `capacity()` control bytes after the
-// sentinel are valid. The rest contain dummy kEmpty values that do not
-// represent a real slot. This is important to take into account on
-// find_first_non_full(), where we never try ShouldInsertBackwards() for
-// small tables.
+// In small mode only the first `capacity` control bytes after the sentinel
+// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
+// represent a real slot. This is important to take into account on
+// `find_first_non_full()`, where we never try
+// `ShouldInsertBackwards()` for small tables.
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
-inline probe_seq<Group::kWidth> probe(ctrl_t* ctrl, size_t hash,
+// Begins a probing operation on `ctrl`, using `hash`.
+inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
size_t capacity) {
return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}
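probe_seq (defined earlier in this header) advances by triangular multiples of the group width, which visits every group exactly once when the capacity is 2^m - 1. A standalone sketch of the offsets it produces, assuming Group::kWidth == 16 and H1 & capacity == 7:

    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr size_t capacity = 63;  // 2^6 - 1; also serves as the mask
      constexpr size_t width = 16;     // assumed Group::kWidth
      size_t offset = 7;
      for (size_t index = 0; index <= capacity;) {
        std::printf("%zu ", offset);  // prints: 7 23 55 39 -- each group once
        index += width;
        offset = (offset + index) & capacity;
      }
      std::printf("\n");
    }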
-// Probes the raw_hash_set with the probe sequence for hash and returns the
-// pointer to the first empty or deleted slot.
-// NOTE: this function must work with tables having both kEmpty and kDelete
-// in one group. Such tables appears during drop_deletes_without_resize.
+// Probes an array of control bits using a probe sequence derived from `hash`,
+// and returns the offset corresponding to the first deleted or empty slot.
+//
+// Behavior when the entire table is full is undefined.
//
-// This function is very useful when insertions happen and:
-// - the input is already a set
-// - there are enough slots
-// - the element with the hash is not in the table
-inline FindInfo find_first_non_full(ctrl_t* ctrl, size_t hash,
+// NOTE: this function must work with tables having both empty and deleted
+// slots in the same group. Such tables appear during `erase()`.
+template <typename = void>
+inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
size_t capacity) {
auto seq = probe(ctrl, hash, capacity);
while (true) {
Group g{ctrl + seq.offset()};
- auto mask = g.MatchEmptyOrDeleted();
+ auto mask = g.MaskEmptyOrDeleted();
if (mask) {
#if !defined(NDEBUG)
// We want to add entropy even when ASLR is not enabled.
@@ -564,10 +858,66 @@ inline FindInfo find_first_non_full(ctrl_t* ctrl, size_t hash,
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
seq.next();
- assert(seq.index() < capacity && "full table!");
+ assert(seq.index() <= capacity && "full table!");
+ }
+}
+
+// An extern template declaration for an inline function keeps the
+// possibility of inlining open. When the compiler decides not to inline it,
+// no symbol is emitted in the corresponding translation unit.
+extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
+
+// Sets `ctrl` to `{kEmpty, ..., kEmpty, kSentinel, kEmpty, ..., kEmpty}`,
+// marking the entire array as empty, with the sentinel at `ctrl[capacity]`.
+inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
+ size_t slot_size) {
+ std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
+ capacity + 1 + NumClonedBytes());
+ ctrl[capacity] = ctrl_t::kSentinel;
+ SanitizerPoisonMemoryRegion(slot, slot_size * capacity);
+}
+
+// Sets `ctrl[i]` to `h`.
+//
+// Unlike setting it directly, this function will perform bounds checks and
+// mirror the value to the cloned tail if necessary.
+inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
+ const void* slot, size_t slot_size) {
+ assert(i < capacity);
+
+ auto* slot_i = static_cast<const char*>(slot) + i * slot_size;
+ if (IsFull(h)) {
+ SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
+ } else {
+ SanitizerPoisonMemoryRegion(slot_i, slot_size);
}
+
+ ctrl[i] = h;
+ ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
}
+// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
+inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl,
+ const void* slot, size_t slot_size) {
+ SetCtrl(i, static_cast<ctrl_t>(h), capacity, ctrl, slot, slot_size);
+}
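The mirroring expression above is easiest to see with concrete numbers. A worked sketch assuming capacity == 15 and Group::kWidth == 16 (so NumClonedBytes() == 15): writes near the front of the table are copied to the cloned bytes just past the sentinel at ctrl[15].

    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr size_t kCloned = 15;    // NumClonedBytes() for kWidth == 16
      constexpr size_t kCapacity = 15;  // a valid capacity: 2^4 - 1
      for (size_t i : {0u, 1u, 14u}) {
        size_t mirror = ((i - kCloned) & kCapacity) + (kCloned & kCapacity);
        // prints mirrors 16, 17, and 30: the bytes after the sentinel.
        std::printf("ctrl[%zu] is also written at ctrl[%zu]\n", i, mirror);
      }
    }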
+
+// Given the capacity of a table, computes the offset (from the start of the
+// backing allocation) at which the slots begin.
+inline size_t SlotOffset(size_t capacity, size_t slot_align) {
+ assert(IsValidCapacity(capacity));
+ const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
+ return (num_control_bytes + slot_align - 1) & (~slot_align + 1);
+}
+
+// Given the capacity of a table, computes the total size of the backing
+// array.
+inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
+ return SlotOffset(capacity, slot_align) + capacity * slot_size;
+}
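A worked example of this layout arithmetic, assuming capacity 15, 8-byte slots, 8-byte slot alignment, and Group::kWidth == 16:

    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr size_t capacity = 15, slot_size = 8, slot_align = 8;
      constexpr size_t num_ctrl = capacity + 1 + 15;  // 31 control bytes
      // (~slot_align + 1) == -slot_align, so this rounds 31 up to 32.
      constexpr size_t slot_offset =
          (num_ctrl + slot_align - 1) & (~slot_align + 1);
      constexpr size_t alloc_size = slot_offset + capacity * slot_size;
      std::printf("slot_offset=%zu alloc_size=%zu\n", slot_offset,
                  alloc_size);  // prints: slot_offset=32 alloc_size=152
    }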
+
+// A SwissTable.
+//
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
@@ -624,13 +974,6 @@ class raw_hash_set {
auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
- using Layout = absl::container_internal::Layout<ctrl_t, slot_type>;
-
- static Layout MakeLayout(size_t capacity) {
- assert(IsValidCapacity(capacity));
- return Layout(capacity + Group::kWidth + 1, capacity);
- }
-
using AllocTraits = absl::allocator_traits<allocator_type>;
using SlotAlloc = typename absl::allocator_traits<
allocator_type>::template rebind_alloc<slot_type>;
@@ -689,16 +1032,22 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
- AssertIsFull(ctrl_);
+ ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_,
+ "operator*() called on invalid iterator.");
return PolicyTraits::element(slot_);
}
// PRECONDITION: not an end() iterator.
- pointer operator->() const { return &operator*(); }
+ pointer operator->() const {
+ ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_,
+ "operator-> called on invalid iterator.");
+ return &operator*();
+ }
// PRECONDITION: not an end() iterator.
iterator& operator++() {
- AssertIsFull(ctrl_);
+ ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_,
+ "operator++ called on invalid iterator.");
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -724,16 +1073,20 @@ class raw_hash_set {
iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
// This assumption helps the compiler know that any non-end iterator is
// not equal to any end iterator.
- ABSL_INTERNAL_ASSUME(ctrl != nullptr);
+ ABSL_ASSUME(ctrl != nullptr);
}
+    // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_`
+    // until they reach one.
+ //
+ // If a sentinel is reached, we null both of them out instead.
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
ctrl_ += shift;
slot_ += shift;
}
- if (ABSL_PREDICT_FALSE(*ctrl_ == kSentinel)) ctrl_ = nullptr;
+ if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
}
ctrl_t* ctrl_ = nullptr;
@@ -814,7 +1167,8 @@ class raw_hash_set {
raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
const hasher& hash = hasher(), const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
- : raw_hash_set(bucket_count, hash, eq, alloc) {
+ : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
+ hash, eq, alloc) {
insert(first, last);
}
@@ -902,7 +1256,8 @@ class raw_hash_set {
for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
auto target = find_first_non_full(ctrl_, hash, capacity_);
- set_ctrl(target.offset, H2(hash));
+ SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
+ sizeof(slot_type));
emplace_at(target.offset, v);
infoz().RecordInsert(hash, target.probe_length);
}
@@ -998,6 +1353,8 @@ class raw_hash_set {
// past that we simply deallocate the array.
if (capacity_ > 127) {
destroy_slots();
+
+ infoz().RecordClearedReservation();
} else if (capacity_) {
for (size_t i = 0; i != capacity_; ++i) {
if (IsFull(ctrl_[i])) {
@@ -1005,7 +1362,7 @@ class raw_hash_set {
}
}
size_ = 0;
- reset_ctrl();
+ ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
reset_growth_left();
}
assert(empty());
@@ -1019,8 +1376,7 @@ class raw_hash_set {
// m.insert(std::make_pair("abc", 42));
// TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
// bug.
- template <class T, RequiresInsertable<T> = 0,
- class T2 = T,
+ template <class T, RequiresInsertable<T> = 0, class T2 = T,
typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
T* = nullptr>
std::pair<iterator, bool> insert(T&& value) {
@@ -1240,7 +1596,8 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- AssertIsFull(it.ctrl_);
+ ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_,
+ "erase() called on invalid iterator.");
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
@@ -1274,7 +1631,8 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- AssertIsFull(position.inner_.ctrl_);
+ ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_,
+ "extract() called on invalid iterator.");
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
@@ -1311,21 +1669,31 @@ class raw_hash_set {
if (n == 0 && size_ == 0) {
destroy_slots();
infoz().RecordStorageChanged(0, 0);
+ infoz().RecordClearedReservation();
return;
}
+
// bitor is a faster way of doing `max` here. We will round up to the next
// power-of-2-minus-1, so bitor is good enough.
auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
// n == 0 unconditionally rehashes as per the standard.
if (n == 0 || m > capacity_) {
resize(m);
+
+ // This is after resize, to ensure that we have completed the allocation
+ // and have potentially sampled the hashtable.
+ infoz().RecordReservation(n);
}
}
void reserve(size_t n) {
- size_t m = GrowthToLowerboundCapacity(n);
- if (m > capacity_) {
+ if (n > size() + growth_left()) {
+ size_t m = GrowthToLowerboundCapacity(n);
resize(NormalizeCapacity(m));
+
+ // This is after resize, to ensure that we have completed the allocation
+ // and have potentially sampled the hashtable.
+ infoz().RecordReservation(n);
}
}
@@ -1351,11 +1719,13 @@ class raw_hash_set {
template <class K = key_type>
void prefetch(const key_arg<K>& key) const {
(void)key;
-#if defined(__GNUC__)
+ // Avoid probing if we won't be able to prefetch the addresses received.
+#ifdef ABSL_INTERNAL_HAVE_PREFETCH
+ prefetch_heap_block();
auto seq = probe(ctrl_, hash_ref()(key), capacity_);
- __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
- __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
-#endif // __GNUC__
+ base_internal::PrefetchT0(ctrl_ + seq.offset());
+ base_internal::PrefetchT0(slots_ + seq.offset());
+#endif // ABSL_INTERNAL_HAVE_PREFETCH
}
// The API of find() has two extensions.
@@ -1370,19 +1740,20 @@ class raw_hash_set {
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
+ for (uint32_t i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slots_ + seq.offset(i)))))
return iterator_at(seq.offset(i));
}
- if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
+ if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
}
template <class K = key_type>
iterator find(const key_arg<K>& key) {
+ prefetch_heap_block();
return find(key, hash_ref()(key));
}
@@ -1392,6 +1763,7 @@ class raw_hash_set {
}
template <class K = key_type>
const_iterator find(const key_arg<K>& key) const {
+ prefetch_heap_block();
return find(key, hash_ref()(key));
}
@@ -1441,6 +1813,14 @@ class raw_hash_set {
return !(a == b);
}
+ template <typename H>
+ friend typename std::enable_if<H::template is_hashable<value_type>::value,
+ H>::type
+ AbslHashValue(H h, const raw_hash_set& s) {
+ return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
+ s.size());
+ }
+
friend void swap(raw_hash_set& a,
raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
a.swap(b);
@@ -1506,17 +1886,17 @@ class raw_hash_set {
slot_type&& slot;
};
- // "erases" the object from the container, except that it doesn't actually
- // destroy the object. It only updates all the metadata of the class.
- // This can be used in conjunction with Policy::transfer to move the object to
- // another place.
+ // Erases, but does not destroy, the value pointed to by `it`.
+ //
+ // This merely updates the pertinent control byte. This can be used in
+ // conjunction with Policy::transfer to move the object to another place.
void erase_meta_only(const_iterator it) {
assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
--size_;
- const size_t index = it.inner_.ctrl_ - ctrl_;
+ const size_t index = static_cast<size_t>(it.inner_.ctrl_ - ctrl_);
const size_t index_before = (index - Group::kWidth) & capacity_;
- const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
- const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
+ const auto empty_after = Group(it.inner_.ctrl_).MaskEmpty();
+ const auto empty_before = Group(ctrl_ + index_before).MaskEmpty();
// We count how many consecutive non empties we have to the right and to the
// left of `it`. If the sum is >= kWidth then there is at least one probe
@@ -1526,11 +1906,17 @@ class raw_hash_set {
static_cast<size_t>(empty_after.TrailingZeros() +
empty_before.LeadingZeros()) < Group::kWidth;
- set_ctrl(index, was_never_full ? kEmpty : kDeleted);
+ SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
+ capacity_, ctrl_, slots_, sizeof(slot_type));
growth_left() += was_never_full;
infoz().RecordErase();
}
+ // Allocates a backing array for `self` and initializes its control bytes.
+ // This reads `capacity_` and updates all other fields based on the result of
+ // the allocation.
+ //
+ // This does not free the currently held array; `capacity_` must be nonzero.
void initialize_slots() {
assert(capacity_);
// Folks with custom allocators often make unwarranted assumptions about the
@@ -1545,19 +1931,24 @@ class raw_hash_set {
// bound more carefully.
if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
slots_ == nullptr) {
- infoz() = Sample();
+ infoz() = Sample(sizeof(slot_type));
}
- auto layout = MakeLayout(capacity_);
- char* mem = static_cast<char*>(
- Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
- ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
- slots_ = layout.template Pointer<1>(mem);
- reset_ctrl();
+ char* mem = static_cast<char*>(Allocate<alignof(slot_type)>(
+ &alloc_ref(),
+ AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))));
+ ctrl_ = reinterpret_cast<ctrl_t*>(mem);
+ slots_ = reinterpret_cast<slot_type*>(
+ mem + SlotOffset(capacity_, alignof(slot_type)));
+ ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
reset_growth_left();
infoz().RecordStorageChanged(size_, capacity_);
}
+ // Destroys all slots in the backing array, frees the backing array, and
+ // clears all top-level book-keeping data.
+ //
+ // This essentially implements `map = raw_hash_set();`.
void destroy_slots() {
if (!capacity_) return;
for (size_t i = 0; i != capacity_; ++i) {
@@ -1565,10 +1956,12 @@ class raw_hash_set {
PolicyTraits::destroy(&alloc_ref(), slots_ + i);
}
}
- auto layout = MakeLayout(capacity_);
+
// Unpoison before returning the memory to the allocator.
SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
- Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
+ Deallocate<alignof(slot_type)>(
+ &alloc_ref(), ctrl_,
+ AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)));
ctrl_ = EmptyGroup();
slots_ = nullptr;
size_ = 0;
@@ -1592,20 +1985,23 @@ class raw_hash_set {
auto target = find_first_non_full(ctrl_, hash, capacity_);
size_t new_i = target.offset;
total_probe_length += target.probe_length;
- set_ctrl(new_i, H2(hash));
+ SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
}
}
if (old_capacity) {
SanitizerUnpoisonMemoryRegion(old_slots,
sizeof(slot_type) * old_capacity);
- auto layout = MakeLayout(old_capacity);
- Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
- layout.AllocSize());
+ Deallocate<alignof(slot_type)>(
+ &alloc_ref(), old_ctrl,
+ AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
}
infoz().RecordRehash(total_probe_length);
}
+ // Prunes control bytes to remove as many tombstones as possible.
+ //
+ // See the comment on `rehash_and_grow_if_necessary()`.
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_));
assert(!is_small(capacity_));
@@ -1631,35 +2027,35 @@ class raw_hash_set {
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
for (size_t i = 0; i != capacity_; ++i) {
if (!IsDeleted(ctrl_[i])) continue;
- size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
- PolicyTraits::element(slots_ + i));
- auto target = find_first_non_full(ctrl_, hash, capacity_);
- size_t new_i = target.offset;
+ const size_t hash = PolicyTraits::apply(
+ HashElement{hash_ref()}, PolicyTraits::element(slots_ + i));
+ const FindInfo target = find_first_non_full(ctrl_, hash, capacity_);
+ const size_t new_i = target.offset;
total_probe_length += target.probe_length;
// Verify if the old and new i fall within the same group wrt the hash.
// If they do, we don't need to move the object as it falls already in the
// best probe we can.
- const auto probe_index = [&](size_t pos) {
- return ((pos - probe(ctrl_, hash, capacity_).offset()) & capacity_) /
- Group::kWidth;
+ const size_t probe_offset = probe(ctrl_, hash, capacity_).offset();
+ const auto probe_index = [probe_offset, this](size_t pos) {
+ return ((pos - probe_offset) & capacity_) / Group::kWidth;
};
// Element doesn't move.
if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
- set_ctrl(i, H2(hash));
+ SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
continue;
}
if (IsEmpty(ctrl_[new_i])) {
// Transfer element to the empty spot.
- // set_ctrl poisons/unpoisons the slots so we have to call it at the
+ // SetCtrl poisons/unpoisons the slots so we have to call it at the
// right time.
- set_ctrl(new_i, H2(hash));
+ SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
- set_ctrl(i, kEmpty);
+ SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type));
} else {
assert(IsDeleted(ctrl_[new_i]));
- set_ctrl(new_i, H2(hash));
+ SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
// Until we are done rehashing, DELETED marks previously FULL slots.
// Swap i and new_i elements.
PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
@@ -1672,11 +2068,58 @@ class raw_hash_set {
infoz().RecordRehash(total_probe_length);
}
+ // Called whenever the table *might* need to conditionally grow.
+ //
+ // This function is an optimization opportunity to perform a rehash even when
+ // growth is unnecessary, because vacating tombstones is beneficial for
+ // performance in the long-run.
void rehash_and_grow_if_necessary() {
if (capacity_ == 0) {
resize(1);
- } else if (size() <= CapacityToGrowth(capacity()) / 2) {
+ } else if (capacity_ > Group::kWidth &&
+               // Do these calculations in 64-bit to avoid overflow.
+ size() * uint64_t{32} <= capacity_ * uint64_t{25}) {
// Squash DELETED without growing if there is enough capacity.
+ //
+ // Rehash in place if the current size is <= 25/32 of capacity_.
+ // Rationale for such a high factor: 1) drop_deletes_without_resize() is
+ // faster than resize, and 2) it takes quite a bit of work to add
+ // tombstones. In the worst case, seems to take approximately 4
+ // insert/erase pairs to create a single tombstone and so if we are
+ // rehashing because of tombstones, we can afford to rehash-in-place as
+ // long as we are reclaiming at least 1/8 the capacity without doing more
+ // than 2X the work. (Where "work" is defined to be size() for rehashing
+ // or rehashing in place, and 1 for an insert or erase.) But rehashing in
+ // place is faster per operation than inserting or even doubling the size
+ // of the table, so we actually afford to reclaim even less space from a
+ // resize-in-place. The decision is to rehash in place if we can reclaim
+    // at about 1/8th of the usable capacity (specifically 3/28 of the usable
+    // capacity, i.e. 3/32 of the total capacity), which means that the total
+    // cost of rehashing will be a small
+ // fraction of the total work.
+ //
+ // Here is output of an experiment using the BM_CacheInSteadyState
+ // benchmark running the old case (where we rehash-in-place only if we can
+ // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place
+ // if we can recover 3/32*capacity_).
+ //
+    // Note that although the worst-case number of rehashes jumped from 15 to
+    // 190, the number of operations per second is almost the same.
+ //
+ // Abridged output of running BM_CacheInSteadyState benchmark from
+ // raw_hash_set_benchmark. N is the number of insert/erase operations.
+ //
+    //      | OLD (recover >= 7/16)        | NEW (recover >= 3/32)
+ // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes
+ // 448 | 145284 0.44 18 | 140118 0.44 19
+ // 493 | 152546 0.24 11 | 151417 0.48 28
+ // 538 | 151439 0.26 11 | 151152 0.53 38
+ // 583 | 151765 0.28 11 | 150572 0.57 50
+ // 628 | 150241 0.31 11 | 150853 0.61 66
+ // 672 | 149602 0.33 12 | 150110 0.66 90
+ // 717 | 149998 0.35 12 | 149531 0.70 129
+ // 762 | 149836 0.37 13 | 148559 0.74 190
+ // 807 | 149736 0.39 14 | 151107 0.39 14
+ // 852 | 150204 0.42 15 | 151019 0.42 15
drop_deletes_without_resize();
} else {
// Otherwise grow the container.
@@ -1689,14 +2132,14 @@ class raw_hash_set {
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
+ for (uint32_t i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
elem))
return true;
}
- if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+ if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
return false;
}
@@ -1714,25 +2157,33 @@ class raw_hash_set {
}
protected:
+ // Attempts to find `key` in the table; if it isn't found, returns a slot that
+ // the value can be inserted into, with the control byte already set to
+ // `key`'s H2.
template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+ prefetch_heap_block();
auto hash = hash_ref()(key);
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
+ for (uint32_t i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slots_ + seq.offset(i)))))
return {seq.offset(i), false};
}
- if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+ if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
return {prepare_insert(hash), true};
}
+ // Given the hash of a value not currently in the table, finds the next
+ // viable slot index to insert it at.
+ //
+ // REQUIRES: At least one non-full slot available.
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
auto target = find_first_non_full(ctrl_, hash, capacity_);
if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
@@ -1742,7 +2193,8 @@ class raw_hash_set {
}
++size_;
growth_left() -= IsEmpty(ctrl_[target.offset]);
- set_ctrl(target.offset, H2(hash));
+ SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
+ sizeof(slot_type));
infoz().RecordInsert(hash, target.probe_length);
return target.offset;
}
@@ -1771,35 +2223,29 @@ class raw_hash_set {
private:
friend struct RawHashSetTestOnlyAccess;
- // Reset all ctrl bytes back to kEmpty, except the sentinel.
- void reset_ctrl() {
- std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
- ctrl_[capacity_] = kSentinel;
- SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
- }
-
void reset_growth_left() {
growth_left() = CapacityToGrowth(capacity()) - size_;
}
- // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at
- // the end too.
- void set_ctrl(size_t i, ctrl_t h) {
- assert(i < capacity_);
-
- if (IsFull(h)) {
- SanitizerUnpoisonObject(slots_ + i);
- } else {
- SanitizerPoisonObject(slots_ + i);
- }
+ // The number of slots we can still fill without needing to rehash.
+ //
+ // This is stored separately due to tombstones: we do not include tombstones
+ // in the growth capacity, because we'd like to rehash when the table is
+ // otherwise filled with tombstones: otherwise, probe sequences might get
+ // unacceptably long without triggering a rehash. Callers can also force a
+ // rehash via the standard `rehash(0)`, which will recompute this value as a
+ // side-effect.
+ //
+ // See `CapacityToGrowth()`.
+ size_t& growth_left() { return settings_.template get<0>(); }
- ctrl_[i] = h;
- ctrl_[((i - Group::kWidth) & capacity_) + 1 +
- ((Group::kWidth - 1) & capacity_)] = h;
+ // Prefetch the heap-allocated memory region to resolve potential TLB misses.
+ // This is intended to overlap with execution of calculating the hash for a
+ // key.
+ void prefetch_heap_block() const {
+ base_internal::PrefetchT2(ctrl_);
}
- size_t& growth_left() { return settings_.template get<0>(); }
-
HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
hasher& hash_ref() { return settings_.template get<2>(); }
@@ -1814,26 +2260,41 @@ class raw_hash_set {
// TODO(alkis): Investigate removing some of these fields:
// - ctrl/slots can be derived from each other
// - size can be moved into the slot array
- ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1) * ctrl_t]
- slot_type* slots_ = nullptr; // [capacity * slot_type]
- size_t size_ = 0; // number of full slots
- size_t capacity_ = 0; // total number of slots
+
+ // The control bytes (and, also, a pointer to the base of the backing array).
+ //
+ // This contains `capacity_ + 1 + NumClonedBytes()` entries, even
+ // when the table is empty (hence EmptyGroup).
+ ctrl_t* ctrl_ = EmptyGroup();
+ // The beginning of the slots, located at `SlotOffset()` bytes after
+ // `ctrl_`. May be null for empty tables.
+ slot_type* slots_ = nullptr;
+
+ // The number of filled slots.
+ size_t size_ = 0;
+
+ // The total number of available slots.
+ size_t capacity_ = 0;
absl::container_internal::CompressedTuple<size_t /* growth_left */,
HashtablezInfoHandle, hasher,
key_equal, allocator_type>
- settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
+ settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{},
allocator_type{}};
};
// Erases all elements that satisfy the predicate `pred` from the container `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
-void EraseIf(Predicate pred, raw_hash_set<P, H, E, A>* c) {
+typename raw_hash_set<P, H, E, A>::size_type EraseIf(
+ Predicate& pred, raw_hash_set<P, H, E, A>* c) {
+ const auto initial_size = c->size();
for (auto it = c->begin(), last = c->end(); it != last;) {
- auto copy_it = it++;
- if (pred(*copy_it)) {
- c->erase(copy_it);
+ if (pred(*it)) {
+ c->erase(it++);
+ } else {
+ ++it;
}
}
+ return initial_size - c->size();
}
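With the new return value, callers can observe how many elements were erased. A hedged usage sketch through the public absl::erase_if adapter (assuming it forwards to this overload after the change):

    #include <cstddef>
    #include <cstdio>
    #include "absl/container/flat_hash_set.h"

    int main() {
      absl::flat_hash_set<int> s = {1, 2, 3, 4, 5};
      auto erased = absl::erase_if(s, [](int v) { return v % 2 == 0; });
      // prints: erased=2 size=3
      std::printf("erased=%zu size=%zu\n", static_cast<size_t>(erased),
                  s.size());
    }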
namespace hashtable_debug_internal {
@@ -1849,7 +2310,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
auto seq = probe(set.ctrl_, hash, set.capacity_);
while (true) {
container_internal::Group g{set.ctrl_ + seq.offset()};
- for (int i : g.Match(container_internal::H2(hash))) {
+ for (uint32_t i : g.Match(container_internal::H2(hash))) {
if (Traits::apply(
typename Set::template EqualElement<typename Set::key_type>{
key, set.eq_ref()},
@@ -1857,7 +2318,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
return num_probes;
++num_probes;
}
- if (g.MatchEmpty()) return num_probes;
+ if (g.MaskEmpty()) return num_probes;
seq.next();
++num_probes;
}
@@ -1866,8 +2327,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t AllocatedByteSize(const Set& c) {
size_t capacity = c.capacity_;
if (capacity == 0) return 0;
- auto layout = Set::MakeLayout(capacity);
- size_t m = layout.AllocSize();
+ size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
@@ -1885,8 +2345,8 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t LowerBoundAllocatedByteSize(size_t size) {
size_t capacity = GrowthToLowerboundCapacity(size);
if (capacity == 0) return 0;
- auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
- size_t m = layout.AllocSize();
+ size_t m =
+ AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
m += per_slot * size;
@@ -1900,4 +2360,6 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
ABSL_NAMESPACE_END
} // namespace absl
+#undef ABSL_INTERNAL_ASSERT_IS_FULL
+
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/absl/container/internal/raw_hash_set_benchmark.cc b/absl/container/internal/raw_hash_set_benchmark.cc
index f9be2c5a..47dc9048 100644
--- a/absl/container/internal/raw_hash_set_benchmark.cc
+++ b/absl/container/internal/raw_hash_set_benchmark.cc
@@ -254,6 +254,23 @@ void BM_CopyAssign(benchmark::State& state) {
}
BENCHMARK(BM_CopyAssign)->Range(128, 4096);
+void BM_RangeCtor(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
+ std::vector<int> values;
+ const size_t desired_size = state.range(0);
+ while (values.size() < desired_size) {
+ values.emplace_back(dist(rng));
+ }
+
+ for (auto unused : state) {
+ IntTable t{values.begin(), values.end()};
+ benchmark::DoNotOptimize(t);
+ }
+}
+BENCHMARK(BM_RangeCtor)->Range(128, 65536);
+
void BM_NoOpReserveIntTable(benchmark::State& state) {
IntTable t;
t.reserve(100000);
@@ -298,56 +315,79 @@ void BM_ReserveStringTable(benchmark::State& state) {
}
BENCHMARK(BM_ReserveStringTable)->Range(128, 4096);
+// Like std::iota, except that ctrl_t doesn't support operator++.
+template <typename CtrlIter>
+void Iota(CtrlIter begin, CtrlIter end, int value) {
+ for (; begin != end; ++begin, ++value) {
+ *begin = static_cast<ctrl_t>(value);
+ }
+}
+
void BM_Group_Match(benchmark::State& state) {
std::array<ctrl_t, Group::kWidth> group;
- std::iota(group.begin(), group.end(), -4);
+ Iota(group.begin(), group.end(), -4);
Group g{group.data()};
h2_t h = 1;
for (auto _ : state) {
::benchmark::DoNotOptimize(h);
+ ::benchmark::DoNotOptimize(g);
::benchmark::DoNotOptimize(g.Match(h));
}
}
BENCHMARK(BM_Group_Match);
-void BM_Group_MatchEmpty(benchmark::State& state) {
+void BM_Group_MaskEmpty(benchmark::State& state) {
std::array<ctrl_t, Group::kWidth> group;
- std::iota(group.begin(), group.end(), -4);
+ Iota(group.begin(), group.end(), -4);
Group g{group.data()};
- for (auto _ : state) ::benchmark::DoNotOptimize(g.MatchEmpty());
+ for (auto _ : state) {
+ ::benchmark::DoNotOptimize(g);
+ ::benchmark::DoNotOptimize(g.MaskEmpty());
+ }
}
-BENCHMARK(BM_Group_MatchEmpty);
+BENCHMARK(BM_Group_MaskEmpty);
-void BM_Group_MatchEmptyOrDeleted(benchmark::State& state) {
+void BM_Group_MaskEmptyOrDeleted(benchmark::State& state) {
std::array<ctrl_t, Group::kWidth> group;
- std::iota(group.begin(), group.end(), -4);
+ Iota(group.begin(), group.end(), -4);
Group g{group.data()};
- for (auto _ : state) ::benchmark::DoNotOptimize(g.MatchEmptyOrDeleted());
+ for (auto _ : state) {
+ ::benchmark::DoNotOptimize(g);
+ ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted());
+ }
}
-BENCHMARK(BM_Group_MatchEmptyOrDeleted);
+BENCHMARK(BM_Group_MaskEmptyOrDeleted);
void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) {
std::array<ctrl_t, Group::kWidth> group;
- std::iota(group.begin(), group.end(), -2);
+ Iota(group.begin(), group.end(), -2);
Group g{group.data()};
- for (auto _ : state)
+ for (auto _ : state) {
+ ::benchmark::DoNotOptimize(g);
::benchmark::DoNotOptimize(g.CountLeadingEmptyOrDeleted());
+ }
}
BENCHMARK(BM_Group_CountLeadingEmptyOrDeleted);
void BM_Group_MatchFirstEmptyOrDeleted(benchmark::State& state) {
std::array<ctrl_t, Group::kWidth> group;
- std::iota(group.begin(), group.end(), -2);
+ Iota(group.begin(), group.end(), -2);
Group g{group.data()};
- for (auto _ : state) ::benchmark::DoNotOptimize(*g.MatchEmptyOrDeleted());
+ for (auto _ : state) {
+ ::benchmark::DoNotOptimize(g);
+ ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted().LowestBitSet());
+ }
}
BENCHMARK(BM_Group_MatchFirstEmptyOrDeleted);
void BM_DropDeletes(benchmark::State& state) {
constexpr size_t capacity = (1 << 20) - 1;
std::vector<ctrl_t> ctrl(capacity + 1 + Group::kWidth);
- ctrl[capacity] = kSentinel;
- std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
+ ctrl[capacity] = ctrl_t::kSentinel;
+ std::vector<ctrl_t> pattern = {ctrl_t::kEmpty, static_cast<ctrl_t>(2),
+ ctrl_t::kDeleted, static_cast<ctrl_t>(2),
+ ctrl_t::kEmpty, static_cast<ctrl_t>(1),
+ ctrl_t::kDeleted};
for (size_t i = 0; i != capacity; ++i) {
ctrl[i] = pattern[i % pattern.size()];
}
@@ -378,6 +418,12 @@ bool CodegenAbslRawHashSetInt64FindNeEnd(
return table->find(key) != table->end();
}
+auto CodegenAbslRawHashSetInt64Insert(absl::container_internal::IntTable* table,
+ int64_t key)
+ -> decltype(table->insert(key)) {
+ return table->insert(key);
+}
+
bool CodegenAbslRawHashSetInt64Contains(
absl::container_internal::IntTable* table, int64_t key) {
return table->contains(key);
@@ -391,6 +437,7 @@ void CodegenAbslRawHashSetInt64Iterate(
int odr =
(::benchmark::DoNotOptimize(std::make_tuple(
&CodegenAbslRawHashSetInt64Find, &CodegenAbslRawHashSetInt64FindNeEnd,
+ &CodegenAbslRawHashSetInt64Insert,
&CodegenAbslRawHashSetInt64Contains,
&CodegenAbslRawHashSetInt64Iterate)),
1);
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 7dac65a0..f77ffbc1 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -31,6 +31,7 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/prefetch.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h"
@@ -58,6 +59,9 @@ using ::testing::Lt;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
+// Convenience function to static_cast an int to ctrl_t.
+ctrl_t CtrlT(int i) { return static_cast<ctrl_t>(i); }
+
TEST(Util, NormalizeCapacity) {
EXPECT_EQ(1, NormalizeCapacity(0));
EXPECT_EQ(1, NormalizeCapacity(1));
@@ -170,15 +174,19 @@ TEST(Group, EmptyGroup) {
TEST(Group, Match) {
if (Group::kWidth == 16) {
- ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
- 7, 5, 3, 1, 1, 1, 1, 1};
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
+ ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
+ CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
+ CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
EXPECT_THAT(Group{group}.Match(0), ElementsAre());
EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
} else if (Group::kWidth == 8) {
- ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
+ ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
+ ctrl_t::kSentinel, CtrlT(1)};
EXPECT_THAT(Group{group}.Match(0), ElementsAre());
EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
@@ -187,27 +195,39 @@ TEST(Group, Match) {
}
}
-TEST(Group, MatchEmpty) {
+TEST(Group, MaskEmpty) {
if (Group::kWidth == 16) {
- ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
- 7, 5, 3, 1, 1, 1, 1, 1};
- EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
+ ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
+ CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
+ CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
+ EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
+ EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4);
} else if (Group::kWidth == 8) {
- ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
- EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
+ ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
+ ctrl_t::kSentinel, CtrlT(1)};
+ EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
+ EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0);
} else {
FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
}
}
-TEST(Group, MatchEmptyOrDeleted) {
+TEST(Group, MaskEmptyOrDeleted) {
if (Group::kWidth == 16) {
- ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
- 7, 5, 3, 1, 1, 1, 1, 1};
- EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3),
+ ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
+ CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
+ CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
+ EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
+ EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4);
} else if (Group::kWidth == 8) {
- ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
- EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
+ ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
+ ctrl_t::kSentinel, CtrlT(1)};
+ EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
+ EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3);
} else {
FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
}
@@ -217,28 +237,32 @@ TEST(Batch, DropDeletes) {
constexpr size_t kCapacity = 63;
constexpr size_t kGroupWidth = container_internal::Group::kWidth;
std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
- ctrl[kCapacity] = kSentinel;
- std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
+ ctrl[kCapacity] = ctrl_t::kSentinel;
+ std::vector<ctrl_t> pattern = {
+ ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2),
+ ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted};
for (size_t i = 0; i != kCapacity; ++i) {
ctrl[i] = pattern[i % pattern.size()];
if (i < kGroupWidth - 1)
ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
}
ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
- ASSERT_EQ(ctrl[kCapacity], kSentinel);
- for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
+ ASSERT_EQ(ctrl[kCapacity], ctrl_t::kSentinel);
+ for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) {
ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
- if (i == kCapacity) expected = kSentinel;
- if (expected == kDeleted) expected = kEmpty;
- if (IsFull(expected)) expected = kDeleted;
+ if (i == kCapacity) expected = ctrl_t::kSentinel;
+ if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty;
+ if (IsFull(expected)) expected = ctrl_t::kDeleted;
EXPECT_EQ(ctrl[i], expected)
- << i << " " << int{pattern[i % pattern.size()]};
+ << i << " " << static_cast<int>(pattern[i % pattern.size()]);
}
}
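The expected-value loop above re-derives a small per-byte mapping; here is a
hedged scalar model of what ConvertDeletedToEmptyAndFullToDeleted does to each
control byte (the real routine works a Group at a time and also refreshes the
cloned trailing bytes):

    // Deleted slots become empty, full slots become deleted (their elements
    // get rehashed in place), and empty/sentinel bytes persist unchanged.
    ctrl_t ConvertSketch(ctrl_t c) {
      if (c == ctrl_t::kDeleted) return ctrl_t::kEmpty;
      if (IsFull(c)) return ctrl_t::kDeleted;
      return c;  // ctrl_t::kEmpty or ctrl_t::kSentinel.
    }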
TEST(Group, CountLeadingEmptyOrDeleted) {
- const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
- const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};
+ const std::vector<ctrl_t> empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted};
+ const std::vector<ctrl_t> full_examples = {
+ CtrlT(0), CtrlT(1), CtrlT(2), CtrlT(3),
+ CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel};
for (ctrl_t empty : empty_examples) {
std::vector<ctrl_t> e(Group::kWidth, empty);
@@ -294,6 +318,7 @@ struct ValuePolicy {
};
using IntPolicy = ValuePolicy<int64_t>;
+using Uint8Policy = ValuePolicy<uint8_t>;
class StringPolicy {
template <class F, class K, class V,
@@ -374,6 +399,13 @@ struct IntTable
using Base::Base;
};
+struct Uint8Table
+ : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
+ std::equal_to<uint8_t>, std::allocator<uint8_t>> {
+ using Base = typename Uint8Table::raw_hash_set;
+ using Base::Base;
+};
+
template <typename T>
struct CustomAlloc : std::allocator<T> {
CustomAlloc() {}
@@ -541,6 +573,37 @@ TEST(Table, InsertCollisionAndFindAfterDelete) {
EXPECT_TRUE(t.empty());
}
+TEST(Table, InsertWithinCapacity) {
+ IntTable t;
+ t.reserve(10);
+ const size_t original_capacity = t.capacity();
+ const auto addr = [&](int i) {
+ return reinterpret_cast<uintptr_t>(&*t.find(i));
+ };
+ // Inserting an element does not change capacity.
+ t.insert(0);
+ EXPECT_THAT(t.capacity(), original_capacity);
+ const uintptr_t original_addr_0 = addr(0);
+ // Inserting another element does not rehash.
+ t.insert(1);
+ EXPECT_THAT(t.capacity(), original_capacity);
+ EXPECT_THAT(addr(0), original_addr_0);
+ // Inserting lots of duplicate elements does not rehash.
+ for (int i = 0; i < 100; ++i) {
+ t.insert(i % 10);
+ }
+ EXPECT_THAT(t.capacity(), original_capacity);
+ EXPECT_THAT(addr(0), original_addr_0);
+ // Inserting a range of duplicate elements does not rehash.
+ std::vector<int> dup_range;
+ for (int i = 0; i < 100; ++i) {
+ dup_range.push_back(i % 10);
+ }
+ t.insert(dup_range.begin(), dup_range.end());
+ EXPECT_THAT(t.capacity(), original_capacity);
+ EXPECT_THAT(addr(0), original_addr_0);
+}
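The capacity stability this new test asserts follows from how reserve() sizes
the table: SwissTable capacities are always of the form 2^k - 1, and the table
tolerates roughly 7/8 load before growing. A sketch of the arithmetic, with
the helper modeled on raw_hash_set.h (treat the exact formula as an
assumption):

    // Growth budget for a given capacity under the ~7/8 max load factor.
    size_t CapacityToGrowthSketch(size_t capacity) {
      return capacity - capacity / 8;
    }
    // reserve(10) rounds its lower-bound capacity up to 2^k - 1 = 15, which
    // buys CapacityToGrowthSketch(15) == 14 insertions before any rehash, so
    // the ten distinct keys above never disturb capacity() or addresses.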
+
TEST(Table, LazyEmplace) {
StringTable t;
bool called = false;
@@ -588,28 +651,53 @@ TEST(Table, Contains2) {
}
int decompose_constructed;
+int decompose_copy_constructed;
+int decompose_copy_assigned;
+int decompose_move_constructed;
+int decompose_move_assigned;
struct DecomposeType {
- DecomposeType(int i) : i(i) { // NOLINT
+ DecomposeType(int i = 0) : i(i) { // NOLINT
++decompose_constructed;
}
explicit DecomposeType(const char* d) : DecomposeType(*d) {}
+ DecomposeType(const DecomposeType& other) : i(other.i) {
+ ++decompose_copy_constructed;
+ }
+ DecomposeType& operator=(const DecomposeType& other) {
+ ++decompose_copy_assigned;
+ i = other.i;
+ return *this;
+ }
+ DecomposeType(DecomposeType&& other) : i(other.i) {
+ ++decompose_move_constructed;
+ }
+ DecomposeType& operator=(DecomposeType&& other) {
+ ++decompose_move_assigned;
+ i = other.i;
+ return *this;
+ }
+
int i;
};
struct DecomposeHash {
using is_transparent = void;
- size_t operator()(DecomposeType a) const { return a.i; }
+ size_t operator()(const DecomposeType& a) const { return a.i; }
size_t operator()(int a) const { return a; }
size_t operator()(const char* a) const { return *a; }
};
struct DecomposeEq {
using is_transparent = void;
- bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; }
- bool operator()(DecomposeType a, int b) const { return a.i == b; }
- bool operator()(DecomposeType a, const char* b) const { return a.i == *b; }
+ bool operator()(const DecomposeType& a, const DecomposeType& b) const {
+ return a.i == b.i;
+ }
+ bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
+ bool operator()(const DecomposeType& a, const char* b) const {
+ return a.i == *b;
+ }
};
struct DecomposePolicy {
@@ -619,9 +707,9 @@ struct DecomposePolicy {
template <typename T>
static void construct(void*, DecomposeType* slot, T&& v) {
- *slot = DecomposeType(std::forward<T>(v));
+ ::new (slot) DecomposeType(std::forward<T>(v));
}
- static void destroy(void*, DecomposeType*) {}
+ static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); }
static DecomposeType& element(slot_type* slot) { return *slot; }
template <class F, class T>
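The construct/destroy changes in this policy are load-bearing for the new
copy/move counters: construct() receives raw, uninitialized slot memory, so
the old `*slot = ...` assignment invoked operator= on an object that had never
been constructed (and would now also inflate decompose_move_assigned).
Placement new plus an explicit destructor call builds and tears down each slot
exactly once, so the counts checked in TestDecompose reflect only genuine
element construction.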
@@ -636,8 +724,13 @@ void TestDecompose(bool construct_three) {
const int one = 1;
const char* three_p = "3";
const auto& three = three_p;
+ const int elem_vector_count = 256;
+ std::vector<DecomposeType> elem_vector(elem_vector_count, DecomposeType{0});
+ std::iota(elem_vector.begin(), elem_vector.end(), 0);
- raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1;
+ using DecomposeSet =
+ raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>>;
+ DecomposeSet set1;
decompose_constructed = 0;
int expected_constructed = 0;
@@ -695,20 +788,72 @@ void TestDecompose(bool construct_three) {
expected_constructed += construct_three;
EXPECT_EQ(expected_constructed, decompose_constructed);
}
+
+ decompose_copy_constructed = 0;
+ decompose_copy_assigned = 0;
+ decompose_move_constructed = 0;
+ decompose_move_assigned = 0;
+ int expected_copy_constructed = 0;
+ int expected_move_constructed = 0;
+ { // raw_hash_set(first, last) with random-access iterators
+ DecomposeSet set2(elem_vector.begin(), elem_vector.end());
+ // Expect exactly one copy-constructor call for each element if no
+ // rehashing is done.
+ expected_copy_constructed += elem_vector_count;
+ EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
+ EXPECT_EQ(expected_move_constructed, decompose_move_constructed);
+ EXPECT_EQ(0, decompose_move_assigned);
+ EXPECT_EQ(0, decompose_copy_assigned);
+ }
+
+ { // raw_hash_set(first, last) with forward iterators
+ std::list<DecomposeType> elem_list(elem_vector.begin(), elem_vector.end());
+ expected_copy_constructed = decompose_copy_constructed;
+ DecomposeSet set2(elem_list.begin(), elem_list.end());
+ // Expect exactly N elements copied into set, expect at most 2*N elements
+ // moving internally for all resizing needed (for a growth factor of 2).
+ expected_copy_constructed += elem_vector_count;
+ EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
+ expected_move_constructed += elem_vector_count;
+ EXPECT_LT(expected_move_constructed, decompose_move_constructed);
+ expected_move_constructed += elem_vector_count;
+ EXPECT_GE(expected_move_constructed, decompose_move_constructed);
+ EXPECT_EQ(0, decompose_move_assigned);
+ EXPECT_EQ(0, decompose_copy_assigned);
+ expected_copy_constructed = decompose_copy_constructed;
+ expected_move_constructed = decompose_move_constructed;
+ }
+
+ { // insert(first, last)
+ DecomposeSet set2;
+ set2.insert(elem_vector.begin(), elem_vector.end());
+ // Expect exactly N elements copied into set, expect at most 2*N elements
+ // moving internally for all resizing needed (for a growth factor of 2).
+ const int expected_new_elements = elem_vector_count;
+ const int expected_max_element_moves = 2 * elem_vector_count;
+ expected_copy_constructed += expected_new_elements;
+ EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
+ expected_move_constructed += expected_max_element_moves;
+ EXPECT_GE(expected_move_constructed, decompose_move_constructed);
+ EXPECT_EQ(0, decompose_move_assigned);
+ EXPECT_EQ(0, decompose_copy_assigned);
+ expected_copy_constructed = decompose_copy_constructed;
+ expected_move_constructed = decompose_move_constructed;
+ }
}
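The 2*N move budget asserted in these blocks is the usual doubling argument:
every rehash move-constructs the elements present at that moment, and with a
growth factor of 2 those counts sum to roughly N (1 + 2 + 4 + ... + N/2 < N),
leaving headroom within 2*N for the moves that individual insertions perform;
each element is still copy-constructed from the input exactly once.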
TEST(Table, Decompose) {
TestDecompose<DecomposeHash, DecomposeEq>(false);
struct TransparentHashIntOverload {
- size_t operator()(DecomposeType a) const { return a.i; }
+ size_t operator()(const DecomposeType& a) const { return a.i; }
size_t operator()(int a) const { return a; }
};
struct TransparentEqIntOverload {
- bool operator()(DecomposeType a, DecomposeType b) const {
+ bool operator()(const DecomposeType& a, const DecomposeType& b) const {
return a.i == b.i;
}
- bool operator()(DecomposeType a, int b) const { return a.i == b; }
+ bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
};
TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
@@ -750,7 +895,7 @@ TEST(Table, RehashWithNoResize) {
const size_t capacity = t.capacity();
// Remove elements from all groups except the first and the last one.
- // All elements removed from full groups will be marked as kDeleted.
+ // All elements removed from full groups will be marked as ctrl_t::kDeleted.
const size_t erase_begin = Group::kWidth / 2;
const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
for (size_t i = erase_begin; i < erase_end; ++i) {
@@ -1104,7 +1249,7 @@ ExpectedStats XorSeedExpectedStats() {
case 16:
if (kRandomizesInserts) {
return {0.1,
- 1.0,
+ 2.0,
{{0.95, 0.1}},
{{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
} else {
@@ -1118,6 +1263,7 @@ ExpectedStats XorSeedExpectedStats() {
return {};
}
+// TODO(b/80415403): Figure out why this test is so flaky, esp. on MSVC
TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
ProbeStatsPerSize stats;
std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
@@ -1190,17 +1336,17 @@ ExpectedStats LinearTransformExpectedStats() {
{{0.95, 0.3}},
{{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
} else {
- return {0.15,
- 0.5,
- {{0.95, 0.3}},
- {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
+ return {0.4,
+ 0.6,
+ {{0.95, 0.5}},
+ {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}};
}
case 16:
if (kRandomizesInserts) {
return {0.1,
0.4,
{{0.95, 0.3}},
- {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+ {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}};
} else {
return {0.05,
0.2,
@@ -1212,6 +1358,7 @@ ExpectedStats LinearTransformExpectedStats() {
return {};
}
+// TODO(b/80415403): Figure out why this test is so flaky.
TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
ProbeStatsPerSize stats;
std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
@@ -1888,7 +2035,7 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
- constexpr char kDeathMsg[] = "Invalid operation on iterator";
+ constexpr char kDeathMsg[] = "erase.. called on invalid iterator";
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}
@@ -1898,7 +2045,7 @@ TEST(RawHashSamplerTest, Sample) {
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
- auto& sampler = HashtablezSampler::Global();
+ auto& sampler = GlobalHashtablezSampler();
size_t start_size = 0;
std::unordered_set<const HashtablezInfo*> preexisting_info;
start_size += sampler.Iterate([&](const HashtablezInfo& info) {
@@ -1909,16 +2056,33 @@ TEST(RawHashSamplerTest, Sample) {
std::vector<IntTable> tables;
for (int i = 0; i < 1000000; ++i) {
tables.emplace_back();
+
+ const bool do_reserve = (i % 10 > 5);
+ const bool do_rehash = !do_reserve && (i % 10 > 0);
+
+ if (do_reserve) {
+ // Don't reserve on all tables.
+ tables.back().reserve(10 * (i % 10));
+ }
+
tables.back().insert(1);
tables.back().insert(i % 5);
+
+ if (do_rehash) {
+ // Rehash some other tables.
+ tables.back().rehash(10 * (i % 10));
+ }
}
size_t end_size = 0;
std::unordered_map<size_t, int> observed_checksums;
+ std::unordered_map<ssize_t, int> reservations;
end_size += sampler.Iterate([&](const HashtablezInfo& info) {
if (preexisting_info.count(&info) == 0) {
observed_checksums[info.hashes_bitwise_xor.load(
std::memory_order_relaxed)]++;
+ reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
}
+ EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
++end_size;
});
@@ -1928,6 +2092,15 @@ TEST(RawHashSamplerTest, Sample) {
for (const auto& [_, count] : observed_checksums) {
EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
}
+
+ EXPECT_EQ(reservations.size(), 10);
+ for (const auto& [reservation, count] : reservations) {
+ EXPECT_GE(reservation, 0);
+ EXPECT_LT(reservation, 100);
+
+ EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.1, 0.05)
+ << reservation;
+ }
}
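For the arithmetic behind these expectations: a sample parameter of 100 means
roughly one in a hundred of the million tables is instrumented; inserting 1
and i % 5 yields five distinct hashes_bitwise_xor checksums, hence the ~0.2
share each; and i % 10 partitions the tables evenly across max_reserve values
0, 10, ..., 90 (no reserve or rehash when i % 10 == 0, rehash(10 * (i % 10))
for 1 through 5, reserve(10 * (i % 10)) for 6 through 9), which is why
reservations ends up with exactly ten buckets of ~0.1 apiece.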
#endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
@@ -1936,7 +2109,7 @@ TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
- auto& sampler = HashtablezSampler::Global();
+ auto& sampler = GlobalHashtablezSampler();
size_t start_size = 0;
start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
@@ -1978,6 +2151,36 @@ TEST(Sanitizer, PoisoningOnErase) {
}
#endif // ABSL_HAVE_ADDRESS_SANITIZER
+TEST(Table, AlignOne) {
+ // We previously had a bug in which we were copying a control byte over the
+ // first slot when alignof(value_type) is 1. We test repeated
+ // insertions/erases and verify that the behavior is correct.
+ Uint8Table t;
+ std::unordered_set<uint8_t> verifier; // NOLINT
+
+ // Do repeated insertions/erases from the table.
+ for (int64_t i = 0; i < 100000; ++i) {
+ SCOPED_TRACE(i);
+ const uint8_t u = (i * -i) & 0xFF;
+ auto it = t.find(u);
+ auto verifier_it = verifier.find(u);
+ if (it == t.end()) {
+ ASSERT_EQ(verifier_it, verifier.end());
+ t.insert(u);
+ verifier.insert(u);
+ } else {
+ ASSERT_NE(verifier_it, verifier.end());
+ t.erase(it);
+ verifier.erase(verifier_it);
+ }
+ }
+
+ EXPECT_EQ(t.size(), verifier.size());
+ for (uint8_t u : t) {
+ EXPECT_EQ(verifier.count(u), 1);
+ }
+}
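A simplified picture of why alignof(value_type) == 1 was special (the actual
layout arithmetic lives in raw_hash_set.h; treat this as a sketch):

    // [ctrl: capacity bytes][kSentinel][cloned ctrl: Group::kWidth - 1][slots]
    //
    // The trailing bytes mirror the leading control bytes so that group loads
    // near the end of the table can wrap around. When alignof(slot_type) > 1,
    // padding separates the cloned bytes from the slot array, and a stray
    // mirrored write only scribbles on padding; with alignof(slot_type) == 1
    // the slots begin immediately after the cloned bytes, so the same
    // off-by-one lands on slots[0], which is the corruption this mirror-set
    // test is designed to catch.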
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/unordered_map_constructor_test.h b/absl/container/internal/unordered_map_constructor_test.h
index 3f90ad7c..7e84dc25 100644
--- a/absl/container/internal/unordered_map_constructor_test.h
+++ b/absl/container/internal/unordered_map_constructor_test.h
@@ -179,7 +179,7 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
+ hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
@@ -198,7 +198,7 @@ void InputIteratorBucketAllocTest(std::true_type) {
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
+ hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
@@ -221,7 +221,7 @@ void InputIteratorBucketHashAllocTest(std::true_type) {
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
+ hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
@@ -241,8 +241,9 @@ TYPED_TEST_P(ConstructorTest, CopyConstructor) {
H hasher;
E equal;
A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam n(m);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
@@ -262,8 +263,9 @@ void CopyConstructorAllocTest(std::true_type) {
H hasher;
E equal;
A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam n(m, A(11));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
@@ -285,8 +287,9 @@ TYPED_TEST_P(ConstructorTest, MoveConstructor) {
H hasher;
E equal;
A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam t(m);
TypeParam n(std::move(t));
EXPECT_EQ(m.hash_function(), n.hash_function());
@@ -307,8 +310,9 @@ void MoveConstructorAllocTest(std::true_type) {
H hasher;
E equal;
A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam t(m);
TypeParam n(std::move(t), A(1));
EXPECT_EQ(m.hash_function(), n.hash_function());
@@ -325,7 +329,7 @@ TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -348,7 +352,7 @@ template <typename TypeParam>
void InitializerListBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
A alloc(0);
TypeParam m(values, 123, alloc);
@@ -371,7 +375,7 @@ void InitializerListBucketHashAllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values, 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
@@ -392,7 +396,7 @@ TYPED_TEST_P(ConstructorTest, Assignment) {
H hasher;
E equal;
A alloc(0);
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam n;
n = m;
@@ -412,7 +416,7 @@ TYPED_TEST_P(ConstructorTest, MoveAssignment) {
H hasher;
E equal;
A alloc(0);
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam t(m);
TypeParam n;
@@ -424,7 +428,7 @@ TYPED_TEST_P(ConstructorTest, MoveAssignment) {
TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
@@ -433,7 +437,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam n({gen()});
n = m;
@@ -442,7 +446,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam t(m);
TypeParam n({gen()});
@@ -452,7 +456,7 @@ TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
@@ -461,7 +465,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values);
m = *&m; // Avoid -Wself-assign
@@ -472,7 +476,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
// containers in unspecified state (and in practice in causes memory-leak
// according to heap-checker!).
-REGISTER_TYPED_TEST_CASE_P(
+REGISTER_TYPED_TEST_SUITE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
diff --git a/absl/container/internal/unordered_map_lookup_test.h b/absl/container/internal/unordered_map_lookup_test.h
index e76421e5..3713cd9a 100644
--- a/absl/container/internal/unordered_map_lookup_test.h
+++ b/absl/container/internal/unordered_map_lookup_test.h
@@ -107,8 +107,8 @@ TYPED_TEST_P(LookupTest, EqualRange) {
}
}
-REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
- EqualRange);
+REGISTER_TYPED_TEST_SUITE_P(LookupTest, At, OperatorBracket, Count, Find,
+ EqualRange);
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/unordered_map_modifiers_test.h b/absl/container/internal/unordered_map_modifiers_test.h
index 8c9ca779..4d9ab30f 100644
--- a/absl/container/internal/unordered_map_modifiers_test.h
+++ b/absl/container/internal/unordered_map_modifiers_test.h
@@ -81,6 +81,38 @@ TYPED_TEST_P(ModifiersTest, InsertRange) {
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}
+TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(val);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ m.insert(val2);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
+#if !defined(__GLIBCXX__)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> base_values;
+ std::generate_n(std::back_inserter(base_values), 10,
+ hash_internal::Generator<T>());
+ std::vector<T> values;
+ while (values.size() != 100) {
+ std::copy_n(base_values.begin(), 10, std::back_inserter(values));
+ }
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(values.begin(), values.end());
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+#endif
+}
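The !defined(__GLIBCXX__) guard is worth a note: these typed tests also run
against the standard unordered containers, and the standard leaves an
implementation free to grow bucket_count() during a range insert even when the
reserved capacity would have sufficed. libstdc++ appears to exercise that
latitude, so the within-capacity expectation is only asserted on other
standard libraries and on the Abseil tables, which do honor it.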
+
TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
#ifdef UNORDERED_MAP_CXX17
using std::get;
@@ -265,10 +297,12 @@ TYPED_TEST_P(ModifiersTest, Swap) {
// TODO(alkis): Write tests for extract.
// TODO(alkis): Write tests for merge.
-REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
- InsertRange, InsertOrAssign, InsertOrAssignHint,
- Emplace, EmplaceHint, TryEmplace, TryEmplaceHint,
- Erase, EraseRange, EraseKey, Swap);
+REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint,
+ InsertRange, InsertWithinCapacity,
+ InsertRangeWithinCapacity, InsertOrAssign,
+ InsertOrAssignHint, Emplace, EmplaceHint,
+ TryEmplace, TryEmplaceHint, Erase, EraseRange,
+ EraseKey, Swap);
template <typename Type>
struct is_unique_ptr : std::false_type {};
diff --git a/absl/container/internal/unordered_set_constructor_test.h b/absl/container/internal/unordered_set_constructor_test.h
index 41165b05..af1116e6 100644
--- a/absl/container/internal/unordered_set_constructor_test.h
+++ b/absl/container/internal/unordered_set_constructor_test.h
@@ -478,7 +478,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}
-REGISTER_TYPED_TEST_CASE_P(
+REGISTER_TYPED_TEST_SUITE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
diff --git a/absl/container/internal/unordered_set_lookup_test.h b/absl/container/internal/unordered_set_lookup_test.h
index 8f2f4b20..b35f766e 100644
--- a/absl/container/internal/unordered_set_lookup_test.h
+++ b/absl/container/internal/unordered_set_lookup_test.h
@@ -82,7 +82,7 @@ TYPED_TEST_P(LookupTest, EqualRange) {
}
}
-REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
+REGISTER_TYPED_TEST_SUITE_P(LookupTest, Count, Find, EqualRange);
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/unordered_set_modifiers_test.h b/absl/container/internal/unordered_set_modifiers_test.h
index 26be58d9..d8864bb2 100644
--- a/absl/container/internal/unordered_set_modifiers_test.h
+++ b/absl/container/internal/unordered_set_modifiers_test.h
@@ -74,6 +74,36 @@ TYPED_TEST_P(ModifiersTest, InsertRange) {
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}
+TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(val);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+ m.insert(val);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
+#if !defined(__GLIBCXX__)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> base_values;
+ std::generate_n(std::back_inserter(base_values), 10,
+ hash_internal::Generator<T>());
+ std::vector<T> values;
+ while (values.size() != 100) {
+ values.insert(values.end(), base_values.begin(), base_values.end());
+ }
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(values.begin(), values.end());
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+#endif
+}
+
TYPED_TEST_P(ModifiersTest, Emplace) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
@@ -179,9 +209,10 @@ TYPED_TEST_P(ModifiersTest, Swap) {
// TODO(alkis): Write tests for extract.
// TODO(alkis): Write tests for merge.
-REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
- InsertRange, Emplace, EmplaceHint, Erase, EraseRange,
- EraseKey, Swap);
+REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint,
+ InsertRange, InsertWithinCapacity,
+ InsertRangeWithinCapacity, Emplace, EmplaceHint,
+ Erase, EraseRange, EraseKey, Swap);
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h
index 7a39f628..6868e63a 100644
--- a/absl/container/node_hash_map.h
+++ b/absl/container/node_hash_map.h
@@ -41,9 +41,10 @@
#include <utility>
#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
-#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/node_slot_policy.h"
#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export
#include "absl/memory/memory.h"
@@ -77,6 +78,10 @@ class NodeHashMapPolicy;
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
+// Using `absl::node_hash_map` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash`
+// values may be randomized across dynamically loaded libraries.
+//
// Example:
//
// // Create a node hash map of three strings (that map to strings)
@@ -347,8 +352,8 @@ class node_hash_map
// `node_hash_map`.
//
// iterator try_emplace(const_iterator hint,
- // const init_type& k, Args&&... args):
- // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
+ // const key_type& k, Args&&... args):
+ // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
//
// Inserts (via copy or move) the element of the specified key into the
// `node_hash_map` using the position of `hint` as a non-binding suggestion
@@ -525,17 +530,19 @@ class node_hash_map
// erase_if(node_hash_map<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
template <typename K, typename V, typename H, typename E, typename A,
typename Predicate>
-void erase_if(node_hash_map<K, V, H, E, A>& c, Predicate pred) {
- container_internal::EraseIf(pred, &c);
+typename node_hash_map<K, V, H, E, A>::size_type erase_if(
+ node_hash_map<K, V, H, E, A>& c, Predicate pred) {
+ return container_internal::EraseIf(pred, &c);
}
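With the new return value, call sites can assert on how much was erased
without separate size() bookkeeping. A minimal usage sketch (EraseIfDemo is a
hypothetical name):

    #include <cassert>
    #include <string>
    #include "absl/container/node_hash_map.h"

    void EraseIfDemo() {
      absl::node_hash_map<int, std::string> m = {{1, "a"}, {2, "b"}, {3, "c"}};
      // erase_if returns the number of erased elements; here, the two odd keys.
      auto removed =
          absl::erase_if(m, [](const auto& kv) { return kv.first % 2 == 1; });
      assert(removed == 2);
      assert(m.size() == 1 && m.count(2) == 1);
    }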
namespace container_internal {
template <class Key, class Value>
class NodeHashMapPolicy
- : public absl::container_internal::node_hash_policy<
+ : public absl::container_internal::node_slot_policy<
std::pair<const Key, Value>&, NodeHashMapPolicy<Key, Value>> {
using value_type = std::pair<const Key, Value>;
diff --git a/absl/container/node_hash_map_test.cc b/absl/container/node_hash_map_test.cc
index 8f59a1e4..e941a836 100644
--- a/absl/container/node_hash_map_test.cc
+++ b/absl/container/node_hash_map_test.cc
@@ -223,33 +223,36 @@ TEST(NodeHashMap, EraseIf) {
// Erase all elements.
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, [](std::pair<const int, int>) { return true; });
+ EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return true; }), 5);
EXPECT_THAT(s, IsEmpty());
}
// Erase no elements.
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, [](std::pair<const int, int>) { return false; });
+ EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return false; }), 0);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3),
Pair(4, 4), Pair(5, 5)));
}
// Erase specific elements.
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s,
- [](std::pair<const int, int> kvp) { return kvp.first % 2 == 1; });
+ EXPECT_EQ(erase_if(s,
+ [](std::pair<const int, int> kvp) {
+ return kvp.first % 2 == 1;
+ }),
+ 3);
EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4)));
}
// Predicate is function reference.
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, FirstIsEven);
+ EXPECT_EQ(erase_if(s, FirstIsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
}
// Predicate is function pointer.
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, &FirstIsEven);
+ EXPECT_EQ(erase_if(s, &FirstIsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
}
}
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
index 93b15f46..f2cc70c3 100644
--- a/absl/container/node_hash_set.h
+++ b/absl/container/node_hash_set.h
@@ -38,8 +38,9 @@
#include <type_traits>
#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
-#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/node_slot_policy.h"
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
#include "absl/memory/memory.h"
@@ -73,6 +74,10 @@ struct NodeHashSetPolicy;
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
+// Using `absl::node_hash_set` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash`
+// values may be randomized across dynamically loaded libraries.
+//
// Example:
//
// // Create a node hash set of three strings
@@ -433,16 +438,18 @@ class node_hash_set
// erase_if(node_hash_set<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
template <typename T, typename H, typename E, typename A, typename Predicate>
-void erase_if(node_hash_set<T, H, E, A>& c, Predicate pred) {
- container_internal::EraseIf(pred, &c);
+typename node_hash_set<T, H, E, A>::size_type erase_if(
+ node_hash_set<T, H, E, A>& c, Predicate pred) {
+ return container_internal::EraseIf(pred, &c);
}
namespace container_internal {
template <class T>
struct NodeHashSetPolicy
- : absl::container_internal::node_hash_policy<T&, NodeHashSetPolicy<T>> {
+ : absl::container_internal::node_slot_policy<T&, NodeHashSetPolicy<T>> {
using key_type = T;
using init_type = T;
using constant_iterators = std::true_type;
diff --git a/absl/container/node_hash_set_test.cc b/absl/container/node_hash_set_test.cc
index 7ddad202..98a8dbdd 100644
--- a/absl/container/node_hash_set_test.cc
+++ b/absl/container/node_hash_set_test.cc
@@ -108,31 +108,31 @@ TEST(NodeHashSet, EraseIf) {
// Erase all elements.
{
node_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, [](int) { return true; });
+ EXPECT_EQ(erase_if(s, [](int) { return true; }), 5);
EXPECT_THAT(s, IsEmpty());
}
// Erase no elements.
{
node_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, [](int) { return false; });
+ EXPECT_EQ(erase_if(s, [](int) { return false; }), 0);
EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5));
}
// Erase specific elements.
{
node_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, [](int k) { return k % 2 == 1; });
+ EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3);
EXPECT_THAT(s, UnorderedElementsAre(2, 4));
}
// Predicate is function reference.
{
node_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, IsEven);
+ EXPECT_EQ(erase_if(s, IsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
}
// Predicate is function pointer.
{
node_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, &IsEven);
+ EXPECT_EQ(erase_if(s, &IsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
}
}
diff --git a/absl/container/sample_element_size_test.cc b/absl/container/sample_element_size_test.cc
new file mode 100644
index 00000000..b23626b4
--- /dev/null
+++ b/absl/container/sample_element_size_test.cc
@@ -0,0 +1,114 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/flat_hash_map.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/container/node_hash_map.h"
+#include "absl/container/node_hash_set.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+// Create some tables of type `Table`, then look at all the new
+// `HashtablezInfo`s to make sure that `inline_element_size ==
+// expected_element_size`. The `inline_element_size` is the amount of memory
+// allocated for each slot of a hash table, i.e. `sizeof(slot_type)`. Add
+// the new `HashtablezInfo`s to `preexisting_info`. Store all the new tables
+// into `tables`.
+template <class Table>
+void TestInlineElementSize(
+ HashtablezSampler& sampler,
+ // clang-tidy gives a false positive on this declaration. This unordered
+ // set cannot be flat_hash_set, however, since that would introduce a mutex
+ // deadlock.
+ std::unordered_set<const HashtablezInfo*>& preexisting_info, // NOLINT
+ std::vector<Table>& tables, const typename Table::value_type& elt,
+ size_t expected_element_size) {
+ for (int i = 0; i < 10; ++i) {
+ // We create a new table and must store it somewhere so that when we store
+    // a pointer to the resulting `HashtablezInfo` into `preexisting_info`,
+    // we aren't storing a dangling pointer.
+ tables.emplace_back();
+ // We must insert an element to get a hashtablez to instantiate.
+ tables.back().insert(elt);
+ }
+ size_t new_count = 0;
+ sampler.Iterate([&](const HashtablezInfo& info) {
+ if (preexisting_info.insert(&info).second) {
+ EXPECT_EQ(info.inline_element_size, expected_element_size);
+ ++new_count;
+ }
+ });
+ // Make sure we actually did get a new hashtablez.
+ EXPECT_GT(new_count, 0);
+}
+
+struct bigstruct {
+ char a[1000];
+ friend bool operator==(const bigstruct& x, const bigstruct& y) {
+ return memcmp(x.a, y.a, sizeof(x.a)) == 0;
+ }
+ template <typename H>
+ friend H AbslHashValue(H h, const bigstruct& c) {
+ return H::combine_contiguous(std::move(h), c.a, sizeof(c.a));
+ }
+};
+#endif
+
+TEST(FlatHashMap, SampleElementSize) {
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ // Enable sampling even if the prod default is off.
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(1);
+
+ auto& sampler = GlobalHashtablezSampler();
+ std::vector<flat_hash_map<int, bigstruct>> flat_map_tables;
+ std::vector<flat_hash_set<bigstruct>> flat_set_tables;
+ std::vector<node_hash_map<int, bigstruct>> node_map_tables;
+ std::vector<node_hash_set<bigstruct>> node_set_tables;
+
+ // It takes thousands of new tables after changing the sampling parameters
+  // before you actually get any instrumentation, and you must actually
+  // put something into those tables.
+ for (int i = 0; i < 10000; ++i) {
+ flat_map_tables.emplace_back();
+ flat_map_tables.back()[i] = bigstruct{};
+ }
+
+ // clang-tidy gives a false positive on this declaration. This unordered set
+ // cannot be a flat_hash_set, however, since that would introduce a mutex
+ // deadlock.
+ std::unordered_set<const HashtablezInfo*> preexisting_info; // NOLINT
+ sampler.Iterate(
+ [&](const HashtablezInfo& info) { preexisting_info.insert(&info); });
+ TestInlineElementSize(sampler, preexisting_info, flat_map_tables,
+ {0, bigstruct{}}, sizeof(int) + sizeof(bigstruct));
+ TestInlineElementSize(sampler, preexisting_info, node_map_tables,
+ {0, bigstruct{}}, sizeof(void*));
+ TestInlineElementSize(sampler, preexisting_info, flat_set_tables, //
+ bigstruct{}, sizeof(bigstruct));
+ TestInlineElementSize(sampler, preexisting_info, node_set_tables, //
+ bigstruct{}, sizeof(void*));
+#endif
+}
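On the expected sizes: the flat containers store the element inline, and since
bigstruct is a char[1000] aggregate with alignment 1, std::pair<const int,
bigstruct> has no padding, making sizeof(int) + sizeof(bigstruct) exactly the
slot size; the node containers keep only a pointer in each slot, hence
sizeof(void*).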
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl