path: root/absl/container
author    Abseil Team <absl-team@google.com>  2019-08-08 10:56:58 -0700
committer CJ Johnson <johnsoncj@google.com>  2019-08-08 14:19:45 -0400
commit    aa844899c937bde5d2b24f276b59997e5b668bde (patch)
tree      cd18e64150abc74b85bbbf6abf990f66fa47cacd /absl/container
parent    fcb104594b0bb4b8ac306cb2f55ecdad40974683 (diff)
Creation of LTS branch "lts_2019_08_08" (tag: 20190808)
- 9ee91d3e430fb33a4590486573792eb0fa146c2d Export of internal Abseil changes by Abseil Team <absl-team@google.com>
- 8efba58a3b656e9b41fb0471ae6453425a61c520 Export of internal Abseil changes by Abseil Team <absl-team@google.com>
- b49b8d16b67ec6912899684b732e6367f258cfdb Export of internal Abseil changes by Abseil Team <absl-team@google.com>
- 67222ffc4c83d918ce8395aa61769eeb77df4c4d Export of internal Abseil changes by Abseil Team <absl-team@google.com>
- c5c4db4f5191fe5e76cbf68dcc71fb28702f7d2b Export of internal Abseil changes by Abseil Team <absl-team@google.com>
- 14550beb3b7b97195e483fb74b5efb906395c31e Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 52e88ee56b72cf32bc66534d942c7398ce481331 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 36d37ab992038f52276ca66b9da80c1cf0f57dc2 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- ad1485c8986246b2ae9105e512738d0e97aec887 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- f3840bc5e33ce4932e35986cf3718450c6f02af2 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 278b26058c036833a4f7f3047d3f4d9296527f87 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- c6c3c1b498e4ee939b24be59cae29d59c3863be8 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 44efe96dfca674a17b45ca53fc77fb69f1e29bf4 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 3c98fcc0461bd2a4b9c149d4748a7373a225cf4b Merge pull request #340 from jtsylve/macos_cxx17_fix by Matt Calabrese <38107210+mattcalabrese-google@users.noreply.github.com>
- 74d91756c11bc22f9b0108b94da9326f7f9e376f Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- e6b050212c859fbaf67abac76105da10ec348274 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- c964fcffac27bd4a9ff67fe393410dd1146ef8b8 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 72e09a54d993b192db32be14c65adf7e9bd08c31 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- d65e19dfcd8697076f68598c0131c6930cdcd74d Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 5162fc83d2f3b79a9753ed59594c43966afdd37a Merge pull request #336 from shields/patch-2 by Shaindel Schwartz <31392632+shaindelschwartz@users.noreply.github.com>
- 0389f7bf58fa41f35b3ad60be61d32f31e4f8ed6 Merge pull request #335 from shields/patch-1 by Shaindel Schwartz <31392632+shaindelschwartz@users.noreply.github.com>
- e9324d926a9189e222741fce6e676f0944661a72 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 43ef2148c0936ebf7cb4be6b19927a9d9d145b8f Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- a13d3df2b3ba68aeead92e2d078fba0510d55024 Merge pull request #323 from gosnik/master by Gennadiy Rozental <rogeeff@google.com>
- 310a11865c97c5cdcc42a4ee2c2e3578423afe69 Merge pull request #324 from RasPat1/patch-1 by Gennadiy Rozental <rogeeff@google.com>
- 8f11724067248acc330b4d1f12f0c76d03f2cfb1 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- b1dd425423380126f6441ce4fbb6f8f6c75b793a Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 361cb8a9db2f2130442389fd80593255be26d681 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 0238ab0a831f179518c1a814f9584e99da2d75a3 Merge pull request #321 from christoph-cullmann/c4245_fix... by Xiaoyi Zhang <zhangxy988@gmail.com>
- 61c9bf3e3e1c28a4aa6d7f1be4b37fd473bb5529 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- bc9101f9982391019521161a36179b52555ed212 Merge pull request #320 from christoph-cullmann/master by Xiaoyi Zhang <zhangxy988@gmail.com>
- 2f76a9bf50046e396138cc8eeb3cdc17b7a5ac24 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 4adaf5490921f13028b55018c9f550277de5aebb Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 27c30ec671cb7b5ba84c4e79feff7fd0b0ac6338 Avoid undefined behavior when nullptr is passed to memcpy... by Roman Gershman <romange@gmail.com>
- ce65f5ac3cbf897bb5e3de1a51d80fd00866abaa Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- a18fc7461e7409c2ad64e28537261db1e02e76fa Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 8a394b19c149cab50534b04c5e21d42bc2217a7d Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- daf381e8535a1f1f1b8a75966a74e7cca63dee89 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- fa00c321073c7ea40a4fc3dfc8a06309eae3d025 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 436ba6c4a0ea3a06eca6e055f9c8d296bf3bae12 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 0cbdc774b97f7e80ab60dbe2ed4eaca3b2e33fc8 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 27c2f6e2f3b5929fbd322b0f0ca392eb02efd9f8 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- aa468ad75539619b47979911297efbb629c52e44 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- cd86d0d20ab167c33b23d3875db68d1d4bad3a3b Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 33841c5c963aa9c3f096ef8e6c1e71624b941940 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- ca3f87560a0eef716195cadf66dc6b938a579ec6 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- d902eb869bcfacc1bad14933ed9af4bed006d481 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- a02f62f456f2c4a7ecf2be3104fe0c6e16fbad9a Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 0b545b460141b882b244a1efcef7621d59278160 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- dbae8764fbd429bf7d7745e24bcf73962177a7c0 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 044da8a29c923506af0f0b46bc46f43c1e1300b5 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 6cc6ac44e065b9e8975fadfd6ccb99cbcf89aac4 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 666fc1266bccfd8e6eaaa084e7b42580bb8eb199 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 93dfcf74cb5fccae3da07897d8613ae6cab958a0 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 2c8421e1c6cef0da9e8a20b01c15256ec9ec116d Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 5b65c4af5107176555b23a638e5947686410ac1f Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- eab2078b53c9e3d9d240135c09d27e3393acb50a Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 253eb7416421661873afbaa33828a850db978541 [CMake] Set correct flags for clang-cl (#278) by Loo Rong Jie <loorongjie@gmail.com>
- e75672f6afc7e8f23ee7b532e86d1b3b9be3984e Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- bf29470384a101b307873b26d358433138c857fc Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 6fd827124facd8336981e73218997f9e73029b4f Merge pull request #280 from chiumichael/master by Derek Mauro <761129+derekmauro@users.noreply.github.com>
- 7c7754fb3ed9ffb57d35fe8658f3ba4d73a31e72 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 256be563447a315f2a7993ec669460ba475fa86a Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 88a152ae747c3c42dc9167d46c590929b048d436 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- c1cecb25a94c075725e9d2640f6b978a8f61957b Implement Span::first and Span::last from C++20 (#274) by Girts <girtsf@users.noreply.github.com>
- 38b704384cd2f17590b3922b97744be0b43622c9 Changed HTTP URLs to HTTPS where possible (#270) by nik7273 <nik8470@gmail.com>
- febc5ee6a92d0eb7dac1fceaa6c648cf6521b4dc Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 9fdf5e5b805412cb2a2e624d3e9a11588120465f Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 419f3184f8ebcdb23105295eadd2a569f3351eb9 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- b312c3cb53a0aad75a85ac2bf57c4a614fbd48d4 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 308ce31528a7edfa39f5f6d36142278a0ae1bf45 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 93d155bc4414f6c121bb1f19dba9fdb27c8943bc Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 426eaa4aa44e4580418bee46c1bd13911151bfb1 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 2901ec32a919311384d6ad4194e2d927c06831f7 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- d78310fe5a82f2e0e6e16509ef8079c8d7e4674e Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- a4cb1c8ba61531a63f9d309eea01ac1d43d8371d Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 540e2537b92cd4abfae6ceddfe24304345461f32 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 89ea0c5ff34aaa5855cfc7aa41f323b8a0ef0ede Merge pull request #255 from uilianries/hotfix/conan by ahedberg <ahedberg@google.com>
- 5e0dcf72c64fae912184d2e0de87195fe8f0a425 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 0dffca4e36791c7beeda04da10460b534283948a Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 6b4201f9ef650637510a21b8d6cbcc3bee4a606f Fix GCC8 warnings by Boris Staletic <boris.staletic@gmail.com>
- 0b1e6d417b414aad9282e32e8c49c719edeb63c1 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- efccc502606bed768e50a6cd5806d8eb13e4e935 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 5e6a78131f7bd5940218462c07d88cdefdd75dbe Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 5eea0f713c14ac17788b83e496f11903f8e2bbb0 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 66f9becbb98ecc083f4db349b4b1e0ca9de93b15 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 018b4db1d73ec8238e6dc4b17fd9e1fd7468d0ed Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 9449ae94397f2fd683851348e25ed8c93f75b3b9 Merge pull request #243 from ThomsonTan/FixIntrinsic by Alex Strelnikov <strel@google.com>
- b16aeb6756bdab08cdf12d40baab5b51f7d15b16 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 7ffbe09f3d85504bd018783bbe1e2c12992fe47c Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 01b471d9f3ebef27f5aaca14b66509099fa8cd6c Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 7bd8f36c741c7cbe311611d7981bf38ba04c6fef Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 968a34ffdaadd7db062a9621dfbdf8b2d16e05af Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 3e2e9b5557e76d098de4b8a2a659125b98ca519b Merge pull request #231 from uilianries/feature/conan by Mark Barolak <mbxx@users.noreply.github.com>
- 111ca7060a6ff50115ca85b59f6b5d8c8c5e9105 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 389ec3f906f018661a5308458d623d01f96d7b23 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 8fbcdb90952c57828c4a9c2f6d79fcd7cae9088f Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 455dc17ba1af9635f0b60155bc565bc572a1e722 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- f197d7c72a54064cfde5a2058f1513a4a0ee36fb Export of internal Abseil changes. by Abseil Team <absl-team@google.com>
- 284378a71b32dfb3af4e3661f585e671d1b603a3 Export of internal Abseil changes. by Abseil Team <absl-team@google.com>

GitOrigin-RevId: 9ee91d3e430fb33a4590486573792eb0fa146c2d
Change-Id: Ia06e548bc106cc9d136f6c65714be6645317aced
Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/BUILD.bazel | 197
-rw-r--r--  absl/container/CMakeLists.txt | 190
-rw-r--r--  absl/container/fixed_array.h | 7
-rw-r--r--  absl/container/fixed_array_benchmark.cc | 11
-rw-r--r--  absl/container/fixed_array_exception_safety_test.cc | 11
-rw-r--r--  absl/container/fixed_array_test.cc | 77
-rw-r--r--  absl/container/flat_hash_map.h | 25
-rw-r--r--  absl/container/flat_hash_map_test.cc | 32
-rw-r--r--  absl/container/flat_hash_set.h | 20
-rw-r--r--  absl/container/flat_hash_set_test.cc | 14
-rw-r--r--  absl/container/inlined_vector.h | 1504
-rw-r--r--  absl/container/inlined_vector_benchmark.cc | 561
-rw-r--r--  absl/container/inlined_vector_exception_safety_test.cc | 489
-rw-r--r--  absl/container/inlined_vector_test.cc | 230
-rw-r--r--  absl/container/internal/common.h | 198
-rw-r--r--  absl/container/internal/compressed_tuple.h | 162
-rw-r--r--  absl/container/internal/compressed_tuple_test.cc | 269
-rw-r--r--  absl/container/internal/container_memory.h | 69
-rw-r--r--  absl/container/internal/container_memory_test.cc | 6
-rw-r--r--  absl/container/internal/counting_allocator.h | 81
-rw-r--r--  absl/container/internal/hash_function_defaults.h | 11
-rw-r--r--  absl/container/internal/hash_function_defaults_test.cc | 28
-rw-r--r--  absl/container/internal/hash_generator_testing.cc | 6
-rw-r--r--  absl/container/internal/hash_generator_testing.h | 6
-rw-r--r--  absl/container/internal/hash_policy_testing.h | 6
-rw-r--r--  absl/container/internal/hash_policy_testing_test.cc | 6
-rw-r--r--  absl/container/internal/hash_policy_traits.h | 6
-rw-r--r--  absl/container/internal/hash_policy_traits_test.cc | 6
-rw-r--r--  absl/container/internal/hashtable_debug.h | 8
-rw-r--r--  absl/container/internal/hashtable_debug_hooks.h | 6
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc | 310
-rw-r--r--  absl/container/internal/hashtablez_sampler.h | 290
-rw-r--r--  absl/container/internal/hashtablez_sampler_force_weak_definition.cc | 29
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc | 357
-rw-r--r--  absl/container/internal/have_sse.h | 49
-rw-r--r--  absl/container/internal/inlined_vector.h | 895
-rw-r--r--  absl/container/internal/layout.h | 9
-rw-r--r--  absl/container/internal/layout_test.cc | 34
-rw-r--r--  absl/container/internal/node_hash_policy.h | 6
-rw-r--r--  absl/container/internal/node_hash_policy_test.cc | 6
-rw-r--r--  absl/container/internal/raw_hash_map.h | 21
-rw-r--r--  absl/container/internal/raw_hash_set.cc | 6
-rw-r--r--  absl/container/internal/raw_hash_set.h | 392
-rw-r--r--  absl/container/internal/raw_hash_set_allocator_test.cc | 6
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc | 304
-rw-r--r--  absl/container/internal/test_instance_tracker.cc | 6
-rw-r--r--  absl/container/internal/test_instance_tracker.h | 18
-rw-r--r--  absl/container/internal/test_instance_tracker_test.cc | 4
-rw-r--r--  absl/container/internal/tracked.h | 6
-rw-r--r--  absl/container/internal/unordered_map_constructor_test.h | 148
-rw-r--r--  absl/container/internal/unordered_map_lookup_test.h | 8
-rw-r--r--  absl/container/internal/unordered_map_members_test.h | 87
-rw-r--r--  absl/container/internal/unordered_map_modifiers_test.h | 8
-rw-r--r--  absl/container/internal/unordered_map_test.cc | 14
-rw-r--r--  absl/container/internal/unordered_set_constructor_test.h | 155
-rw-r--r--  absl/container/internal/unordered_set_lookup_test.h | 8
-rw-r--r--  absl/container/internal/unordered_set_members_test.h | 86
-rw-r--r--  absl/container/internal/unordered_set_modifiers_test.h | 8
-rw-r--r--  absl/container/internal/unordered_set_test.cc | 24
-rw-r--r--  absl/container/node_hash_map.h | 6
-rw-r--r--  absl/container/node_hash_map_test.cc | 14
-rw-r--r--  absl/container/node_hash_set.h | 6
-rw-r--r--  absl/container/node_hash_set_test.cc | 16
63 files changed, 5574 insertions, 2004 deletions
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index afc869f4..9e2a5b1e 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -5,7 +5,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,11 +15,12 @@
#
load(
- "//absl:copts.bzl",
+ "//absl:copts/configure_copts.bzl",
"ABSL_DEFAULT_COPTS",
- "ABSL_TEST_COPTS",
+ "ABSL_DEFAULT_LINKOPTS",
"ABSL_EXCEPTIONS_FLAG",
"ABSL_EXCEPTIONS_FLAG_LINKOPTS",
+ "ABSL_TEST_COPTS",
)
package(default_visibility = ["//visibility:public"])
@@ -30,6 +31,7 @@ cc_library(
name = "compressed_tuple",
hdrs = ["internal/compressed_tuple.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/utility",
],
@@ -39,8 +41,14 @@ cc_test(
name = "compressed_tuple_test",
srcs = ["internal/compressed_tuple_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":compressed_tuple",
+ ":test_instance_tracker",
+ "//absl/memory",
+ "//absl/types:any",
+ "//absl/types:optional",
+ "//absl/utility",
"@com_google_googletest//:gtest_main",
],
)
@@ -49,6 +57,7 @@ cc_library(
name = "fixed_array",
hdrs = ["fixed_array.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":compressed_tuple",
"//absl/algorithm",
@@ -63,7 +72,7 @@ cc_test(
name = "fixed_array_test",
srcs = ["fixed_array_test.cc"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
- linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS,
+ linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
deps = [
":fixed_array",
"//absl/base:exception_testing",
@@ -77,6 +86,7 @@ cc_test(
name = "fixed_array_test_noexceptions",
srcs = ["fixed_array_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":fixed_array",
"//absl/base:exception_testing",
@@ -90,7 +100,7 @@ cc_test(
name = "fixed_array_exception_safety_test",
srcs = ["fixed_array_exception_safety_test.cc"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
- linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS,
+ linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
deps = [
":fixed_array",
"//absl/base:exception_safety_testing",
@@ -102,6 +112,7 @@ cc_test(
name = "fixed_array_benchmark",
srcs = ["fixed_array_benchmark.cc"],
copts = ABSL_TEST_COPTS + ["$(STACK_FRAME_UNLIMITED)"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["benchmark"],
deps = [
":fixed_array",
@@ -110,10 +121,26 @@ cc_test(
)
cc_library(
+ name = "inlined_vector_internal",
+ hdrs = ["internal/inlined_vector.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":compressed_tuple",
+ "//absl/base:core_headers",
+ "//absl/memory",
+ "//absl/meta:type_traits",
+ "//absl/types:span",
+ ],
+)
+
+cc_library(
name = "inlined_vector",
hdrs = ["inlined_vector.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
+ ":inlined_vector_internal",
"//absl/algorithm",
"//absl/base:core_headers",
"//absl/base:throw_delegate",
@@ -121,12 +148,22 @@ cc_library(
],
)
+cc_library(
+ name = "counting_allocator",
+ testonly = 1,
+ hdrs = ["internal/counting_allocator.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = ["//visibility:private"],
+)
+
cc_test(
name = "inlined_vector_test",
srcs = ["inlined_vector_test.cc"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
- linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS,
+ linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
deps = [
+ ":counting_allocator",
":inlined_vector",
":test_instance_tracker",
"//absl/base",
@@ -143,7 +180,9 @@ cc_test(
name = "inlined_vector_test_noexceptions",
srcs = ["inlined_vector_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
+ ":counting_allocator",
":inlined_vector",
":test_instance_tracker",
"//absl/base",
@@ -160,30 +199,46 @@ cc_test(
name = "inlined_vector_benchmark",
srcs = ["inlined_vector_benchmark.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["benchmark"],
deps = [
":inlined_vector",
"//absl/base",
+ "//absl/base:core_headers",
"//absl/strings",
"@com_github_google_benchmark//:benchmark_main",
],
)
+cc_test(
+ name = "inlined_vector_exception_safety_test",
+ srcs = ["inlined_vector_exception_safety_test.cc"],
+ copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
+ deps = [
+ ":inlined_vector",
+ "//absl/base:exception_safety_testing",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
cc_library(
name = "test_instance_tracker",
testonly = 1,
srcs = ["internal/test_instance_tracker.cc"],
hdrs = ["internal/test_instance_tracker.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
+ deps = ["//absl/types:compare"],
)
cc_test(
name = "test_instance_tracker_test",
srcs = ["internal/test_instance_tracker_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":test_instance_tracker",
"@com_google_googletest//:gtest_main",
@@ -208,6 +263,7 @@ cc_library(
name = "flat_hash_map",
hdrs = ["flat_hash_map.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container_memory",
":hash_function_defaults",
@@ -220,13 +276,15 @@ cc_library(
cc_test(
name = "flat_hash_map_test",
srcs = ["flat_hash_map_test.cc"],
- copts = ABSL_TEST_COPTS + ["-DUNORDERED_MAP_CXX17"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":flat_hash_map",
":hash_generator_testing",
":unordered_map_constructor_test",
":unordered_map_lookup_test",
+ ":unordered_map_members_test",
":unordered_map_modifiers_test",
"//absl/types:any",
"@com_google_googletest//:gtest_main",
@@ -237,6 +295,7 @@ cc_library(
name = "flat_hash_set",
hdrs = ["flat_hash_set.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container_memory",
":hash_function_defaults",
@@ -251,12 +310,14 @@ cc_test(
name = "flat_hash_set_test",
srcs = ["flat_hash_set_test.cc"],
copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":flat_hash_set",
":hash_generator_testing",
":unordered_set_constructor_test",
":unordered_set_lookup_test",
+ ":unordered_set_members_test",
":unordered_set_modifiers_test",
"//absl/memory",
"//absl/strings",
@@ -268,6 +329,7 @@ cc_library(
name = "node_hash_map",
hdrs = ["node_hash_map.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container_memory",
":hash_function_defaults",
@@ -281,7 +343,8 @@ cc_library(
cc_test(
name = "node_hash_map_test",
srcs = ["node_hash_map_test.cc"],
- copts = ABSL_TEST_COPTS + ["-DUNORDERED_MAP_CXX17"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":hash_generator_testing",
@@ -289,6 +352,7 @@ cc_test(
":tracked",
":unordered_map_constructor_test",
":unordered_map_lookup_test",
+ ":unordered_map_members_test",
":unordered_map_modifiers_test",
"@com_google_googletest//:gtest_main",
],
@@ -298,6 +362,7 @@ cc_library(
name = "node_hash_set",
hdrs = ["node_hash_set.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_function_defaults",
":node_hash_policy",
@@ -311,12 +376,13 @@ cc_test(
name = "node_hash_set_test",
srcs = ["node_hash_set_test.cc"],
copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
- ":hash_generator_testing",
":node_hash_set",
":unordered_set_constructor_test",
":unordered_set_lookup_test",
+ ":unordered_set_members_test",
":unordered_set_modifiers_test",
"@com_google_googletest//:gtest_main",
],
@@ -326,6 +392,7 @@ cc_library(
name = "container_memory",
hdrs = ["internal/container_memory.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/memory",
"//absl/utility",
@@ -336,6 +403,7 @@ cc_test(
name = "container_memory_test",
srcs = ["internal/container_memory_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":container_memory",
@@ -348,6 +416,7 @@ cc_library(
name = "hash_function_defaults",
hdrs = ["internal/hash_function_defaults.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:config",
"//absl/hash",
@@ -359,6 +428,7 @@ cc_test(
name = "hash_function_defaults_test",
srcs = ["internal/hash_function_defaults_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS,
deps = [
":hash_function_defaults",
@@ -374,6 +444,7 @@ cc_library(
srcs = ["internal/hash_generator_testing.cc"],
hdrs = ["internal/hash_generator_testing.h"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_testing",
"//absl/meta:type_traits",
@@ -386,6 +457,7 @@ cc_library(
testonly = 1,
hdrs = ["internal/hash_policy_testing.h"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/hash",
"//absl/strings",
@@ -396,6 +468,7 @@ cc_test(
name = "hash_policy_testing_test",
srcs = ["internal/hash_policy_testing_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_testing",
"@com_google_googletest//:gtest_main",
@@ -406,6 +479,7 @@ cc_library(
name = "hash_policy_traits",
hdrs = ["internal/hash_policy_traits.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = ["//absl/meta:type_traits"],
)
@@ -413,6 +487,7 @@ cc_test(
name = "hash_policy_traits_test",
srcs = ["internal/hash_policy_traits_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_traits",
"@com_google_googletest//:gtest_main",
@@ -423,6 +498,7 @@ cc_library(
name = "hashtable_debug",
hdrs = ["internal/hashtable_debug.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hashtable_debug_hooks",
],
@@ -432,18 +508,56 @@ cc_library(
name = "hashtable_debug_hooks",
hdrs = ["internal/hashtable_debug_hooks.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+)
+
+cc_library(
+ name = "hashtablez_sampler",
+ srcs = [
+ "internal/hashtablez_sampler.cc",
+ "internal/hashtablez_sampler_force_weak_definition.cc",
+ ],
+ hdrs = ["internal/hashtablez_sampler.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":have_sse",
+ "//absl/base",
+ "//absl/base:core_headers",
+ "//absl/debugging:stacktrace",
+ "//absl/memory",
+ "//absl/synchronization",
+ "//absl/utility",
+ ],
+)
+
+cc_test(
+ name = "hashtablez_sampler_test",
+ srcs = ["internal/hashtablez_sampler_test.cc"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hashtablez_sampler",
+ ":have_sse",
+ "//absl/base:core_headers",
+ "//absl/synchronization",
+ "//absl/synchronization:thread_pool",
+ "//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
)
cc_library(
name = "node_hash_policy",
hdrs = ["internal/node_hash_policy.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
)
cc_test(
name = "node_hash_policy_test",
srcs = ["internal/node_hash_policy_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_traits",
":node_hash_policy",
@@ -455,9 +569,30 @@ cc_library(
name = "raw_hash_map",
hdrs = ["internal/raw_hash_map.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container_memory",
":raw_hash_set",
+ "//absl/base:throw_delegate",
+ ],
+)
+
+cc_library(
+ name = "have_sse",
+ hdrs = ["internal/have_sse.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = ["//visibility:private"],
+)
+
+cc_library(
+ name = "common",
+ hdrs = ["internal/common.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/meta:type_traits",
+ "//absl/types:optional",
],
)
@@ -466,11 +601,15 @@ cc_library(
srcs = ["internal/raw_hash_set.cc"],
hdrs = ["internal/raw_hash_set.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
+ ":common",
":compressed_tuple",
":container_memory",
":hash_policy_traits",
":hashtable_debug_hooks",
+ ":hashtablez_sampler",
+ ":have_sse",
":layout",
"//absl/base:bits",
"//absl/base:config",
@@ -478,7 +617,6 @@ cc_library(
"//absl/base:endian",
"//absl/memory",
"//absl/meta:type_traits",
- "//absl/types:optional",
"//absl/utility",
],
)
@@ -507,6 +645,7 @@ cc_test(
size = "small",
srcs = ["internal/raw_hash_set_allocator_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":raw_hash_set",
":tracked",
@@ -519,6 +658,7 @@ cc_library(
name = "layout",
hdrs = ["internal/layout.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:core_headers",
"//absl/meta:type_traits",
@@ -533,6 +673,7 @@ cc_test(
size = "small",
srcs = ["internal/layout_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS,
visibility = ["//visibility:private"],
deps = [
@@ -549,6 +690,7 @@ cc_library(
testonly = 1,
hdrs = ["internal/tracked.h"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
)
cc_library(
@@ -556,6 +698,7 @@ cc_library(
testonly = 1,
hdrs = ["internal/unordered_map_constructor_test.h"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
@@ -568,6 +711,7 @@ cc_library(
testonly = 1,
hdrs = ["internal/unordered_map_lookup_test.h"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
@@ -580,6 +724,7 @@ cc_library(
testonly = 1,
hdrs = ["internal/unordered_map_modifiers_test.h"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
@@ -592,9 +737,35 @@ cc_library(
testonly = 1,
hdrs = ["internal/unordered_set_constructor_test.h"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
+ "//absl/meta:type_traits",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "unordered_set_members_test",
+ testonly = 1,
+ hdrs = ["internal/unordered_set_members_test.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/meta:type_traits",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "unordered_map_members_test",
+ testonly = 1,
+ hdrs = ["internal/unordered_map_members_test.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/meta:type_traits",
"@com_google_googletest//:gtest",
],
)
@@ -604,6 +775,7 @@ cc_library(
testonly = 1,
hdrs = ["internal/unordered_set_lookup_test.h"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
@@ -616,6 +788,7 @@ cc_library(
testonly = 1,
hdrs = ["internal/unordered_set_modifiers_test.h"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
@@ -627,10 +800,12 @@ cc_test(
name = "unordered_set_test",
srcs = ["internal/unordered_set_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":unordered_set_constructor_test",
":unordered_set_lookup_test",
+ ":unordered_set_members_test",
":unordered_set_modifiers_test",
"@com_google_googletest//:gtest_main",
],
@@ -640,10 +815,12 @@ cc_test(
name = "unordered_map_test",
srcs = ["internal/unordered_map_test.cc"],
copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":unordered_map_constructor_test",
":unordered_map_lookup_test",
+ ":unordered_map_members_test",
":unordered_map_modifiers_test",
"@com_google_googletest//:gtest_main",
],
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index 8605facc..7988b12f 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -5,7 +5,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,10 +20,6 @@
absl_cc_library(
NAME
container
- SRCS
- "internal/raw_hash_set.cc"
- COPTS
- ${ABSL_DEFAULT_COPTS}
PUBLIC
)
@@ -31,7 +27,9 @@ absl_cc_library(
NAME
compressed_tuple
HDRS
- "internal/compressed_tuple.h"
+ "internal/compressed_tuple.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
DEPS
absl::utility
PUBLIC
@@ -42,8 +40,15 @@ absl_cc_test(
compressed_tuple_test
SRCS
"internal/compressed_tuple_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
+ absl::any
absl::compressed_tuple
+ absl::memory
+ absl::optional
+ absl::test_instance_tracker
+ absl::utility
gmock_main
)
@@ -70,6 +75,7 @@ absl_cc_test(
SRCS
"fixed_array_test.cc"
COPTS
+ ${ABSL_TEST_COPTS}
${ABSL_EXCEPTIONS_FLAG}
LINKOPTS
${ABSL_EXCEPTIONS_FLAG_LINKOPTS}
@@ -86,6 +92,8 @@ absl_cc_test(
fixed_array_test_noexceptions
SRCS
"fixed_array_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::fixed_array
absl::exception_testing
@@ -100,6 +108,7 @@ absl_cc_test(
SRCS
"fixed_array_exception_safety_test.cc"
COPTS
+ ${ABSL_TEST_COPTS}
${ABSL_EXCEPTIONS_FLAG}
LINKOPTS
${ABSL_EXCEPTIONS_FLAG_LINKOPTS}
@@ -111,6 +120,22 @@ absl_cc_test(
absl_cc_library(
NAME
+ inlined_vector_internal
+ HDRS
+ "internal/inlined_vector.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::compressed_tuple
+ absl::core_headers
+ absl::memory
+ absl::span
+ absl::type_traits
+ PUBLIC
+)
+
+absl_cc_library(
+ NAME
inlined_vector
HDRS
"inlined_vector.h"
@@ -119,21 +144,33 @@ absl_cc_library(
DEPS
absl::algorithm
absl::core_headers
+ absl::inlined_vector_internal
absl::throw_delegate
absl::memory
PUBLIC
)
+absl_cc_library(
+ NAME
+ counting_allocator
+ HDRS
+ "internal/counting_allocator.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+)
+
absl_cc_test(
NAME
inlined_vector_test
SRCS
"inlined_vector_test.cc"
COPTS
+ ${ABSL_TEST_COPTS}
${ABSL_EXCEPTIONS_FLAG}
LINKOPTS
${ABSL_EXCEPTIONS_FLAG_LINKOPTS}
DEPS
+ absl::counting_allocator
absl::inlined_vector
absl::test_instance_tracker
absl::base
@@ -150,6 +187,8 @@ absl_cc_test(
inlined_vector_test_noexceptions
SRCS
"inlined_vector_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::inlined_vector
absl::test_instance_tracker
@@ -162,6 +201,22 @@ absl_cc_test(
gmock_main
)
+absl_cc_test(
+ NAME
+ inlined_vector_exception_safety_test
+ SRCS
+ "inlined_vector_exception_safety_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ ${ABSL_EXCEPTIONS_FLAG}
+ LINKOPTS
+ ${ABSL_EXCEPTIONS_FLAG_LINKOPTS}
+ DEPS
+ absl::inlined_vector
+ absl::exception_safety_testing
+ gmock_main
+)
+
absl_cc_library(
NAME
test_instance_tracker
@@ -171,6 +226,8 @@ absl_cc_library(
"internal/test_instance_tracker.cc"
COPTS
${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::compare
TESTONLY
)
@@ -179,6 +236,8 @@ absl_cc_test(
test_instance_tracker_test
SRCS
"internal/test_instance_tracker_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::test_instance_tracker
gmock_main
@@ -206,12 +265,13 @@ absl_cc_test(
SRCS
"flat_hash_map_test.cc"
COPTS
- "-DUNORDERED_MAP_CXX17"
+ ${ABSL_TEST_COPTS}
DEPS
absl::flat_hash_map
absl::hash_generator_testing
absl::unordered_map_constructor_test
absl::unordered_map_lookup_test
+ absl::unordered_map_members_test
absl::unordered_map_modifiers_test
absl::any
gmock_main
@@ -240,12 +300,14 @@ absl_cc_test(
SRCS
"flat_hash_set_test.cc"
COPTS
+ ${ABSL_TEST_COPTS}
"-DUNORDERED_SET_CXX17"
DEPS
absl::flat_hash_set
absl::hash_generator_testing
absl::unordered_set_constructor_test
absl::unordered_set_lookup_test
+ absl::unordered_set_members_test
absl::unordered_set_modifiers_test
absl::memory
absl::strings
@@ -275,13 +337,14 @@ absl_cc_test(
SRCS
"node_hash_map_test.cc"
COPTS
- "-DUNORDERED_MAP_CXX17"
+ ${ABSL_TEST_COPTS}
DEPS
absl::hash_generator_testing
absl::node_hash_map
absl::tracked
absl::unordered_map_constructor_test
absl::unordered_map_lookup_test
+ absl::unordered_map_members_test
absl::unordered_map_modifiers_test
gmock_main
)
@@ -308,12 +371,14 @@ absl_cc_test(
SRCS
"node_hash_set_test.cc"
COPTS
+ ${ABSL_TEST_COPTS}
"-DUNORDERED_SET_CXX17"
DEPS
absl::hash_generator_testing
absl::node_hash_set
absl::unordered_set_constructor_test
absl::unordered_set_lookup_test
+ absl::unordered_set_members_test
absl::unordered_set_modifiers_test
gmock_main
)
@@ -336,6 +401,8 @@ absl_cc_test(
container_memory_test
SRCS
"internal/container_memory_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::container_memory
absl::strings
@@ -361,6 +428,8 @@ absl_cc_test(
hash_function_defaults_test
SRCS
"internal/hash_function_defaults_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::hash_function_defaults
absl::hash
@@ -402,6 +471,8 @@ absl_cc_test(
hash_policy_testing_test
SRCS
"internal/hash_policy_testing_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_testing
gmock_main
@@ -424,6 +495,8 @@ absl_cc_test(
hash_policy_traits_test
SRCS
"internal/hash_policy_traits_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_traits
gmock_main
@@ -431,6 +504,35 @@ absl_cc_test(
absl_cc_library(
NAME
+ hashtablez_sampler
+ HDRS
+ "internal/hashtablez_sampler.h"
+ SRCS
+ "internal/hashtablez_sampler.cc"
+ "internal/hashtablez_sampler_force_weak_definition.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::base
+ absl::have_sse
+ absl::synchronization
+)
+
+absl_cc_test(
+ NAME
+ hashtablez_sampler_test
+ SRCS
+ "internal/hashtablez_sampler_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hashtablez_sampler
+ absl::have_sse
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
hashtable_debug
HDRS
"internal/hashtable_debug.h"
@@ -452,6 +554,15 @@ absl_cc_library(
absl_cc_library(
NAME
+ have_sse
+ HDRS
+ "internal/have_sse.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+)
+
+absl_cc_library(
+ NAME
node_hash_policy
HDRS
"internal/node_hash_policy.h"
@@ -465,6 +576,8 @@ absl_cc_test(
node_hash_policy_test
SRCS
"internal/node_hash_policy_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_traits
absl::node_hash_policy
@@ -481,11 +594,23 @@ absl_cc_library(
DEPS
absl::container_memory
absl::raw_hash_set
+ absl::throw_delegate
PUBLIC
)
absl_cc_library(
NAME
+ container_common
+ HDRS
+ "internal/commom.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::type_traits
+)
+
+absl_cc_library(
+ NAME
raw_hash_set
HDRS
"internal/raw_hash_set.h"
@@ -494,19 +619,22 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
+ absl::bits
absl::compressed_tuple
+ absl::config
+ absl::container_common
absl::container_memory
+ absl::core_headers
+ absl::endian
absl::hash_policy_traits
absl::hashtable_debug_hooks
+ absl::have_sse
absl::layout
- absl::bits
- absl::config
- absl::core_headers
- absl::endian
absl::memory
absl::meta
absl::optional
absl::utility
+ absl::hashtablez_sampler
PUBLIC
)
@@ -515,6 +643,8 @@ absl_cc_test(
raw_hash_set_test
SRCS
"internal/raw_hash_set_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::container_memory
absl::hash_function_defaults
@@ -532,6 +662,8 @@ absl_cc_test(
raw_hash_set_allocator_test
SRCS
"internal/raw_hash_set_allocator_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::raw_hash_set
absl::tracked
@@ -560,6 +692,8 @@ absl_cc_test(
layout_test
SRCS
"internal/layout_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::layout
absl::base
@@ -608,6 +742,19 @@ absl_cc_library(
absl_cc_library(
NAME
+ unordered_map_members_test
+ HDRS
+ "internal/unordered_map_members_test.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::type_traits
+ gmock
+ TESTONLY
+)
+
+absl_cc_library(
+ NAME
unordered_map_modifiers_test
HDRS
"internal/unordered_map_modifiers_test.h"
@@ -650,6 +797,19 @@ absl_cc_library(
absl_cc_library(
NAME
+ unordered_set_members_test
+ HDRS
+ "internal/unordered_set_members_test.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::type_traits
+ gmock
+ TESTONLY
+)
+
+absl_cc_library(
+ NAME
unordered_set_modifiers_test
HDRS
"internal/unordered_set_modifiers_test.h"
@@ -667,9 +827,12 @@ absl_cc_test(
unordered_set_test
SRCS
"internal/unordered_set_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::unordered_set_constructor_test
absl::unordered_set_lookup_test
+ absl::unordered_set_members_test
absl::unordered_set_modifiers_test
gmock_main
)
@@ -679,9 +842,12 @@ absl_cc_test(
unordered_map_test
SRCS
"internal/unordered_map_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
DEPS
absl::unordered_map_constructor_test
absl::unordered_map_lookup_test
+ absl::unordered_map_members_test
absl::unordered_map_modifiers_test
gmock_main
)
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
index 7f6a3afd..1e0da5eb 100644
--- a/absl/container/fixed_array.h
+++ b/absl/container/fixed_array.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -51,7 +51,7 @@
#include "absl/memory/memory.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
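
The rename above is how Abseil versions an LTS release: each snapshot lives in a differently named inline namespace, so its symbols mangle uniquely while user code keeps writing plain absl:: names. A minimal sketch of the mechanism, using hypothetical names rather than Abseil's own:

    namespace mylib {
    inline namespace v2019 {  // hypothetical version tag, one per release
    int answer() { return 42; }
    }  // inline namespace v2019
    }  // namespace mylib

    // Callers use the unversioned name; it resolves to mylib::v2019::answer(),
    // so two differently tagged releases cannot collide at link time.
    int x = mylib::answer();
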
@@ -515,6 +515,7 @@ void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct(
#endif // ADDRESS_SANITIZER
static_cast<void>(n); // Mark used when not in asan mode
}
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
+
#endif // ABSL_CONTAINER_FIXED_ARRAY_H_
diff --git a/absl/container/fixed_array_benchmark.cc b/absl/container/fixed_array_benchmark.cc
index b4f0cf2a..3c7a5a72 100644
--- a/absl/container/fixed_array_benchmark.cc
+++ b/absl/container/fixed_array_benchmark.cc
@@ -1,10 +1,10 @@
-// Copyright 2017 The Abseil Authors.
+// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "absl/container/fixed_array.h"
-
#include <stddef.h>
+
#include <string>
#include "benchmark/benchmark.h"
+#include "absl/container/fixed_array.h"
namespace {
@@ -25,8 +25,9 @@ namespace {
// set an int to a constant..
class SimpleClass {
public:
- SimpleClass() : i(3) { }
+ SimpleClass() : i(3) {}
~SimpleClass() { i = 0; }
+
private:
int i;
};
diff --git a/absl/container/fixed_array_exception_safety_test.cc b/absl/container/fixed_array_exception_safety_test.cc
index 4d0430b3..4a67bb46 100644
--- a/absl/container/fixed_array_exception_safety_test.cc
+++ b/absl/container/fixed_array_exception_safety_test.cc
@@ -1,10 +1,10 @@
-// Copyright 2017 The Abseil Authors.
+// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,13 +14,12 @@
#include <initializer_list>
-#include "absl/container/fixed_array.h"
-
#include "gtest/gtest.h"
#include "absl/base/internal/exception_safety_testing.h"
+#include "absl/container/fixed_array.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace {
@@ -115,5 +114,5 @@ TEST(FixedArrayExceptionSafety, Fill) {
} // namespace
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/fixed_array_test.cc b/absl/container/fixed_array_test.cc
index 205ff41f..2b1cf47e 100644
--- a/absl/container/fixed_array_test.cc
+++ b/absl/container/fixed_array_test.cc
@@ -1,10 +1,10 @@
-// Copyright 2017 The Abseil Authors.
+// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,6 +15,7 @@
#include "absl/container/fixed_array.h"
#include <stdio.h>
+
#include <cstring>
#include <list>
#include <memory>
@@ -42,11 +43,7 @@ static bool IsOnStack(const ArrayType& a) {
class ConstructionTester {
public:
- ConstructionTester()
- : self_ptr_(this),
- value_(0) {
- constructions++;
- }
+ ConstructionTester() : self_ptr_(this), value_(0) { constructions++; }
~ConstructionTester() {
assert(self_ptr_ == this);
self_ptr_ = nullptr;
@@ -58,9 +55,7 @@ class ConstructionTester {
static int constructions;
static int destructions;
- void CheckConstructed() {
- assert(self_ptr_ == this);
- }
+ void CheckConstructed() { assert(self_ptr_ == this); }
void set(int value) { value_ = value; }
int get() { return value_; }
@@ -150,7 +145,7 @@ TEST(FixedArrayTest, SmallObjects) {
}
{
- // Arrays of > default size should be on the stack
+ // Arrays of > default size should be on the heap
absl::FixedArray<int, 100> array(101);
EXPECT_FALSE(IsOnStack(array));
}
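
The corrected comment matches absl::FixedArray's storage rule: element counts up to the inline capacity (the second template argument, or a computed default) are stored inside the object itself, while larger counts spill to the heap. A short sketch of that behavior, assuming only what the test above exercises:

    #include "absl/container/fixed_array.h"

    void StorageDemo() {
      // Fits within the inline capacity of 100: no heap allocation needed.
      absl::FixedArray<int, 100> small(100);

      // 101 elements exceed the inline capacity, so storage comes from the heap.
      absl::FixedArray<int, 100> large(101);

      small[99] = large[100] = 42;  // identical interface either way
    }
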
@@ -160,13 +155,13 @@ TEST(FixedArrayTest, SmallObjects) {
// same amount of stack space
absl::FixedArray<int> array1(0);
absl::FixedArray<char> array2(0);
- EXPECT_LE(sizeof(array1), sizeof(array2)+100);
- EXPECT_LE(sizeof(array2), sizeof(array1)+100);
+ EXPECT_LE(sizeof(array1), sizeof(array2) + 100);
+ EXPECT_LE(sizeof(array2), sizeof(array1) + 100);
}
{
// Ensure that vectors are properly constructed inside a fixed array.
- absl::FixedArray<std::vector<int> > array(2);
+ absl::FixedArray<std::vector<int>> array(2);
EXPECT_EQ(0, array[0].size());
EXPECT_EQ(0, array[1].size());
}
@@ -270,8 +265,8 @@ static void TestArray(int n) {
array.data()[i].set(i + 1);
}
for (int i = 0; i < n; i++) {
- EXPECT_THAT(array[i].get(), i+1);
- EXPECT_THAT(array.data()[i].get(), i+1);
+ EXPECT_THAT(array[i].get(), i + 1);
+ EXPECT_THAT(array.data()[i].get(), i + 1);
}
} // Close scope containing 'array'.
@@ -296,7 +291,7 @@ static void TestArrayOfArrays(int n) {
ASSERT_EQ(array.size(), n);
ASSERT_EQ(array.memsize(),
- sizeof(ConstructionTester) * elements_per_inner_array * n);
+ sizeof(ConstructionTester) * elements_per_inner_array * n);
ASSERT_EQ(array.begin() + n, array.end());
// Check that all elements were constructed
@@ -316,7 +311,7 @@ static void TestArrayOfArrays(int n) {
}
for (int i = 0; i < n; i++) {
for (int j = 0; j < elements_per_inner_array; j++) {
- ASSERT_EQ((array[i])[j].get(), i * elements_per_inner_array + j);
+ ASSERT_EQ((array[i])[j].get(), i * elements_per_inner_array + j);
ASSERT_EQ((array.data()[i])[j].get(), i * elements_per_inner_array + j);
}
}
@@ -329,8 +324,7 @@ static void TestArrayOfArrays(int n) {
}
for (int i = 0; i < n; i++) {
for (int j = 0; j < elements_per_inner_array; j++) {
- ASSERT_EQ((array[i])[j].get(),
- (i + 1) * elements_per_inner_array + j);
+ ASSERT_EQ((array[i])[j].get(), (i + 1) * elements_per_inner_array + j);
ASSERT_EQ((array.data()[i])[j].get(),
(i + 1) * elements_per_inner_array + j);
}
@@ -343,7 +337,7 @@ static void TestArrayOfArrays(int n) {
}
TEST(IteratorConstructorTest, NonInline) {
- int const kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+ int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
absl::FixedArray<int, ABSL_ARRAYSIZE(kInput) - 1> const fixed(
kInput, kInput + ABSL_ARRAYSIZE(kInput));
ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
@@ -353,7 +347,7 @@ TEST(IteratorConstructorTest, NonInline) {
}
TEST(IteratorConstructorTest, Inline) {
- int const kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+ int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
absl::FixedArray<int, ABSL_ARRAYSIZE(kInput)> const fixed(
kInput, kInput + ABSL_ARRAYSIZE(kInput));
ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
@@ -363,9 +357,10 @@ TEST(IteratorConstructorTest, Inline) {
}
TEST(IteratorConstructorTest, NonPod) {
- char const* kInput[] =
- { "red", "orange", "yellow", "green", "blue", "indigo", "violet" };
- absl::FixedArray<std::string> const fixed(kInput, kInput + ABSL_ARRAYSIZE(kInput));
+ char const* kInput[] = {"red", "orange", "yellow", "green",
+ "blue", "indigo", "violet"};
+ absl::FixedArray<std::string> const fixed(kInput,
+ kInput + ABSL_ARRAYSIZE(kInput));
ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
ASSERT_EQ(kInput[i], fixed[i]);
@@ -380,7 +375,7 @@ TEST(IteratorConstructorTest, FromEmptyVector) {
}
TEST(IteratorConstructorTest, FromNonEmptyVector) {
- int const kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+ int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
std::vector<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
absl::FixedArray<int> const fixed(items.begin(), items.end());
ASSERT_EQ(items.size(), fixed.size());
@@ -390,7 +385,7 @@ TEST(IteratorConstructorTest, FromNonEmptyVector) {
}
TEST(IteratorConstructorTest, FromBidirectionalIteratorRange) {
- int const kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+ int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
std::list<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
absl::FixedArray<int> const fixed(items.begin(), items.end());
EXPECT_THAT(fixed, testing::ElementsAreArray(kInput));
@@ -507,9 +502,8 @@ struct PickyDelete {
TEST(FixedArrayTest, UsesGlobalAlloc) { absl::FixedArray<PickyDelete, 0> a(5); }
-
TEST(FixedArrayTest, Data) {
- static const int kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+ static const int kInput[] = {2, 3, 5, 7, 11, 13, 17};
absl::FixedArray<int> fa(std::begin(kInput), std::end(kInput));
EXPECT_EQ(fa.data(), &*fa.begin());
EXPECT_EQ(fa.data(), &fa[0]);
@@ -823,7 +817,7 @@ TEST(AllocatorSupportTest, SizeValAllocConstructor) {
#ifdef ADDRESS_SANITIZER
TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
absl::FixedArray<int, 32> a(10);
- int *raw = a.data();
+ int* raw = a.data();
raw[0] = 0;
raw[9] = 0;
EXPECT_DEATH(raw[-2] = 0, "container-overflow");
@@ -834,7 +828,7 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
TEST(FixedArrayTest, AddressSanitizerAnnotations2) {
absl::FixedArray<char, 17> a(12);
- char *raw = a.data();
+ char* raw = a.data();
raw[0] = 0;
raw[11] = 0;
EXPECT_DEATH(raw[-7] = 0, "container-overflow");
@@ -845,7 +839,7 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations2) {
TEST(FixedArrayTest, AddressSanitizerAnnotations3) {
absl::FixedArray<uint64_t, 20> a(20);
- uint64_t *raw = a.data();
+ uint64_t* raw = a.data();
raw[0] = 0;
raw[19] = 0;
EXPECT_DEATH(raw[-1] = 0, "container-overflow");
@@ -854,7 +848,7 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations3) {
TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
absl::FixedArray<ThreeInts> a(10);
- ThreeInts *raw = a.data();
+ ThreeInts* raw = a.data();
raw[0] = ThreeInts();
raw[9] = ThreeInts();
// Note: raw[-1] is pointing to 12 bytes before the container range. However,
@@ -869,4 +863,21 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
}
#endif // ADDRESS_SANITIZER
+TEST(FixedArrayTest, AbslHashValueWorks) {
+ using V = absl::FixedArray<int>;
+ std::vector<V> cases;
+
+ // Generate a variety of vectors some of these are small enough for the inline
+ // space but are stored out of line.
+ for (int i = 0; i < 10; ++i) {
+ V v(i);
+ for (int j = 0; j < i; ++j) {
+ v[j] = j;
+ }
+ cases.push_back(v);
+ }
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases));
+}
+
} // namespace
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index ed453348..a711398e 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -42,7 +42,7 @@
#include "absl/memory/memory.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class K, class V>
struct FlatHashMapPolicy;
@@ -78,7 +78,7 @@ struct FlatHashMapPolicy;
// NOTE: A `flat_hash_map` stores its value types directly inside its
// implementation array to avoid memory indirection. Because a `flat_hash_map`
// is designed to move data when rehashed, map values will not retain pointer
-// stability. If you require pointer stability, or your values are large,
+// stability. If you require pointer stability, or if your values are large,
// consider using `absl::flat_hash_map<Key, std::unique_ptr<Value>>` instead.
// If your types are not moveable or you require pointer stability for keys,
// consider `absl::node_hash_map`.
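
To make the NOTE concrete, here is a hedged sketch of the pointer-stability trade-off, where reserve() stands in for any operation that can trigger a rehash:

    #include <string>
    #include "absl/container/flat_hash_map.h"
    #include "absl/container/node_hash_map.h"

    void StabilityDemo() {
      absl::flat_hash_map<int, std::string> flat = {{1, "one"}};
      std::string* p = &flat[1];
      flat.reserve(1024);  // rehashing moves values: *p may now dangle

      absl::node_hash_map<int, std::string> node = {{1, "one"}};
      std::string* q = &node[1];
      node.reserve(1024);  // values live in separate nodes: q stays valid
    }
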
@@ -220,8 +220,12 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
// Erases the element at `position` of the `flat_hash_map`, returning
// `void`.
//
- // NOTE: this return behavior is different than that of STL containers in
- // general and `std::unordered_map` in particular.
+ // NOTE: returning `void` in this case is different than that of STL
+ // containers in general and `std::unordered_map` in particular (which
+ // return an iterator to the element following the erased element). If that
+ // iterator is needed, simply post increment the iterator:
+ //
+ // map.erase(it++);
//
// iterator erase(const_iterator first, const_iterator last):
//
@@ -528,25 +532,26 @@ namespace container_internal {
template <class K, class V>
struct FlatHashMapPolicy {
- using slot_type = container_internal::slot_type<K, V>;
+ using slot_policy = container_internal::map_slot_policy<K, V>;
+ using slot_type = typename slot_policy::slot_type;
using key_type = K;
using mapped_type = V;
using init_type = std::pair</*non const*/ key_type, mapped_type>;
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
- slot_type::construct(alloc, slot, std::forward<Args>(args)...);
+ slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
}
template <class Allocator>
static void destroy(Allocator* alloc, slot_type* slot) {
- slot_type::destroy(alloc, slot);
+ slot_policy::destroy(alloc, slot);
}
template <class Allocator>
static void transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
- slot_type::transfer(alloc, new_slot, old_slot);
+ slot_policy::transfer(alloc, new_slot, old_slot);
}
template <class F, class... Args>
@@ -576,7 +581,7 @@ struct IsUnorderedContainer<
} // namespace container_algorithm_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_FLAT_HASH_MAP_H_
diff --git a/absl/container/flat_hash_map_test.cc b/absl/container/flat_hash_map_test.cc
index 02d2fa81..3f11a52c 100644
--- a/absl/container/flat_hash_map_test.cc
+++ b/absl/container/flat_hash_map_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,11 +17,12 @@
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
+#include "absl/container/internal/unordered_map_members_test.h"
#include "absl/container/internal/unordered_map_modifiers_test.h"
#include "absl/types/any.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
using ::absl::container_internal::hash_internal::Enum;
@@ -31,19 +32,20 @@ using ::testing::Pair;
using ::testing::UnorderedElementsAre;
template <class K, class V>
-using Map =
- flat_hash_map<K, V, StatefulTestingHash, StatefulTestingEqual, Alloc<>>;
+using Map = flat_hash_map<K, V, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<std::pair<const K, V>>>;
static_assert(!std::is_standard_layout<NonStandardLayout>(), "");
using MapTypes =
- ::testing::Types<Map<int, int>, Map<std::string, int>, Map<Enum, std::string>,
- Map<EnumClass, int>, Map<int, NonStandardLayout>,
- Map<NonStandardLayout, int>>;
+ ::testing::Types<Map<int, int>, Map<std::string, int>,
+ Map<Enum, std::string>, Map<EnumClass, int>,
+ Map<int, NonStandardLayout>, Map<NonStandardLayout, int>>;
-INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, ConstructorTest, MapTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, LookupTest, MapTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(FlatHashMap, ModifiersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ConstructorTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, LookupTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, MembersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ModifiersTest, MapTypes);
TEST(FlatHashMap, StandardLayout) {
struct Int {
@@ -140,6 +142,7 @@ TEST(FlatHashMap, LazyKeyPattern) {
int conversions = 0;
int hashes = 0;
flat_hash_map<size_t, size_t, Hash, Eq> m(0, Hash{&hashes});
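+ // Reserving up front avoids rehashes that would inflate the hash counts.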
+ m.reserve(3);
m[LazyInt(1, &conversions)] = 1;
EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 1)));
@@ -204,7 +207,9 @@ TEST(FlatHashMap, MergeExtractInsert) {
m.insert(std::move(node));
EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 17), Pair(2, 9)));
}
-#if !defined(__ANDROID__) && !defined(__APPLE__) && !defined(__EMSCRIPTEN__)
+
+#if (defined(ABSL_HAVE_STD_ANY) || !defined(_LIBCPP_VERSION)) && \
+ !defined(__EMSCRIPTEN__)
TEST(FlatHashMap, Any) {
absl::flat_hash_map<int, absl::any> m;
m.emplace(1, 7);
@@ -235,9 +240,10 @@ TEST(FlatHashMap, Any) {
ASSERT_NE(it2, m2.end());
EXPECT_EQ(7, it2->second);
}
-#endif // __ANDROID__
+#endif // (defined(ABSL_HAVE_STD_ANY) || !defined(_LIBCPP_VERSION)) &&
+ // !defined(__EMSCRIPTEN__)
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
index b175b1bf..8adbbcd5 100644
--- a/absl/container/flat_hash_set.h
+++ b/absl/container/flat_hash_set.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -40,7 +40,7 @@
#include "absl/memory/memory.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <typename T>
struct FlatHashSetPolicy;
@@ -56,9 +56,9 @@ struct FlatHashSetPolicy;
// following notable differences:
//
// * Requires keys that are CopyConstructible
-// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
-// `insert()`, provided that the set is provided a compatible heterogeneous
-// hashing function and equality operator.
+// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
+// that the set is provided a compatible heterogeneous hashing function and
+// equality operator.
// * Invalidates any references and pointers to elements within the table after
// `rehash()`.
// * Contains a `capacity()` member function indicating the number of element
@@ -213,8 +213,12 @@ class flat_hash_set
// Erases the element at `position` of the `flat_hash_set`, returning
// `void`.
//
- // NOTE: this return behavior is different than that of STL containers in
- // general and `std::unordered_map` in particular.
+ // NOTE: returning `void` in this case is different from the behavior of STL
+ // containers in general and `std::unordered_set` in particular (which
+ // return an iterator to the element following the erased element). If that
+ // iterator is needed, simply post-increment the iterator:
+ //
+ // set.erase(it++);
//
// iterator erase(const_iterator first, const_iterator last):
//
@@ -485,7 +489,7 @@ struct IsUnorderedContainer<absl::flat_hash_set<Key, Hash, KeyEqual, Allocator>>
} // namespace container_algorithm_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_FLAT_HASH_SET_H_
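
As an aside, the reworded heterogeneous-lookup comment above (sets have no `operator[]()`, so mentioning it was an error) is easy to demonstrate. A small illustrative sketch, relying on the fact that the default hash and equality functors for `absl::flat_hash_set<std::string>` accept `absl::string_view`:

    #include <string>
    #include "absl/container/flat_hash_set.h"
    #include "absl/strings/string_view.h"

    bool ContainsKey(const absl::flat_hash_set<std::string>& set,
                     absl::string_view key) {
      // Heterogeneous lookup: no temporary `std::string` is constructed.
      return set.find(key) != set.end();
    }
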
diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc
index cabc2b59..56140bbe 100644
--- a/absl/container/flat_hash_set_test.cc
+++ b/absl/container/flat_hash_set_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,12 +19,13 @@
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
+#include "absl/container/internal/unordered_set_members_test.h"
#include "absl/container/internal/unordered_set_modifiers_test.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -41,9 +42,10 @@ using Set =
using SetTypes =
::testing::Types<Set<int>, Set<std::string>, Set<Enum>, Set<EnumClass>>;
-INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, ConstructorTest, SetTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, LookupTest, SetTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(FlatHashSet, ModifiersTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, ConstructorTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, LookupTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, MembersTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, ModifiersTest, SetTypes);
TEST(FlatHashSet, EmplaceString) {
std::vector<std::string> v = {"a", "b"};
@@ -124,5 +126,5 @@ TEST(FlatHashSet, MergeExtractInsert) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 37714baf..27186b15 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -1,10 +1,10 @@
-// Copyright 2018 The Abseil Authors.
+// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -50,11 +50,11 @@
#include "absl/base/internal/throw_delegate.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
+#include "absl/container/internal/inlined_vector.h"
#include "absl/memory/memory.h"
namespace absl {
-inline namespace lts_2018_12_18 {
-
+inline namespace lts_2019_08_08 {
// -----------------------------------------------------------------------------
// InlinedVector
// -----------------------------------------------------------------------------
@@ -67,119 +67,181 @@ inline namespace lts_2018_12_18 {
// designed to cover the same API footprint as covered by `std::vector`.
template <typename T, size_t N, typename A = std::allocator<T>>
class InlinedVector {
- static_assert(N > 0, "InlinedVector requires inline capacity greater than 0");
- constexpr static typename A::size_type inlined_capacity() {
- return static_cast<typename A::size_type>(N);
- }
+ static_assert(N > 0, "`absl::InlinedVector` requires an inlined capacity.");
- template <typename Iterator>
- using DisableIfIntegral =
- absl::enable_if_t<!std::is_integral<Iterator>::value>;
+ using Storage = inlined_vector_internal::Storage<T, N, A>;
+ using rvalue_reference = typename Storage::rvalue_reference;
+ using MoveIterator = typename Storage::MoveIterator;
+ using AllocatorTraits = typename Storage::AllocatorTraits;
+ using IsMemcpyOk = typename Storage::IsMemcpyOk;
template <typename Iterator>
- using EnableIfInputIterator = absl::enable_if_t<std::is_convertible<
- typename std::iterator_traits<Iterator>::iterator_category,
- std::input_iterator_tag>::value>;
+ using IteratorValueAdapter =
+ typename Storage::template IteratorValueAdapter<Iterator>;
+ using CopyValueAdapter = typename Storage::CopyValueAdapter;
+ using DefaultValueAdapter = typename Storage::DefaultValueAdapter;
template <typename Iterator>
- using IteratorCategory =
- typename std::iterator_traits<Iterator>::iterator_category;
-
- using rvalue_reference = typename A::value_type&&;
+ using EnableIfAtLeastForwardIterator = absl::enable_if_t<
+ inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value>;
+ template <typename Iterator>
+ using DisableIfAtLeastForwardIterator = absl::enable_if_t<
+ !inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value>;
public:
- using allocator_type = A;
- using value_type = typename allocator_type::value_type;
- using pointer = typename allocator_type::pointer;
- using const_pointer = typename allocator_type::const_pointer;
- using reference = typename allocator_type::reference;
- using const_reference = typename allocator_type::const_reference;
- using size_type = typename allocator_type::size_type;
- using difference_type = typename allocator_type::difference_type;
- using iterator = pointer;
- using const_iterator = const_pointer;
- using reverse_iterator = std::reverse_iterator<iterator>;
- using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using allocator_type = typename Storage::allocator_type;
+ using value_type = typename Storage::value_type;
+ using pointer = typename Storage::pointer;
+ using const_pointer = typename Storage::const_pointer;
+ using reference = typename Storage::reference;
+ using const_reference = typename Storage::const_reference;
+ using size_type = typename Storage::size_type;
+ using difference_type = typename Storage::difference_type;
+ using iterator = typename Storage::iterator;
+ using const_iterator = typename Storage::const_iterator;
+ using reverse_iterator = typename Storage::reverse_iterator;
+ using const_reverse_iterator = typename Storage::const_reverse_iterator;
// ---------------------------------------------------------------------------
// InlinedVector Constructors and Destructor
// ---------------------------------------------------------------------------
- // Creates an empty inlined vector with a default initialized allocator.
- InlinedVector() noexcept(noexcept(allocator_type()))
- : allocator_and_tag_(allocator_type()) {}
+ // Creates an empty inlined vector with a value-initialized allocator.
+ InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {}
- // Creates an empty inlined vector with a specified allocator.
+ // Creates an empty inlined vector with a copy of `alloc`.
explicit InlinedVector(const allocator_type& alloc) noexcept
- : allocator_and_tag_(alloc) {}
+ : storage_(alloc) {}
// Creates an inlined vector with `n` copies of `value_type()`.
explicit InlinedVector(size_type n,
const allocator_type& alloc = allocator_type())
- : allocator_and_tag_(alloc) {
- InitAssign(n);
+ : storage_(alloc) {
+ storage_.Initialize(DefaultValueAdapter(), n);
}
// Creates an inlined vector with `n` copies of `v`.
InlinedVector(size_type n, const_reference v,
const allocator_type& alloc = allocator_type())
- : allocator_and_tag_(alloc) {
- InitAssign(n, v);
+ : storage_(alloc) {
+ storage_.Initialize(CopyValueAdapter(v), n);
}
- // Creates an inlined vector of copies of the values in `init_list`.
- InlinedVector(std::initializer_list<value_type> init_list,
+ // Creates an inlined vector with copies of the elements of `list`.
+ InlinedVector(std::initializer_list<value_type> list,
const allocator_type& alloc = allocator_type())
- : allocator_and_tag_(alloc) {
- AppendRange(init_list.begin(), init_list.end(),
- IteratorCategory<decltype(init_list.begin())>{});
- }
+ : InlinedVector(list.begin(), list.end(), alloc) {}
// Creates an inlined vector with elements constructed from the provided
- // Iterator range [`first`, `last`).
+ // forward iterator range [`first`, `last`).
//
- // NOTE: The `enable_if` prevents ambiguous interpretation between a call to
+ // NOTE: the `enable_if` prevents ambiguous interpretation between a call to
// this constructor with two integral arguments and a call to the above
// `InlinedVector(size_type, const_reference)` constructor.
- template <typename InputIterator, DisableIfIntegral<InputIterator>* = nullptr>
+ template <typename ForwardIterator,
+ EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+ InlinedVector(ForwardIterator first, ForwardIterator last,
+ const allocator_type& alloc = allocator_type())
+ : storage_(alloc) {
+ storage_.Initialize(IteratorValueAdapter<ForwardIterator>(first),
+ std::distance(first, last));
+ }
+
+ // Creates an inlined vector with elements constructed from the provided input
+ // iterator range [`first`, `last`).
+ template <typename InputIterator,
+ DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
InlinedVector(InputIterator first, InputIterator last,
const allocator_type& alloc = allocator_type())
- : allocator_and_tag_(alloc) {
- AppendRange(first, last, IteratorCategory<InputIterator>{});
+ : storage_(alloc) {
+ std::copy(first, last, std::back_inserter(*this));
}
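
An illustrative look at which of the two constructors above gets selected (the names below are not from the patch): forward-or-better iterators take the sized `Initialize` path, while single-pass input iterators fall back to copying through `std::back_inserter`:

    #include <iterator>
    #include <sstream>
    #include <vector>
    #include "absl/container/inlined_vector.h"

    void Example() {
      std::vector<int> src = {1, 2, 3};
      absl::InlinedVector<int, 4> a(src.begin(), src.end());  // forward path

      std::istringstream stream("4 5 6");
      std::istream_iterator<int> first(stream), last;
      absl::InlinedVector<int, 4> b(first, last);  // input (single-pass) path
    }
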
- // Creates a copy of `other` using `other`'s allocator.
- InlinedVector(const InlinedVector& other);
+ // Creates an inlined vector by copying the contents of `other` using
+ // `other`'s allocator.
+ InlinedVector(const InlinedVector& other)
+ : InlinedVector(other, *other.storage_.GetAllocPtr()) {}
- // Creates a copy of `other` but with a specified allocator.
- InlinedVector(const InlinedVector& other, const allocator_type& alloc);
+ // Creates an inlined vector by copying the contents of `other` using `alloc`.
+ InlinedVector(const InlinedVector& other, const allocator_type& alloc)
+ : storage_(alloc) {
+ if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) {
+ storage_.MemcpyFrom(other.storage_);
+ } else {
+ storage_.Initialize(IteratorValueAdapter<const_pointer>(other.data()),
+ other.size());
+ }
+ }
- // Creates an inlined vector by moving in the contents of `other`.
+ // Creates an inlined vector by moving in the contents of `other` without
+ // allocating. If `other` contains allocated memory, the newly-created inlined
+ // vector will take ownership of that memory. However, if `other` does not
+ // contain allocated memory, the newly-created inlined vector will perform
+ // element-wise move construction of the contents of `other`.
//
- // NOTE: This move constructor does not allocate and only moves the underlying
- // objects, so its `noexcept` specification depends on whether moving the
- // underlying objects can throw or not. We assume:
- // a) move constructors should only throw due to allocation failure and
+ // NOTE: since no allocation is performed for the inlined vector in either
+ // case, the `noexcept(...)` specification depends on whether moving the
+ // underlying objects can throw. It is assumed that...
+ // a) move constructors should only throw due to allocation failure.
// b) if `value_type`'s move constructor allocates, it uses the same
- // allocation function as the `InlinedVector`'s allocator, so the move
- // constructor is non-throwing if the allocator is non-throwing or
- // `value_type`'s move constructor is specified as `noexcept`.
- InlinedVector(InlinedVector&& v) noexcept(
+ // allocation function as the inlined vector's allocator.
+ // Thus, the move constructor is non-throwing if the allocator is non-throwing
+ // or `value_type`'s move constructor is specified as `noexcept`.
+ InlinedVector(InlinedVector&& other) noexcept(
absl::allocator_is_nothrow<allocator_type>::value ||
- std::is_nothrow_move_constructible<value_type>::value);
+ std::is_nothrow_move_constructible<value_type>::value)
+ : storage_(*other.storage_.GetAllocPtr()) {
+ if (IsMemcpyOk::value) {
+ storage_.MemcpyFrom(other.storage_);
+
+ other.storage_.SetInlinedSize(0);
+ } else if (other.storage_.GetIsAllocated()) {
+ storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
+ other.storage_.GetAllocatedCapacity());
+ storage_.SetAllocatedSize(other.storage_.GetSize());
+
+ other.storage_.SetInlinedSize(0);
+ } else {
+ IteratorValueAdapter<MoveIterator> other_values(
+ MoveIterator(other.storage_.GetInlinedData()));
+
+ inlined_vector_internal::ConstructElements(
+ storage_.GetAllocPtr(), storage_.GetInlinedData(), &other_values,
+ other.storage_.GetSize());
- // Creates an inlined vector by moving in the contents of `other`.
+ storage_.SetInlinedSize(other.storage_.GetSize());
+ }
+ }
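
Given the branches above, the observable effect for an allocated source is that the new vector adopts the source's buffer outright instead of moving elements one at a time. A sketch (illustrative, and specific to the implementation shown here):

    #include <cassert>
    #include <utility>
    #include "absl/container/inlined_vector.h"

    void Example() {
      absl::InlinedVector<int, 2> a = {1, 2, 3};  // size() > N, so allocated
      const int* buffer = a.data();
      absl::InlinedVector<int, 2> b(std::move(a));
      assert(b.data() == buffer);  // `b` took ownership of `a`'s allocation
    }
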
+
+ // Creates an inlined vector by moving in the contents of `other` with a copy
+ // of `alloc`.
//
- // NOTE: This move constructor allocates and subsequently moves the underlying
- // objects, so its `noexcept` specification depends on whether the allocation
- // can throw and whether moving the underlying objects can throw. Based on the
- // same assumptions as above, the `noexcept` specification is dominated by
- // whether the allocation can throw regardless of whether `value_type`'s move
- // constructor is specified as `noexcept`.
- InlinedVector(InlinedVector&& v, const allocator_type& alloc) noexcept(
- absl::allocator_is_nothrow<allocator_type>::value);
+ // NOTE: if `other`'s allocator is not equal to `alloc`, even if `other`
+ // contains allocated memory, this move constructor will still allocate. Since
+ // allocation is performed, this constructor can only be `noexcept` if the
+ // specified allocator is also `noexcept`.
+ InlinedVector(InlinedVector&& other, const allocator_type& alloc) noexcept(
+ absl::allocator_is_nothrow<allocator_type>::value)
+ : storage_(alloc) {
+ if (IsMemcpyOk::value) {
+ storage_.MemcpyFrom(other.storage_);
+
+ other.storage_.SetInlinedSize(0);
+ } else if ((*storage_.GetAllocPtr() == *other.storage_.GetAllocPtr()) &&
+ other.storage_.GetIsAllocated()) {
+ storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
+ other.storage_.GetAllocatedCapacity());
+ storage_.SetAllocatedSize(other.storage_.GetSize());
+
+ other.storage_.SetInlinedSize(0);
+ } else {
+ storage_.Initialize(
+ IteratorValueAdapter<MoveIterator>(MoveIterator(other.data())),
+ other.size());
+ }
+ }
- ~InlinedVector() { clear(); }
+ ~InlinedVector() {}
// ---------------------------------------------------------------------------
// InlinedVector Member Accessors
@@ -187,87 +249,102 @@ class InlinedVector {
// `InlinedVector::empty()`
//
- // Checks if the inlined vector has no elements.
+ // Returns whether the inlined vector contains no elements.
bool empty() const noexcept { return !size(); }
// `InlinedVector::size()`
//
// Returns the number of elements in the inlined vector.
- size_type size() const noexcept { return tag().size(); }
+ size_type size() const noexcept { return storage_.GetSize(); }
// `InlinedVector::max_size()`
//
- // Returns the maximum number of elements the vector can hold.
+ // Returns the maximum number of elements the inlined vector can hold.
size_type max_size() const noexcept {
// One bit of the size storage is used to indicate whether the inlined
- // vector is allocated. As a result, the maximum size of the container that
- // we can express is half of the max for `size_type`.
+ // vector contains allocated memory. As a result, the maximum size that the
+ // inlined vector can express is half of the max for `size_type`.
return (std::numeric_limits<size_type>::max)() / 2;
}
// `InlinedVector::capacity()`
//
- // Returns the number of elements that can be stored in the inlined vector
- // without requiring a reallocation of underlying memory.
+ // Returns the number of elements that could be stored in the inlined vector
+ // without requiring a reallocation.
//
- // NOTE: For most inlined vectors, `capacity()` should equal
- // `inlined_capacity()`. For inlined vectors which exceed this capacity, they
- // will no longer be inlined and `capacity()` will equal its capacity on the
- // allocated heap.
+ // NOTE: for most inlined vectors, `capacity()` should be equal to the
+ // template parameter `N`. Inlined vectors which exceed this capacity are no
+ // longer inlined, and `capacity()` will equal the capacity of the allocated
+ // memory.
size_type capacity() const noexcept {
- return allocated() ? allocation().capacity() : inlined_capacity();
+ return storage_.GetIsAllocated() ? storage_.GetAllocatedCapacity()
+ : storage_.GetInlinedCapacity();
}
// `InlinedVector::data()`
//
- // Returns a `pointer` to elements of the inlined vector. This pointer can be
- // used to access and modify the contained elements.
- // Only results within the range [`0`, `size()`) are defined.
+ // Returns a `pointer` to the elements of the inlined vector. This pointer
+ // can be used to access and modify the contained elements.
+ //
+ // NOTE: only elements within [`data()`, `data() + size()`) are valid.
pointer data() noexcept {
- return allocated() ? allocated_space() : inlined_space();
+ return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
+ : storage_.GetInlinedData();
}
- // Overload of `InlinedVector::data()` to return a `const_pointer` to elements
- // of the inlined vector. This pointer can be used to access (but not modify)
- // the contained elements.
+ // Overload of `InlinedVector::data()` that returns a `const_pointer` to the
+ // elements of the inlined vector. This pointer can be used to access but not
+ // modify the contained elements.
+ //
+ // NOTE: only elements within [`data()`, `data() + size()`) are valid.
const_pointer data() const noexcept {
- return allocated() ? allocated_space() : inlined_space();
+ return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
+ : storage_.GetInlinedData();
}
- // `InlinedVector::operator[]()`
+ // `InlinedVector::operator[](...)`
//
- // Returns a `reference` to the `i`th element of the inlined vector using the
- // array operator.
+ // Returns a `reference` to the `i`th element of the inlined vector.
reference operator[](size_type i) {
assert(i < size());
+
return data()[i];
}
- // Overload of `InlinedVector::operator[]()` to return a `const_reference` to
- // the `i`th element of the inlined vector.
+ // Overload of `InlinedVector::operator[](...)` that returns a
+ // `const_reference` to the `i`th element of the inlined vector.
const_reference operator[](size_type i) const {
assert(i < size());
+
return data()[i];
}
- // `InlinedVector::at()`
+ // `InlinedVector::at(...)`
//
// Returns a `reference` to the `i`th element of the inlined vector.
+ //
+ // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`,
+ // `std::out_of_range` will be thrown in both debug and non-debug builds.
reference at(size_type i) {
if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange(
- "InlinedVector::at() failed bounds check");
+ "`InlinedVector::at(size_type)` failed bounds check");
}
+
return data()[i];
}
- // Overload of `InlinedVector::at()` to return a `const_reference` to the
- // `i`th element of the inlined vector.
+ // Overload of `InlinedVector::at(...)` that returns a `const_reference` to
+ // the `i`th element of the inlined vector.
+ //
+ // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`,
+ // `std::out_of_range` will be thrown in both debug and non-debug builds.
const_reference at(size_type i) const {
if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange(
- "InlinedVector::at() failed bounds check");
+ "`InlinedVector::at(size_type) const` failed bounds check");
}
+
return data()[i];
}
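
A short sketch of the contract the new NOTEs spell out (illustrative): `at()` is bounds-checked in every build mode, whereas `operator[]` only asserts in debug builds:

    #include <stdexcept>
    #include "absl/container/inlined_vector.h"

    void Example() {
      absl::InlinedVector<int, 4> v = {1, 2, 3};
      try {
        v.at(5);  // out of range: throws in debug and non-debug builds alike
      } catch (const std::out_of_range&) {
        // "`InlinedVector::at(size_type)` failed bounds check"
      }
    }
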
@@ -276,13 +353,15 @@ class InlinedVector {
// Returns a `reference` to the first element of the inlined vector.
reference front() {
assert(!empty());
+
return at(0);
}
- // Overload of `InlinedVector::front()` returns a `const_reference` to the
- // first element of the inlined vector.
+ // Overload of `InlinedVector::front()` that returns a `const_reference` to
+ // the first element of the inlined vector.
const_reference front() const {
assert(!empty());
+
return at(0);
}
@@ -291,13 +370,15 @@ class InlinedVector {
// Returns a `reference` to the last element of the inlined vector.
reference back() {
assert(!empty());
+
return at(size() - 1);
}
- // Overload of `InlinedVector::back()` to return a `const_reference` to the
+ // Overload of `InlinedVector::back()` that returns a `const_reference` to the
// last element of the inlined vector.
const_reference back() const {
assert(!empty());
+
return at(size() - 1);
}
@@ -306,7 +387,7 @@ class InlinedVector {
// Returns an `iterator` to the beginning of the inlined vector.
iterator begin() noexcept { return data(); }
- // Overload of `InlinedVector::begin()` to return a `const_iterator` to
+ // Overload of `InlinedVector::begin()` that returns a `const_iterator` to
// the beginning of the inlined vector.
const_iterator begin() const noexcept { return data(); }
@@ -315,7 +396,7 @@ class InlinedVector {
// Returns an `iterator` to the end of the inlined vector.
iterator end() noexcept { return data() + size(); }
- // Overload of `InlinedVector::end()` to return a `const_iterator` to the
+ // Overload of `InlinedVector::end()` that returns a `const_iterator` to the
// end of the inlined vector.
const_iterator end() const noexcept { return data() + size(); }
@@ -334,7 +415,7 @@ class InlinedVector {
// Returns a `reverse_iterator` from the end of the inlined vector.
reverse_iterator rbegin() noexcept { return reverse_iterator(end()); }
- // Overload of `InlinedVector::rbegin()` to return a
+ // Overload of `InlinedVector::rbegin()` that returns a
// `const_reverse_iterator` from the end of the inlined vector.
const_reverse_iterator rbegin() const noexcept {
return const_reverse_iterator(end());
@@ -345,7 +426,7 @@ class InlinedVector {
// Returns a `reverse_iterator` from the beginning of the inlined vector.
reverse_iterator rend() noexcept { return reverse_iterator(begin()); }
- // Overload of `InlinedVector::rend()` to return a `const_reverse_iterator`
+ // Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator`
// from the beginning of the inlined vector.
const_reverse_iterator rend() const noexcept {
return const_reverse_iterator(begin());
@@ -364,1086 +445,403 @@ class InlinedVector {
// `InlinedVector::get_allocator()`
//
- // Returns a copy of the allocator of the inlined vector.
- allocator_type get_allocator() const { return allocator(); }
+ // Returns a copy of the inlined vector's allocator.
+ allocator_type get_allocator() const { return *storage_.GetAllocPtr(); }
// ---------------------------------------------------------------------------
// InlinedVector Member Mutators
// ---------------------------------------------------------------------------
- // `InlinedVector::operator=()`
+ // `InlinedVector::operator=(...)`
//
- // Replaces the contents of the inlined vector with copies of the elements in
- // the provided `std::initializer_list`.
- InlinedVector& operator=(std::initializer_list<value_type> init_list) {
- AssignRange(init_list.begin(), init_list.end(),
- IteratorCategory<decltype(init_list.begin())>{});
+ // Replaces the elements of the inlined vector with copies of the elements of
+ // `list`.
+ InlinedVector& operator=(std::initializer_list<value_type> list) {
+ assign(list.begin(), list.end());
+
return *this;
}
- // Overload of `InlinedVector::operator=()` to replace the contents of the
- // inlined vector with the contents of `other`.
+ // Overload of `InlinedVector::operator=(...)` that replaces the elements of
+ // the inlined vector with copies of the elements of `other`.
InlinedVector& operator=(const InlinedVector& other) {
- if (ABSL_PREDICT_FALSE(this == &other)) return *this;
-
- // Optimized to avoid reallocation.
- // Prefer reassignment to copy construction for elements.
- if (size() < other.size()) { // grow
- reserve(other.size());
- std::copy(other.begin(), other.begin() + size(), begin());
- std::copy(other.begin() + size(), other.end(), std::back_inserter(*this));
- } else { // maybe shrink
- erase(begin() + other.size(), end());
- std::copy(other.begin(), other.end(), begin());
+ if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+ const_pointer other_data = other.data();
+ assign(other_data, other_data + other.size());
}
+
return *this;
}
- // Overload of `InlinedVector::operator=()` to replace the contents of the
- // inlined vector with the contents of `other`.
+ // Overload of `InlinedVector::operator=(...)` that moves the elements of
+ // `other` into the inlined vector.
//
- // NOTE: As a result of calling this overload, `other` may be empty or it's
- // contents may be left in a moved-from state.
+ // NOTE: as a result of calling this overload, `other` is left in a valid but
+ // unspecified state.
InlinedVector& operator=(InlinedVector&& other) {
- if (ABSL_PREDICT_FALSE(this == &other)) return *this;
-
- if (other.allocated()) {
- clear();
- tag().set_allocated_size(other.size());
- init_allocation(other.allocation());
- other.tag() = Tag();
- } else {
- if (allocated()) clear();
- // Both are inlined now.
- if (size() < other.size()) {
- auto mid = std::make_move_iterator(other.begin() + size());
- std::copy(std::make_move_iterator(other.begin()), mid, begin());
- UninitializedCopy(mid, std::make_move_iterator(other.end()), end());
+ if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+ if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) {
+ inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
+ size());
+ storage_.DeallocateIfAllocated();
+ storage_.MemcpyFrom(other.storage_);
+
+ other.storage_.SetInlinedSize(0);
} else {
- auto new_end = std::copy(std::make_move_iterator(other.begin()),
- std::make_move_iterator(other.end()), begin());
- Destroy(new_end, end());
+ storage_.Assign(IteratorValueAdapter<MoveIterator>(
+ MoveIterator(other.storage_.GetInlinedData())),
+ other.size());
}
- tag().set_inline_size(other.size());
}
+
return *this;
}
- // `InlinedVector::assign()`
+ // `InlinedVector::assign(...)`
//
// Replaces the contents of the inlined vector with `n` copies of `v`.
void assign(size_type n, const_reference v) {
- if (n <= size()) { // Possibly shrink
- std::fill_n(begin(), n, v);
- erase(begin() + n, end());
- return;
- }
- // Grow
- reserve(n);
- std::fill_n(begin(), size(), v);
- if (allocated()) {
- UninitializedFill(allocated_space() + size(), allocated_space() + n, v);
- tag().set_allocated_size(n);
- } else {
- UninitializedFill(inlined_space() + size(), inlined_space() + n, v);
- tag().set_inline_size(n);
- }
+ storage_.Assign(CopyValueAdapter(v), n);
+ }
+
+ // Overload of `InlinedVector::assign(...)` that replaces the contents of the
+ // inlined vector with copies of the elements of `list`.
+ void assign(std::initializer_list<value_type> list) {
+ assign(list.begin(), list.end());
}
- // Overload of `InlinedVector::assign()` to replace the contents of the
- // inlined vector with copies of the values in the provided
- // `std::initializer_list`.
- void assign(std::initializer_list<value_type> init_list) {
- AssignRange(init_list.begin(), init_list.end(),
- IteratorCategory<decltype(init_list.begin())>{});
+ // Overload of `InlinedVector::assign(...)` to replace the contents of the
+ // inlined vector with the range [`first`, `last`).
+ //
+ // NOTE: this overload is for iterators that are "forward" category or better.
+ template <typename ForwardIterator,
+ EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+ void assign(ForwardIterator first, ForwardIterator last) {
+ storage_.Assign(IteratorValueAdapter<ForwardIterator>(first),
+ std::distance(first, last));
}
- // Overload of `InlinedVector::assign()` to replace the contents of the
- // inlined vector with values constructed from the range [`first`, `last`).
- template <typename InputIterator, DisableIfIntegral<InputIterator>* = nullptr>
+ // Overload of `InlinedVector::assign(...)` to replace the contents of the
+ // inlined vector with the range [`first`, `last`).
+ //
+ // NOTE: this overload is for iterators that are "input" category.
+ template <typename InputIterator,
+ DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
void assign(InputIterator first, InputIterator last) {
- AssignRange(first, last, IteratorCategory<InputIterator>{});
+ size_type i = 0;
+ for (; i < size() && first != last; ++i, static_cast<void>(++first)) {
+ at(i) = *first;
+ }
+
+ erase(data() + i, data() + size());
+
+ std::copy(first, last, std::back_inserter(*this));
}
- // `InlinedVector::resize()`
+ // `InlinedVector::resize(...)`
+ //
+ // Resizes the inlined vector to contain `n` elements.
//
- // Resizes the inlined vector to contain `n` elements. If `n` is smaller than
- // the inlined vector's current size, extra elements are destroyed. If `n` is
- // larger than the initial size, new elements are value-initialized.
- void resize(size_type n);
+ // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n`
+ // is larger than `size()`, new elements are value-initialized.
+ void resize(size_type n) { storage_.Resize(DefaultValueAdapter(), n); }
- // Overload of `InlinedVector::resize()` to resize the inlined vector to
- // contain `n` elements where, if `n` is larger than `size()`, the new values
- // will be copy-constructed from `v`.
- void resize(size_type n, const_reference v);
+ // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to
+ // contain `n` elements.
+ //
+ // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n`
+ // is larger than `size()`, new elements are copy-constructed from `v`.
+ void resize(size_type n, const_reference v) {
+ storage_.Resize(CopyValueAdapter(v), n);
+ }
- // `InlinedVector::insert()`
+ // `InlinedVector::insert(...)`
//
- // Copies `v` into `position`, returning an `iterator` pointing to the newly
+ // Inserts a copy of `v` at `pos`, returning an `iterator` to the newly
// inserted element.
- iterator insert(const_iterator position, const_reference v) {
- return emplace(position, v);
+ iterator insert(const_iterator pos, const_reference v) {
+ return emplace(pos, v);
}
- // Overload of `InlinedVector::insert()` for moving `v` into `position`,
- // returning an iterator pointing to the newly inserted element.
- iterator insert(const_iterator position, rvalue_reference v) {
- return emplace(position, std::move(v));
+ // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using
+ // move semantics, returning an `iterator` to the newly inserted element.
+ iterator insert(const_iterator pos, rvalue_reference v) {
+ return emplace(pos, std::move(v));
}
- // Overload of `InlinedVector::insert()` for inserting `n` contiguous copies
- // of `v` starting at `position`. Returns an `iterator` pointing to the first
- // of the newly inserted elements.
- iterator insert(const_iterator position, size_type n, const_reference v) {
- return InsertWithCount(position, n, v);
+ // Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies
+ // of `v` starting at `pos`, returning an `iterator` pointing to the first of
+ // the newly inserted elements.
+ iterator insert(const_iterator pos, size_type n, const_reference v) {
+ assert(pos >= begin());
+ assert(pos <= end());
+
+ if (ABSL_PREDICT_TRUE(n != 0)) {
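+ // Copy `v` first: it may alias an element that this insertion will shift.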
+ value_type dealias = v;
+ return storage_.Insert(pos, CopyValueAdapter(dealias), n);
+ } else {
+ return const_cast<iterator>(pos);
+ }
}
- // Overload of `InlinedVector::insert()` for copying the contents of the
- // `std::initializer_list` into the vector starting at `position`. Returns an
- // `iterator` pointing to the first of the newly inserted elements.
- iterator insert(const_iterator position,
- std::initializer_list<value_type> init_list) {
- return insert(position, init_list.begin(), init_list.end());
+ // Overload of `InlinedVector::insert(...)` that inserts copies of the
+ // elements of `list` starting at `pos`, returning an `iterator` pointing to
+ // the first of the newly inserted elements.
+ iterator insert(const_iterator pos, std::initializer_list<value_type> list) {
+ return insert(pos, list.begin(), list.end());
+ }
+
+ // Overload of `InlinedVector::insert(...)` that inserts the range [`first`,
+ // `last`) starting at `pos`, returning an `iterator` pointing to the first
+ // of the newly inserted elements.
+ //
+ // NOTE: this overload is for iterators that are "forward" category or better.
+ template <typename ForwardIterator,
+ EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+ iterator insert(const_iterator pos, ForwardIterator first,
+ ForwardIterator last) {
+ assert(pos >= begin());
+ assert(pos <= end());
+
+ if (ABSL_PREDICT_TRUE(first != last)) {
+ return storage_.Insert(pos, IteratorValueAdapter<ForwardIterator>(first),
+ std::distance(first, last));
+ } else {
+ return const_cast<iterator>(pos);
+ }
}
- // Overload of `InlinedVector::insert()` for inserting elements constructed
- // from the range [`first`, `last`). Returns an `iterator` pointing to the
- // first of the newly inserted elements.
+ // Overload of `InlinedVector::insert(...)` that inserts the range [`first`,
+ // `last`) starting at `pos`, returning an `iterator` pointing to the first
+ // of the newly inserted elements.
//
- // NOTE: The `enable_if` is intended to disambiguate the two three-argument
- // overloads of `insert()`.
+ // NOTE: this overload is for iterators that are "input" category.
template <typename InputIterator,
- typename = EnableIfInputIterator<InputIterator>>
- iterator insert(const_iterator position, InputIterator first,
- InputIterator last) {
- return InsertWithRange(position, first, last,
- IteratorCategory<InputIterator>());
+ DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+ iterator insert(const_iterator pos, InputIterator first, InputIterator last) {
+ assert(pos >= begin());
+ assert(pos <= end());
+
+ size_type index = std::distance(cbegin(), pos);
+ for (size_type i = index; first != last; ++i, static_cast<void>(++first)) {
+ insert(data() + i, *first);
+ }
+
+ return iterator(data() + index);
}
- // `InlinedVector::emplace()`
+ // `InlinedVector::emplace(...)`
//
- // Constructs and inserts an object in the inlined vector at the given
- // `position`, returning an `iterator` pointing to the newly emplaced element.
+ // Constructs and inserts an element using `args...` in the inlined vector at
+ // `pos`, returning an `iterator` pointing to the newly emplaced element.
template <typename... Args>
- iterator emplace(const_iterator position, Args&&... args);
+ iterator emplace(const_iterator pos, Args&&... args) {
+ assert(pos >= begin());
+ assert(pos <= end());
- // `InlinedVector::emplace_back()`
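+ // Materialize the new element first, since `args...` may reference
+ // elements of this vector that the insertion is about to shift.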
+ value_type dealias(std::forward<Args>(args)...);
+ return storage_.Insert(pos,
+ IteratorValueAdapter<MoveIterator>(
+ MoveIterator(std::addressof(dealias))),
+ 1);
+ }
+
+ // `InlinedVector::emplace_back(...)`
//
- // Constructs and appends a new element to the end of the inlined vector,
- // returning a `reference` to the emplaced element.
+ // Constructs and inserts an element using `args...` in the inlined vector at
+ // `end()`, returning a `reference` to the newly emplaced element.
template <typename... Args>
reference emplace_back(Args&&... args) {
- size_type s = size();
- assert(s <= capacity());
- if (ABSL_PREDICT_FALSE(s == capacity())) {
- return GrowAndEmplaceBack(std::forward<Args>(args)...);
- }
- assert(s < capacity());
-
- pointer space;
- if (allocated()) {
- tag().set_allocated_size(s + 1);
- space = allocated_space();
- } else {
- tag().set_inline_size(s + 1);
- space = inlined_space();
- }
- return Construct(space + s, std::forward<Args>(args)...);
+ return storage_.EmplaceBack(std::forward<Args>(args)...);
}
- // `InlinedVector::push_back()`
+ // `InlinedVector::push_back(...)`
//
- // Appends a copy of `v` to the end of the inlined vector.
+ // Inserts a copy of `v` in the inlined vector at `end()`.
void push_back(const_reference v) { static_cast<void>(emplace_back(v)); }
- // Overload of `InlinedVector::push_back()` for moving `v` into a newly
- // appended element.
+ // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()`
+ // using move semantics.
void push_back(rvalue_reference v) {
static_cast<void>(emplace_back(std::move(v)));
}
// `InlinedVector::pop_back()`
//
- // Destroys the element at the end of the inlined vector and shrinks the size
- // by `1` (unless the inlined vector is empty, in which case this is a no-op).
+ // Destroys the element at `back()`, reducing the size by `1`.
void pop_back() noexcept {
assert(!empty());
- size_type s = size();
- if (allocated()) {
- Destroy(allocated_space() + s - 1, allocated_space() + s);
- tag().set_allocated_size(s - 1);
- } else {
- Destroy(inlined_space() + s - 1, inlined_space() + s);
- tag().set_inline_size(s - 1);
- }
+
+ AllocatorTraits::destroy(*storage_.GetAllocPtr(), data() + (size() - 1));
+ storage_.SubtractSize(1);
}
- // `InlinedVector::erase()`
+ // `InlinedVector::erase(...)`
//
- // Erases the element at `position` of the inlined vector, returning an
- // `iterator` pointing to the first element following the erased element.
+ // Erases the element at `pos`, returning an `iterator` pointing to where the
+ // erased element was located.
//
- // NOTE: May return the end iterator, which is not dereferencable.
- iterator erase(const_iterator position) {
- assert(position >= begin());
- assert(position < end());
-
- iterator pos = const_cast<iterator>(position);
- std::move(pos + 1, end(), pos);
- pop_back();
- return pos;
+ // NOTE: may return `end()`, which is not dereferenceable.
+ iterator erase(const_iterator pos) {
+ assert(pos >= begin());
+ assert(pos < end());
+
+ return storage_.Erase(pos, pos + 1);
}
- // Overload of `InlinedVector::erase()` for erasing all elements in the
- // range [`from`, `to`) in the inlined vector. Returns an `iterator` pointing
- // to the first element following the range erased or the end iterator if `to`
- // was the end iterator.
- iterator erase(const_iterator from, const_iterator to);
+ // Overload of `InlinedVector::erase(...)` that erases every element in the
+ // range [`from`, `to`), returning an `iterator` pointing to where the first
+ // erased element was located.
+ //
+ // NOTE: may return `end()`, which is not dereferenceable.
+ iterator erase(const_iterator from, const_iterator to) {
+ assert(from >= begin());
+ assert(from <= to);
+ assert(to <= end());
+
+ if (ABSL_PREDICT_TRUE(from != to)) {
+ return storage_.Erase(from, to);
+ } else {
+ return const_cast<iterator>(from);
+ }
+ }
// `InlinedVector::clear()`
//
- // Destroys all elements in the inlined vector, sets the size of `0` and
- // deallocates the heap allocation if the inlined vector was allocated.
+ // Destroys all elements in the inlined vector, setting the size to `0` and
+ // deallocating any held memory.
void clear() noexcept {
- size_type s = size();
- if (allocated()) {
- Destroy(allocated_space(), allocated_space() + s);
- allocation().Dealloc(allocator());
- } else if (s != 0) { // do nothing for empty vectors
- Destroy(inlined_space(), inlined_space() + s);
- }
- tag() = Tag();
+ inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
+ size());
+ storage_.DeallocateIfAllocated();
+ storage_.SetInlinedSize(0);
}
- // `InlinedVector::reserve()`
+ // `InlinedVector::reserve(...)`
//
- // Enlarges the underlying representation of the inlined vector so it can hold
- // at least `n` elements. This method does not change `size()` or the actual
- // contents of the vector.
- //
- // NOTE: If `n` does not exceed `capacity()`, `reserve()` will have no
- // effects. Otherwise, `reserve()` will reallocate, performing an n-time
- // element-wise move of everything contained.
- void reserve(size_type n) {
- if (n > capacity()) {
- // Make room for new elements
- EnlargeBy(n - size());
- }
- }
+ // Ensures that there is enough room for at least `n` elements.
+ void reserve(size_type n) { storage_.Reserve(n); }
// `InlinedVector::shrink_to_fit()`
//
- // Reduces memory usage by freeing unused memory. After this call, calls to
- // `capacity()` will be equal to `(std::max)(inlined_capacity(), size())`.
+ // Reduces memory usage by freeing unused memory. After this call,
+ // `capacity()` will be equal to `max(N, size())`.
//
- // If `size() <= inlined_capacity()` and the elements are currently stored on
- // the heap, they will be moved to the inlined storage and the heap memory
+ // If `size() <= N` and the inlined vector contains allocated memory, the
+ // elements will all be moved to the inlined space and the allocated memory
// will be deallocated.
//
- // If `size() > inlined_capacity()` and `size() < capacity()` the elements
- // will be moved to a smaller heap allocation.
+ // If `size() > N` and `size() < capacity()`, the elements will be moved to a
+ // smaller allocation.
void shrink_to_fit() {
- const auto s = size();
- if (ABSL_PREDICT_FALSE(!allocated() || s == capacity())) return;
-
- if (s <= inlined_capacity()) {
- // Move the elements to the inlined storage.
- // We have to do this using a temporary, because `inlined_storage` and
- // `allocation_storage` are in a union field.
- auto temp = std::move(*this);
- assign(std::make_move_iterator(temp.begin()),
- std::make_move_iterator(temp.end()));
- return;
+ if (storage_.GetIsAllocated()) {
+ storage_.ShrinkToFit();
}
-
- // Reallocate storage and move elements.
- // We can't simply use the same approach as above, because `assign()` would
- // call into `reserve()` internally and reserve larger capacity than we need
- Allocation new_allocation(allocator(), s);
- UninitializedCopy(std::make_move_iterator(allocated_space()),
- std::make_move_iterator(allocated_space() + s),
- new_allocation.buffer());
- ResetAllocation(new_allocation, s);
}
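
A sketch of the capacity contract described above (illustrative):

    #include <cassert>
    #include "absl/container/inlined_vector.h"

    void Example() {
      absl::InlinedVector<int, 4> v = {1, 2, 3, 4, 5};  // spills to the heap
      v.reserve(100);     // capacity() >= 100
      v.resize(3);        // size() == 3 now fits inline again (N == 4)
      v.shrink_to_fit();  // moves elements inline and frees the allocation
      assert(v.capacity() == 4);
    }
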
- // `InlinedVector::swap()`
+ // `InlinedVector::swap(...)`
//
- // Swaps the contents of this inlined vector with the contents of `other`.
- void swap(InlinedVector& other);
-
- template <typename Hash>
- friend Hash AbslHashValue(Hash hash, const InlinedVector& inlined_vector) {
- const_pointer p = inlined_vector.data();
- size_type n = inlined_vector.size();
- return Hash::combine(Hash::combine_contiguous(std::move(hash), p, n), n);
- }
-
- private:
- // Holds whether the vector is allocated or not in the lowest bit and the size
- // in the high bits:
- // `size_ = (size << 1) | is_allocated;`
- class Tag {
- public:
- Tag() : size_(0) {}
- size_type size() const { return size_ / 2; }
- void add_size(size_type n) { size_ += n * 2; }
- void set_inline_size(size_type n) { size_ = n * 2; }
- void set_allocated_size(size_type n) { size_ = (n * 2) + 1; }
- bool allocated() const { return size_ % 2; }
-
- private:
- size_type size_;
- };
-
- // Derives from `allocator_type` to use the empty base class optimization.
- // If the `allocator_type` is stateless, we can store our instance for free.
- class AllocatorAndTag : private allocator_type {
- public:
- explicit AllocatorAndTag(const allocator_type& a) : allocator_type(a) {}
-
- Tag& tag() { return tag_; }
- const Tag& tag() const { return tag_; }
-
- allocator_type& allocator() { return *this; }
- const allocator_type& allocator() const { return *this; }
-
- private:
- Tag tag_;
- };
-
- class Allocation {
- public:
- Allocation(allocator_type& a, size_type capacity)
- : capacity_(capacity), buffer_(Create(a, capacity)) {}
-
- void Dealloc(allocator_type& a) {
- std::allocator_traits<allocator_type>::deallocate(a, buffer_, capacity_);
- }
-
- size_type capacity() const { return capacity_; }
-
- const_pointer buffer() const { return buffer_; }
-
- pointer buffer() { return buffer_; }
-
- private:
- static pointer Create(allocator_type& a, size_type n) {
- return std::allocator_traits<allocator_type>::allocate(a, n);
+ // Swaps the contents of the inlined vector with `other`.
+ void swap(InlinedVector& other) {
+ if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+ storage_.Swap(std::addressof(other.storage_));
}
-
- size_type capacity_;
- pointer buffer_;
- };
-
- const Tag& tag() const { return allocator_and_tag_.tag(); }
-
- Tag& tag() { return allocator_and_tag_.tag(); }
-
- Allocation& allocation() {
- return reinterpret_cast<Allocation&>(rep_.allocation_storage.allocation);
- }
-
- const Allocation& allocation() const {
- return reinterpret_cast<const Allocation&>(
- rep_.allocation_storage.allocation);
- }
-
- void init_allocation(const Allocation& allocation) {
- new (&rep_.allocation_storage.allocation) Allocation(allocation);
- }
-
- // TODO(absl-team): investigate whether the reinterpret_cast is appropriate.
- pointer inlined_space() {
- return reinterpret_cast<pointer>(
- std::addressof(rep_.inlined_storage.inlined[0]));
- }
-
- const_pointer inlined_space() const {
- return reinterpret_cast<const_pointer>(
- std::addressof(rep_.inlined_storage.inlined[0]));
- }
-
- pointer allocated_space() { return allocation().buffer(); }
-
- const_pointer allocated_space() const { return allocation().buffer(); }
-
- const allocator_type& allocator() const {
- return allocator_and_tag_.allocator();
}
- allocator_type& allocator() { return allocator_and_tag_.allocator(); }
-
- bool allocated() const { return tag().allocated(); }
-
- // Enlarge the underlying representation so we can store `size_ + delta` elems
- // in allocated space. The size is not changed, and any newly added memory is
- // not initialized.
- void EnlargeBy(size_type delta);
-
- // Shift all elements from `position` to `end()` by `n` places to the right.
- // If the vector needs to be enlarged, memory will be allocated.
- // Returns `iterator`s pointing to the start of the previously-initialized
- // portion and the start of the uninitialized portion of the created gap.
- // The number of initialized spots is `pair.second - pair.first`. The number
- // of raw spots is `n - (pair.second - pair.first)`.
- //
- // Updates the size of the InlinedVector internally.
- std::pair<iterator, iterator> ShiftRight(const_iterator position,
- size_type n);
-
- void ResetAllocation(Allocation new_allocation, size_type new_size) {
- if (allocated()) {
- Destroy(allocated_space(), allocated_space() + size());
- assert(begin() == allocated_space());
- allocation().Dealloc(allocator());
- allocation() = new_allocation;
- } else {
- Destroy(inlined_space(), inlined_space() + size());
- init_allocation(new_allocation); // bug: only init once
- }
- tag().set_allocated_size(new_size);
- }
-
- template <typename... Args>
- reference GrowAndEmplaceBack(Args&&... args) {
- assert(size() == capacity());
- const size_type s = size();
-
- Allocation new_allocation(allocator(), 2 * capacity());
-
- reference new_element =
- Construct(new_allocation.buffer() + s, std::forward<Args>(args)...);
- UninitializedCopy(std::make_move_iterator(data()),
- std::make_move_iterator(data() + s),
- new_allocation.buffer());
-
- ResetAllocation(new_allocation, s + 1);
-
- return new_element;
- }
-
- void InitAssign(size_type n);
-
- void InitAssign(size_type n, const_reference v);
-
- template <typename... Args>
- reference Construct(pointer p, Args&&... args) {
- std::allocator_traits<allocator_type>::construct(
- allocator(), p, std::forward<Args>(args)...);
- return *p;
- }
-
- template <typename Iterator>
- void UninitializedCopy(Iterator src, Iterator src_last, pointer dst) {
- for (; src != src_last; ++dst, ++src) Construct(dst, *src);
- }
-
- template <typename... Args>
- void UninitializedFill(pointer dst, pointer dst_last, const Args&... args) {
- for (; dst != dst_last; ++dst) Construct(dst, args...);
- }
-
- // Destroy [`from`, `to`) in place.
- void Destroy(pointer from, pointer to);
-
- template <typename Iterator>
- void AppendRange(Iterator first, Iterator last, std::forward_iterator_tag);
-
- template <typename Iterator>
- void AppendRange(Iterator first, Iterator last, std::input_iterator_tag);
-
- template <typename Iterator>
- void AssignRange(Iterator first, Iterator last, std::forward_iterator_tag);
+ private:
+ template <typename H, typename TheT, size_t TheN, typename TheA>
+ friend H AbslHashValue(H h, const absl::InlinedVector<TheT, TheN, TheA>& a);
- template <typename Iterator>
- void AssignRange(Iterator first, Iterator last, std::input_iterator_tag);
-
- iterator InsertWithCount(const_iterator position, size_type n,
- const_reference v);
-
- template <typename ForwardIterator>
- iterator InsertWithRange(const_iterator position, ForwardIterator first,
- ForwardIterator last, std::forward_iterator_tag);
-
- template <typename InputIterator>
- iterator InsertWithRange(const_iterator position, InputIterator first,
- InputIterator last, std::input_iterator_tag);
-
- // Stores either the inlined or allocated representation
- union Rep {
- using ValueTypeBuffer =
- absl::aligned_storage_t<sizeof(value_type), alignof(value_type)>;
- using AllocationBuffer =
- absl::aligned_storage_t<sizeof(Allocation), alignof(Allocation)>;
-
- // Structs wrap the buffers to perform indirection that solves a bizarre
- // compilation error on Visual Studio (all known versions).
- struct InlinedRep {
- ValueTypeBuffer inlined[N];
- };
- struct AllocatedRep {
- AllocationBuffer allocation;
- };
-
- InlinedRep inlined_storage;
- AllocatedRep allocation_storage;
- };
-
- AllocatorAndTag allocator_and_tag_;
- Rep rep_;
+ Storage storage_;
};
// -----------------------------------------------------------------------------
// InlinedVector Non-Member Functions
// -----------------------------------------------------------------------------
-// `swap()`
+// `swap(...)`
//
-// Swaps the contents of two inlined vectors. This convenience function
-// simply calls `InlinedVector::swap()`.
+// Swaps the contents of two inlined vectors.
template <typename T, size_t N, typename A>
-void swap(InlinedVector<T, N, A>& a,
- InlinedVector<T, N, A>& b) noexcept(noexcept(a.swap(b))) {
+void swap(absl::InlinedVector<T, N, A>& a,
+ absl::InlinedVector<T, N, A>& b) noexcept(noexcept(a.swap(b))) {
a.swap(b);
}
-// `operator==()`
+// `operator==(...)`
//
-// Tests the equivalency of the contents of two inlined vectors.
+// Tests for value-equality of two inlined vectors.
template <typename T, size_t N, typename A>
-bool operator==(const InlinedVector<T, N, A>& a,
- const InlinedVector<T, N, A>& b) {
- return absl::equal(a.begin(), a.end(), b.begin(), b.end());
+bool operator==(const absl::InlinedVector<T, N, A>& a,
+ const absl::InlinedVector<T, N, A>& b) {
+ auto a_data = a.data();
+ auto b_data = b.data();
+ return absl::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
}
-// `operator!=()`
+// `operator!=(...)`
//
-// Tests the inequality of the contents of two inlined vectors.
+// Tests for value-inequality of two inlined vectors.
template <typename T, size_t N, typename A>
-bool operator!=(const InlinedVector<T, N, A>& a,
- const InlinedVector<T, N, A>& b) {
+bool operator!=(const absl::InlinedVector<T, N, A>& a,
+ const absl::InlinedVector<T, N, A>& b) {
return !(a == b);
}
-// `operator<()`
+// `operator<(...)`
//
-// Tests whether the contents of one inlined vector are less than the contents
-// of another through a lexicographical comparison operation.
+// Tests whether the value of an inlined vector is less than the value of
+// another inlined vector using a lexicographical comparison algorithm.
template <typename T, size_t N, typename A>
-bool operator<(const InlinedVector<T, N, A>& a,
- const InlinedVector<T, N, A>& b) {
- return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+bool operator<(const absl::InlinedVector<T, N, A>& a,
+ const absl::InlinedVector<T, N, A>& b) {
+ auto a_data = a.data();
+ auto b_data = b.data();
+ return std::lexicographical_compare(a_data, a_data + a.size(), b_data,
+ b_data + b.size());
}
-// `operator>()`
+// `operator>(...)`
//
-// Tests whether the contents of one inlined vector are greater than the
-// contents of another through a lexicographical comparison operation.
+// Tests whether the value of an inlined vector is greater than the value of
+// another inlined vector using a lexicographical comparison algorithm.
template <typename T, size_t N, typename A>
-bool operator>(const InlinedVector<T, N, A>& a,
- const InlinedVector<T, N, A>& b) {
+bool operator>(const absl::InlinedVector<T, N, A>& a,
+ const absl::InlinedVector<T, N, A>& b) {
return b < a;
}
-// `operator<=()`
+// `operator<=(...)`
//
-// Tests whether the contents of one inlined vector are less than or equal to
-// the contents of another through a lexicographical comparison operation.
+// Tests whether the value of an inlined vector is less than or equal to the
+// value of another inlined vector using a lexicographical comparison algorithm.
template <typename T, size_t N, typename A>
-bool operator<=(const InlinedVector<T, N, A>& a,
- const InlinedVector<T, N, A>& b) {
+bool operator<=(const absl::InlinedVector<T, N, A>& a,
+ const absl::InlinedVector<T, N, A>& b) {
return !(b < a);
}
-// `operator>=()`
+// `operator>=(...)`
//
-// Tests whether the contents of one inlined vector are greater than or equal to
-// the contents of another through a lexicographical comparison operation.
+// Tests whether the value of an inlined vector is greater than or equal to the
+// value of another inlined vector using a lexicographical comparison algorithm.
template <typename T, size_t N, typename A>
-bool operator>=(const InlinedVector<T, N, A>& a,
- const InlinedVector<T, N, A>& b) {
+bool operator>=(const absl::InlinedVector<T, N, A>& a,
+ const absl::InlinedVector<T, N, A>& b) {
return !(a < b);
}
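//
// A short worked example of the lexicographical ordering implemented above:
//
//   absl::InlinedVector<int, 4> a = {1, 2, 3};
//   absl::InlinedVector<int, 4> b = {1, 4};
//   assert(a < b);             // Decided at the first mismatch: 2 < 4
//   assert(a <= a && a >= a);  // An equal range is neither lesser nor greater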
-// -----------------------------------------------------------------------------
-// Implementation of InlinedVector
+// `AbslHashValue(...)`
//
-// Do not depend on any below implementation details!
-// -----------------------------------------------------------------------------
-
-template <typename T, size_t N, typename A>
-InlinedVector<T, N, A>::InlinedVector(const InlinedVector& other)
- : allocator_and_tag_(other.allocator()) {
- reserve(other.size());
- if (allocated()) {
- UninitializedCopy(other.begin(), other.end(), allocated_space());
- tag().set_allocated_size(other.size());
- } else {
- UninitializedCopy(other.begin(), other.end(), inlined_space());
- tag().set_inline_size(other.size());
- }
-}
-
-template <typename T, size_t N, typename A>
-InlinedVector<T, N, A>::InlinedVector(const InlinedVector& other,
- const allocator_type& alloc)
- : allocator_and_tag_(alloc) {
- reserve(other.size());
- if (allocated()) {
- UninitializedCopy(other.begin(), other.end(), allocated_space());
- tag().set_allocated_size(other.size());
- } else {
- UninitializedCopy(other.begin(), other.end(), inlined_space());
- tag().set_inline_size(other.size());
- }
-}
-
-template <typename T, size_t N, typename A>
-InlinedVector<T, N, A>::InlinedVector(InlinedVector&& other) noexcept(
- absl::allocator_is_nothrow<allocator_type>::value ||
- std::is_nothrow_move_constructible<value_type>::value)
- : allocator_and_tag_(other.allocator_and_tag_) {
- if (other.allocated()) {
- // We can just steal the underlying buffer from the source.
- // That leaves the source empty, so we clear its size.
- init_allocation(other.allocation());
- other.tag() = Tag();
- } else {
- UninitializedCopy(
- std::make_move_iterator(other.inlined_space()),
- std::make_move_iterator(other.inlined_space() + other.size()),
- inlined_space());
- }
-}
-
-template <typename T, size_t N, typename A>
-InlinedVector<T, N, A>::InlinedVector(InlinedVector&& other,
- const allocator_type& alloc) noexcept( //
- absl::allocator_is_nothrow<allocator_type>::value)
- : allocator_and_tag_(alloc) {
- if (other.allocated()) {
- if (alloc == other.allocator()) {
- // We can just steal the allocation from the source.
- tag() = other.tag();
- init_allocation(other.allocation());
- other.tag() = Tag();
- } else {
- // We need to use our own allocator
- reserve(other.size());
- UninitializedCopy(std::make_move_iterator(other.begin()),
- std::make_move_iterator(other.end()),
- allocated_space());
- tag().set_allocated_size(other.size());
- }
- } else {
- UninitializedCopy(
- std::make_move_iterator(other.inlined_space()),
- std::make_move_iterator(other.inlined_space() + other.size()),
- inlined_space());
- tag().set_inline_size(other.size());
- }
-}
-
-template <typename T, size_t N, typename A>
-void InlinedVector<T, N, A>::InitAssign(size_type n, const_reference v) {
- if (n > inlined_capacity()) {
- Allocation new_allocation(allocator(), n);
- init_allocation(new_allocation);
- UninitializedFill(allocated_space(), allocated_space() + n, v);
- tag().set_allocated_size(n);
- } else {
- UninitializedFill(inlined_space(), inlined_space() + n, v);
- tag().set_inline_size(n);
- }
-}
-
-template <typename T, size_t N, typename A>
-void InlinedVector<T, N, A>::InitAssign(size_type n) {
- if (n > inlined_capacity()) {
- Allocation new_allocation(allocator(), n);
- init_allocation(new_allocation);
- UninitializedFill(allocated_space(), allocated_space() + n);
- tag().set_allocated_size(n);
- } else {
- UninitializedFill(inlined_space(), inlined_space() + n);
- tag().set_inline_size(n);
- }
-}
-
-template <typename T, size_t N, typename A>
-void InlinedVector<T, N, A>::resize(size_type n) {
- size_type s = size();
- if (n < s) {
- erase(begin() + n, end());
- return;
- }
- reserve(n);
- assert(capacity() >= n);
-
- // Fill new space with elements constructed in-place.
- if (allocated()) {
- UninitializedFill(allocated_space() + s, allocated_space() + n);
- tag().set_allocated_size(n);
- } else {
- UninitializedFill(inlined_space() + s, inlined_space() + n);
- tag().set_inline_size(n);
- }
-}
-
-template <typename T, size_t N, typename A>
-void InlinedVector<T, N, A>::resize(size_type n, const_reference v) {
- size_type s = size();
- if (n < s) {
- erase(begin() + n, end());
- return;
- }
- reserve(n);
- assert(capacity() >= n);
-
- // Fill new space with copies of 'v'.
- if (allocated()) {
- UninitializedFill(allocated_space() + s, allocated_space() + n, v);
- tag().set_allocated_size(n);
- } else {
- UninitializedFill(inlined_space() + s, inlined_space() + n, v);
- tag().set_inline_size(n);
- }
-}
-
-template <typename T, size_t N, typename A>
-template <typename... Args>
-auto InlinedVector<T, N, A>::emplace(const_iterator position, Args&&... args)
- -> iterator {
- assert(position >= begin());
- assert(position <= end());
- if (ABSL_PREDICT_FALSE(position == end())) {
- emplace_back(std::forward<Args>(args)...);
- return end() - 1;
- }
-
- T new_t = T(std::forward<Args>(args)...);
-
- auto range = ShiftRight(position, 1);
- if (range.first == range.second) {
- // constructing into uninitialized memory
- Construct(range.first, std::move(new_t));
- } else {
- // assigning into moved-from object
- *range.first = T(std::move(new_t));
- }
-
- return range.first;
-}
-
-template <typename T, size_t N, typename A>
-auto InlinedVector<T, N, A>::erase(const_iterator from, const_iterator to)
- -> iterator {
- assert(begin() <= from);
- assert(from <= to);
- assert(to <= end());
-
- iterator range_start = const_cast<iterator>(from);
- iterator range_end = const_cast<iterator>(to);
-
- size_type s = size();
- ptrdiff_t erase_gap = std::distance(range_start, range_end);
- if (erase_gap > 0) {
- pointer space;
- if (allocated()) {
- space = allocated_space();
- tag().set_allocated_size(s - erase_gap);
- } else {
- space = inlined_space();
- tag().set_inline_size(s - erase_gap);
- }
- std::move(range_end, space + s, range_start);
- Destroy(space + s - erase_gap, space + s);
- }
- return range_start;
-}
-
-template <typename T, size_t N, typename A>
-void InlinedVector<T, N, A>::swap(InlinedVector& other) {
- using std::swap; // Augment ADL with `std::swap`.
- if (ABSL_PREDICT_FALSE(this == &other)) return;
-
- if (allocated() && other.allocated()) {
- // Both out of line, so just swap the tag, allocation, and allocator.
- swap(tag(), other.tag());
- swap(allocation(), other.allocation());
- swap(allocator(), other.allocator());
- return;
- }
- if (!allocated() && !other.allocated()) {
- // Both inlined: swap up to smaller size, then move remaining elements.
- InlinedVector* a = this;
- InlinedVector* b = &other;
- if (size() < other.size()) {
- swap(a, b);
- }
-
- const size_type a_size = a->size();
- const size_type b_size = b->size();
- assert(a_size >= b_size);
- // `a` is larger. Swap the elements up to the smaller array size.
- std::swap_ranges(a->inlined_space(), a->inlined_space() + b_size,
- b->inlined_space());
-
- // Move the remaining elements:
- // [`b_size`, `a_size`) from `a` -> [`b_size`, `a_size`) from `b`
- b->UninitializedCopy(a->inlined_space() + b_size,
- a->inlined_space() + a_size,
- b->inlined_space() + b_size);
- a->Destroy(a->inlined_space() + b_size, a->inlined_space() + a_size);
-
- swap(a->tag(), b->tag());
- swap(a->allocator(), b->allocator());
- assert(b->size() == a_size);
- assert(a->size() == b_size);
- return;
- }
-
- // One is out of line, one is inline.
- // We first move the elements from the inlined vector into the
- // inlined space in the other vector. We then put the other vector's
- // pointer/capacity into the originally inlined vector and swap
- // the tags.
- InlinedVector* a = this;
- InlinedVector* b = &other;
- if (a->allocated()) {
- swap(a, b);
- }
- assert(!a->allocated());
- assert(b->allocated());
- const size_type a_size = a->size();
- const size_type b_size = b->size();
- // In an optimized build, `b_size` would be unused.
- static_cast<void>(b_size);
-
- // Made local copies of `size()`; `tag()` no longer needs to be accurate.
- swap(a->tag(), b->tag());
-
- // Copy `b_allocation` out before `b`'s union gets clobbered by `inlined_space`.
- Allocation b_allocation = b->allocation();
-
- b->UninitializedCopy(a->inlined_space(), a->inlined_space() + a_size,
- b->inlined_space());
- a->Destroy(a->inlined_space(), a->inlined_space() + a_size);
-
- a->allocation() = b_allocation;
-
- if (a->allocator() != b->allocator()) {
- swap(a->allocator(), b->allocator());
- }
-
- assert(b->size() == a_size);
- assert(a->size() == b_size);
-}
-
-template <typename T, size_t N, typename A>
-void InlinedVector<T, N, A>::EnlargeBy(size_type delta) {
- const size_type s = size();
- assert(s <= capacity());
-
- size_type target = std::max(inlined_capacity(), s + delta);
-
- // Compute new capacity by repeatedly doubling current capacity
- // TODO(psrc): Check and avoid overflow?
- size_type new_capacity = capacity();
- while (new_capacity < target) {
- new_capacity <<= 1;
- }
-
- Allocation new_allocation(allocator(), new_capacity);
-
- UninitializedCopy(std::make_move_iterator(data()),
- std::make_move_iterator(data() + s),
- new_allocation.buffer());
-
- ResetAllocation(new_allocation, s);
-}
-
-template <typename T, size_t N, typename A>
-auto InlinedVector<T, N, A>::ShiftRight(const_iterator position, size_type n)
- -> std::pair<iterator, iterator> {
- iterator start_used = const_cast<iterator>(position);
- iterator start_raw = const_cast<iterator>(position);
- size_type s = size();
- size_type required_size = s + n;
-
- if (required_size > capacity()) {
- // Compute new capacity by repeatedly doubling current capacity
- size_type new_capacity = capacity();
- while (new_capacity < required_size) {
- new_capacity <<= 1;
- }
- // Move everyone into the new allocation, leaving a gap of `n` for the
- // requested shift.
- Allocation new_allocation(allocator(), new_capacity);
- size_type index = position - begin();
- UninitializedCopy(std::make_move_iterator(data()),
- std::make_move_iterator(data() + index),
- new_allocation.buffer());
- UninitializedCopy(std::make_move_iterator(data() + index),
- std::make_move_iterator(data() + s),
- new_allocation.buffer() + index + n);
- ResetAllocation(new_allocation, s);
-
- // New allocation means our iterator is invalid, so we'll recalculate.
- // Since the entire gap is in new space, there's no used space to reuse.
- start_raw = begin() + index;
- start_used = start_raw;
- } else {
- // If we had enough space, it's a two-part move. Elements going into
- // previously-unoccupied space need an `UninitializedCopy()`. Elements
- // going into a previously-occupied space are just a `std::move()`.
- iterator pos = const_cast<iterator>(position);
- iterator raw_space = end();
- size_type slots_in_used_space = raw_space - pos;
- size_type new_elements_in_used_space = std::min(n, slots_in_used_space);
- size_type new_elements_in_raw_space = n - new_elements_in_used_space;
- size_type old_elements_in_used_space =
- slots_in_used_space - new_elements_in_used_space;
-
- UninitializedCopy(std::make_move_iterator(pos + old_elements_in_used_space),
- std::make_move_iterator(raw_space),
- raw_space + new_elements_in_raw_space);
- std::move_backward(pos, pos + old_elements_in_used_space, raw_space);
-
- // If the gap is entirely in raw space, the used space starts where the raw
- // space starts, leaving no elements in used space. If the gap is entirely
- // in used space, the raw space starts at the end of the gap, leaving all
- // elements accounted for within the used space.
- start_used = pos;
- start_raw = pos + new_elements_in_used_space;
- }
- tag().add_size(n);
- return std::make_pair(start_used, start_raw);
-}
-
-template <typename T, size_t N, typename A>
-void InlinedVector<T, N, A>::Destroy(pointer from, pointer to) {
- for (pointer cur = from; cur != to; ++cur) {
- std::allocator_traits<allocator_type>::destroy(allocator(), cur);
- }
-#ifndef NDEBUG
- // Overwrite unused memory with `0xab` so we can catch uninitialized usage.
- // Cast to `void*` to tell the compiler that we don't care that we might be
- // scribbling on a vtable pointer.
- if (from != to) {
- auto len = sizeof(value_type) * std::distance(from, to);
- std::memset(reinterpret_cast<void*>(from), 0xab, len);
- }
-#endif
-}
-
-template <typename T, size_t N, typename A>
-template <typename Iterator>
-void InlinedVector<T, N, A>::AppendRange(Iterator first, Iterator last,
- std::forward_iterator_tag) {
- auto length = std::distance(first, last);
- reserve(size() + length);
- if (allocated()) {
- UninitializedCopy(first, last, allocated_space() + size());
- tag().set_allocated_size(size() + length);
- } else {
- UninitializedCopy(first, last, inlined_space() + size());
- tag().set_inline_size(size() + length);
- }
-}
-
-template <typename T, size_t N, typename A>
-template <typename Iterator>
-void InlinedVector<T, N, A>::AppendRange(Iterator first, Iterator last,
- std::input_iterator_tag) {
- std::copy(first, last, std::back_inserter(*this));
-}
-
-template <typename T, size_t N, typename A>
-template <typename Iterator>
-void InlinedVector<T, N, A>::AssignRange(Iterator first, Iterator last,
- std::forward_iterator_tag) {
- auto length = std::distance(first, last);
- // Prefer reassignment to copy construction for elements.
- if (static_cast<size_type>(length) <= size()) {
- erase(std::copy(first, last, begin()), end());
- return;
- }
- reserve(length);
- iterator out = begin();
- for (; out != end(); ++first, ++out) *out = *first;
- if (allocated()) {
- UninitializedCopy(first, last, out);
- tag().set_allocated_size(length);
- } else {
- UninitializedCopy(first, last, out);
- tag().set_inline_size(length);
- }
-}
-
-template <typename T, size_t N, typename A>
-template <typename Iterator>
-void InlinedVector<T, N, A>::AssignRange(Iterator first, Iterator last,
- std::input_iterator_tag) {
- // Optimized to avoid reallocation.
- // Prefer reassignment to copy construction for elements.
- iterator out = begin();
- for (; first != last && out != end(); ++first, ++out) {
- *out = *first;
- }
- erase(out, end());
- std::copy(first, last, std::back_inserter(*this));
-}
-
-template <typename T, size_t N, typename A>
-auto InlinedVector<T, N, A>::InsertWithCount(const_iterator position,
- size_type n, const_reference v)
- -> iterator {
- assert(position >= begin() && position <= end());
- if (ABSL_PREDICT_FALSE(n == 0)) return const_cast<iterator>(position);
-
- value_type copy = v;
- std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
- std::fill(it_pair.first, it_pair.second, copy);
- UninitializedFill(it_pair.second, it_pair.first + n, copy);
-
- return it_pair.first;
-}
-
-template <typename T, size_t N, typename A>
-template <typename ForwardIterator>
-auto InlinedVector<T, N, A>::InsertWithRange(const_iterator position,
- ForwardIterator first,
- ForwardIterator last,
- std::forward_iterator_tag)
- -> iterator {
- assert(position >= begin() && position <= end());
- if (ABSL_PREDICT_FALSE(first == last)) return const_cast<iterator>(position);
-
- auto n = std::distance(first, last);
- std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
- size_type used_spots = it_pair.second - it_pair.first;
- ForwardIterator open_spot = std::next(first, used_spots);
- std::copy(first, open_spot, it_pair.first);
- UninitializedCopy(open_spot, last, it_pair.second);
- return it_pair.first;
-}
-
-template <typename T, size_t N, typename A>
-template <typename InputIterator>
-auto InlinedVector<T, N, A>::InsertWithRange(const_iterator position,
- InputIterator first,
- InputIterator last,
- std::input_iterator_tag)
- -> iterator {
- assert(position >= begin() && position <= end());
- size_type index = position - cbegin();
- size_type i = index;
- while (first != last) insert(begin() + i++, *first++);
- return begin() + index;
+// Provides `absl::Hash` support for `absl::InlinedVector`. It is uncommon to
+// call this directly.
+template <typename H, typename T, size_t N, typename A>
+H AbslHashValue(H h, const absl::InlinedVector<T, N, A>& a) {
+ auto size = a.size();
+ return H::combine(H::combine_contiguous(std::move(h), a.data(), size), size);
}
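//
// Example (assuming "absl/hash/hash.h" is also included):
//
//   absl::InlinedVector<int, 4> v = {1, 2, 3};
//   size_t digest = absl::Hash<absl::InlinedVector<int, 4>>{}(v);
//
// This same overload is what lets an inlined vector serve as a key in the
// Abseil hashing containers (e.g. `absl::flat_hash_set`).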
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INLINED_VECTOR_H_
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index a3ad0f8a..b99bbd62 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -1,10 +1,10 @@
-// Copyright 2017 The Abseil Authors.
+// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,28 +12,25 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "absl/container/inlined_vector.h"
-
+#include <array>
#include <string>
#include <vector>
#include "benchmark/benchmark.h"
#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
namespace {
-using IntVec = absl::InlinedVector<int, 8>;
-
void BM_InlinedVectorFill(benchmark::State& state) {
- const int len = state.range(0);
+ absl::InlinedVector<int, 8> v;
+ int val = 10;
for (auto _ : state) {
- IntVec v;
- for (int i = 0; i < len; i++) {
- v.push_back(i);
- }
+ benchmark::DoNotOptimize(v);
+ v.push_back(val);
}
- state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
}
BENCHMARK(BM_InlinedVectorFill)->Range(0, 1024);
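// The rewritten benchmarks in this file lean on `benchmark::DoNotOptimize()`
// rather than hand-rolled inner loops. A minimal sketch of the pattern (the
// names are illustrative; the API is Google Benchmark's, as used above):
//
//   void BM_Sketch(benchmark::State& state) {
//     int x = 0;
//     for (auto _ : state) {
//       benchmark::DoNotOptimize(x);  // keeps `x` observable to the compiler
//       x += 1;                       // so the measured work isn't folded away
//     }
//   }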
@@ -43,23 +40,25 @@ void BM_InlinedVectorFillRange(benchmark::State& state) {
for (int i = 0; i < len; i++) {
ia[i] = i;
}
+ auto* from = ia.get();
+ auto* to = from + len;
for (auto _ : state) {
- IntVec v(ia.get(), ia.get() + len);
+ benchmark::DoNotOptimize(from);
+ benchmark::DoNotOptimize(to);
+ absl::InlinedVector<int, 8> v(from, to);
benchmark::DoNotOptimize(v);
}
- state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
}
BENCHMARK(BM_InlinedVectorFillRange)->Range(0, 1024);
void BM_StdVectorFill(benchmark::State& state) {
- const int len = state.range(0);
+ std::vector<int> v;
+ int val = 10;
for (auto _ : state) {
- std::vector<int> v;
- for (int i = 0; i < len; i++) {
- v.push_back(i);
- }
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(val);
+ v.push_back(val);
}
- state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
}
BENCHMARK(BM_StdVectorFill)->Range(0, 1024);
@@ -89,7 +88,7 @@ void BM_InlinedVectorFillString(benchmark::State& state) {
const int len = state.range(0);
const int no_sso = GetNonShortStringOptimizationSize();
std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
- std::string(no_sso, 'C'), std::string(no_sso, 'D')};
+ std::string(no_sso, 'C'), std::string(no_sso, 'D')};
for (auto _ : state) {
absl::InlinedVector<std::string, 8> v;
@@ -105,7 +104,7 @@ void BM_StdVectorFillString(benchmark::State& state) {
const int len = state.range(0);
const int no_sso = GetNonShortStringOptimizationSize();
std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
- std::string(no_sso, 'C'), std::string(no_sso, 'D')};
+ std::string(no_sso, 'C'), std::string(no_sso, 'D')};
for (auto _ : state) {
std::vector<std::string> v;
@@ -124,7 +123,7 @@ struct Buffer { // some arbitrary structure for benchmarking.
void* user_data;
};
-void BM_InlinedVectorTenAssignments(benchmark::State& state) {
+void BM_InlinedVectorAssignments(benchmark::State& state) {
const int len = state.range(0);
using BufferVec = absl::InlinedVector<Buffer, 2>;
@@ -133,18 +132,25 @@ void BM_InlinedVectorTenAssignments(benchmark::State& state) {
BufferVec dst;
for (auto _ : state) {
- for (int i = 0; i < 10; ++i) {
- dst = src;
- }
+ benchmark::DoNotOptimize(dst);
+ benchmark::DoNotOptimize(src);
+ dst = src;
}
}
-BENCHMARK(BM_InlinedVectorTenAssignments)
- ->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(20);
+BENCHMARK(BM_InlinedVectorAssignments)
+ ->Arg(0)
+ ->Arg(1)
+ ->Arg(2)
+ ->Arg(3)
+ ->Arg(4)
+ ->Arg(20);
void BM_CreateFromContainer(benchmark::State& state) {
for (auto _ : state) {
- absl::InlinedVector<int, 4> x(absl::InlinedVector<int, 4>{1, 2, 3});
- benchmark::DoNotOptimize(x);
+ absl::InlinedVector<int, 4> src{1, 2, 3};
+ benchmark::DoNotOptimize(src);
+ absl::InlinedVector<int, 4> dst(std::move(src));
+ benchmark::DoNotOptimize(dst);
}
}
BENCHMARK(BM_CreateFromContainer);
@@ -159,15 +165,14 @@ struct LargeCopyableOnly {
struct LargeCopyableSwappable {
LargeCopyableSwappable() : d(1024, 17) {}
+
LargeCopyableSwappable(const LargeCopyableSwappable& o) = default;
- LargeCopyableSwappable(LargeCopyableSwappable&& o) = delete;
LargeCopyableSwappable& operator=(LargeCopyableSwappable o) {
using std::swap;
swap(*this, o);
return *this;
}
- LargeCopyableSwappable& operator=(LargeCopyableSwappable&& o) = delete;
friend void swap(LargeCopyableSwappable& a, LargeCopyableSwappable& b) {
using std::swap;
@@ -215,6 +220,8 @@ void BM_SwapElements(benchmark::State& state) {
Vec b;
for (auto _ : state) {
using std::swap;
+ benchmark::DoNotOptimize(a);
+ benchmark::DoNotOptimize(b);
swap(a, b);
}
}
@@ -260,60 +267,44 @@ BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 8>);
void BM_InlinedVectorIndexInlined(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
for (auto _ : state) {
- for (int i = 0; i < 1000; ++i) {
- benchmark::DoNotOptimize(v);
- benchmark::DoNotOptimize(v[4]);
- }
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v[4]);
}
- state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_InlinedVectorIndexInlined);
void BM_InlinedVectorIndexExternal(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
- for (int i = 0; i < 1000; ++i) {
- benchmark::DoNotOptimize(v);
- benchmark::DoNotOptimize(v[4]);
- }
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v[4]);
}
- state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_InlinedVectorIndexExternal);
void BM_StdVectorIndex(benchmark::State& state) {
std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
- for (int i = 0; i < 1000; ++i) {
- benchmark::DoNotOptimize(v);
- benchmark::DoNotOptimize(v[4]);
- }
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v[4]);
}
- state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_StdVectorIndex);
-#define UNROLL_2(x) \
- benchmark::DoNotOptimize(x); \
- benchmark::DoNotOptimize(x);
-
-#define UNROLL_4(x) UNROLL_2(x) UNROLL_2(x)
-#define UNROLL_8(x) UNROLL_4(x) UNROLL_4(x)
-#define UNROLL_16(x) UNROLL_8(x) UNROLL_8(x);
-
void BM_InlinedVectorDataInlined(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
for (auto _ : state) {
- UNROLL_16(v.data());
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.data());
}
- state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_InlinedVectorDataInlined);
void BM_InlinedVectorDataExternal(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
- UNROLL_16(v.data());
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.data());
}
state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
@@ -322,7 +313,8 @@ BENCHMARK(BM_InlinedVectorDataExternal);
void BM_StdVectorData(benchmark::State& state) {
std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
- UNROLL_16(v.data());
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.data());
}
state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
@@ -331,55 +323,482 @@ BENCHMARK(BM_StdVectorData);
void BM_InlinedVectorSizeInlined(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
for (auto _ : state) {
- UNROLL_16(v.size());
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.size());
}
- state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_InlinedVectorSizeInlined);
void BM_InlinedVectorSizeExternal(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
- UNROLL_16(v.size());
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.size());
}
- state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_InlinedVectorSizeExternal);
void BM_StdVectorSize(benchmark::State& state) {
std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
- UNROLL_16(v.size());
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.size());
}
- state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_StdVectorSize);
void BM_InlinedVectorEmptyInlined(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
for (auto _ : state) {
- UNROLL_16(v.empty());
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.empty());
}
- state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_InlinedVectorEmptyInlined);
void BM_InlinedVectorEmptyExternal(benchmark::State& state) {
absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
- UNROLL_16(v.empty());
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.empty());
}
- state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_InlinedVectorEmptyExternal);
void BM_StdVectorEmpty(benchmark::State& state) {
std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
for (auto _ : state) {
- UNROLL_16(v.empty());
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.empty());
}
- state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_StdVectorEmpty);
+constexpr size_t kInlinedCapacity = 4;
+constexpr size_t kLargeSize = kInlinedCapacity * 2;
+constexpr size_t kSmallSize = kInlinedCapacity / 2;
+constexpr size_t kBatchSize = 100;
+
+#define ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_FunctionTemplate, T) \
+ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize); \
+ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize)
+
+#define ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_FunctionTemplate, T) \
+ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kLargeSize); \
+ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kSmallSize); \
+ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kLargeSize); \
+ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kSmallSize)
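// For reference, a single use of the first helper above, e.g.
// ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType),
// expands to one registration per size:
//
//   BENCHMARK_TEMPLATE(BM_ConstructFromSize, TrivialType, kLargeSize);
//   BENCHMARK_TEMPLATE(BM_ConstructFromSize, TrivialType, kSmallSize);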
+
+template <typename T>
+using InlVec = absl::InlinedVector<T, kInlinedCapacity>;
+
+struct TrivialType {
+ size_t val;
+};
+
+class NontrivialType {
+ public:
+ ABSL_ATTRIBUTE_NOINLINE NontrivialType() : val_() {
+ benchmark::DoNotOptimize(*this);
+ }
+
+ ABSL_ATTRIBUTE_NOINLINE NontrivialType(const NontrivialType& other)
+ : val_(other.val_) {
+ benchmark::DoNotOptimize(*this);
+ }
+
+ ABSL_ATTRIBUTE_NOINLINE NontrivialType& operator=(
+ const NontrivialType& other) {
+ val_ = other.val_;
+ benchmark::DoNotOptimize(*this);
+ return *this;
+ }
+
+ ABSL_ATTRIBUTE_NOINLINE ~NontrivialType() noexcept {
+ benchmark::DoNotOptimize(*this);
+ }
+
+ private:
+ size_t val_;
+};
+
+template <typename T, typename PrepareVecFn, typename TestVecFn>
+void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec,
+ TestVecFn test_vec) {
+ std::array<InlVec<T>, kBatchSize> vector_batch{};
+
+ while (state.KeepRunningBatch(kBatchSize)) {
+ // Prepare batch
+ state.PauseTiming();
+ for (size_t i = 0; i < kBatchSize; ++i) {
+ prepare_vec(vector_batch.data() + i, i);
+ }
+ benchmark::DoNotOptimize(vector_batch);
+ state.ResumeTiming();
+
+ // Test batch
+ for (size_t i = 0; i < kBatchSize; ++i) {
+ test_vec(vector_batch.data() + i, i);
+ }
+ }
+}
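// Note on the harness above: each batch is re-prepared while the benchmark
// clock is paused, so only the `test_vec` calls contribute to the measured
// time, and `KeepRunningBatch(kBatchSize)` amortizes the relatively costly
// PauseTiming()/ResumeTiming() pair over 100 vectors at a time.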
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromSize(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ auto size = ToSize;
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+ /* test_vec = */
+ [&](void* ptr, size_t) {
+ benchmark::DoNotOptimize(size);
+ ::new (ptr) VecT(size);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromSizeRef(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ auto size = ToSize;
+ auto ref = T();
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+ /* test_vec = */
+ [&](void* ptr, size_t) {
+ benchmark::DoNotOptimize(size);
+ benchmark::DoNotOptimize(ref);
+ ::new (ptr) VecT(size, ref);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromRange(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ std::array<T, ToSize> arr{};
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+ /* test_vec = */
+ [&](void* ptr, size_t) {
+ benchmark::DoNotOptimize(arr);
+ ::new (ptr) VecT(arr.begin(), arr.end());
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromCopy(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ VecT other_vec(ToSize);
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+ /* test_vec = */
+ [&](void* ptr, size_t) {
+ benchmark::DoNotOptimize(other_vec);
+ ::new (ptr) VecT(other_vec);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromMove(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ std::array<VecT, kBatchSize> vector_batch{};
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [&](InlVec<T>* vec, size_t i) {
+ vector_batch[i].clear();
+ vector_batch[i].resize(ToSize);
+ vec->~VecT();
+ },
+ /* test_vec = */
+ [&](void* ptr, size_t i) {
+ benchmark::DoNotOptimize(vector_batch[i]);
+ ::new (ptr) VecT(std::move(vector_batch[i]));
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignSizeRef(benchmark::State& state) {
+ auto size = ToSize;
+ auto ref = T();
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t) {
+ benchmark::DoNotOptimize(size);
+ benchmark::DoNotOptimize(ref);
+ vec->assign(size, ref);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignRange(benchmark::State& state) {
+ std::array<T, ToSize> arr{};
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t) {
+ benchmark::DoNotOptimize(arr);
+ vec->assign(arr.begin(), arr.end());
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignFromCopy(benchmark::State& state) {
+ InlVec<T> other_vec(ToSize);
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t) {
+ benchmark::DoNotOptimize(other_vec);
+ *vec = other_vec;
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignFromMove(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ std::array<VecT, kBatchSize> vector_batch{};
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [&](InlVec<T>* vec, size_t i) {
+ vector_batch[i].clear();
+ vector_batch[i].resize(ToSize);
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t i) {
+ benchmark::DoNotOptimize(vector_batch[i]);
+ *vec = std::move(vector_batch[i]);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_ResizeSize(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [](InlVec<T>* vec, size_t) { vec->resize(ToSize); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_ResizeSizeRef(benchmark::State& state) {
+ auto t = T();
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t) {
+ benchmark::DoNotOptimize(t);
+ vec->resize(ToSize, t);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_InsertSizeRef(benchmark::State& state) {
+ auto t = T();
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t) {
+ benchmark::DoNotOptimize(t);
+ auto* pos = vec->data() + (vec->size() / 2);
+ vec->insert(pos, t);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_InsertRange(benchmark::State& state) {
+ InlVec<T> other_vec(ToSize);
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t) {
+ benchmark::DoNotOptimize(other_vec);
+ auto* pos = vec->data() + (vec->size() / 2);
+ vec->insert(pos, other_vec.begin(), other_vec.end());
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EmplaceBack(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [](InlVec<T>* vec, size_t) { vec->emplace_back(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_PopBack(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [](InlVec<T>* vec, size_t) { vec->pop_back(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EraseOne(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [](InlVec<T>* vec, size_t) {
+ auto* pos = vec->data() + (vec->size() / 2);
+ vec->erase(pos);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EraseRange(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [](InlVec<T>* vec, size_t) {
+ auto* pos = vec->data() + (vec->size() / 2);
+ vec->erase(pos, pos + 1);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_Clear(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+ /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->clear(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToCapacity>
+void BM_Reserve(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [](InlVec<T>* vec, size_t) { vec->reserve(ToCapacity); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, NontrivialType);
+
+template <typename T, size_t FromCapacity, size_t ToCapacity>
+void BM_ShrinkToFit(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(ToCapacity);
+ vec->reserve(FromCapacity);
+ },
+ /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->shrink_to_fit(); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_Swap(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ std::array<VecT, kBatchSize> vector_batch{};
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [&](InlVec<T>* vec, size_t i) {
+ vector_batch[i].clear();
+ vector_batch[i].resize(ToSize);
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t i) {
+ using std::swap;
+ benchmark::DoNotOptimize(vector_batch[i]);
+ swap(*vec, vector_batch[i]);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, NontrivialType);
+
} // namespace
diff --git a/absl/container/inlined_vector_exception_safety_test.cc b/absl/container/inlined_vector_exception_safety_test.cc
new file mode 100644
index 00000000..ff0da75b
--- /dev/null
+++ b/absl/container/inlined_vector_exception_safety_test.cc
@@ -0,0 +1,489 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/exception_safety_testing.h"
+#include "absl/container/inlined_vector.h"
+
+namespace {
+
+constexpr size_t kInlinedCapacity = 4;
+constexpr size_t kLargeSize = kInlinedCapacity * 2;
+constexpr size_t kSmallSize = kInlinedCapacity / 2;
+
+using Thrower = testing::ThrowingValue<>;
+using MovableThrower = testing::ThrowingValue<testing::TypeSpec::kNoThrowMove>;
+using ThrowAlloc = testing::ThrowingAllocator<Thrower>;
+
+using ThrowerVec = absl::InlinedVector<Thrower, kInlinedCapacity>;
+using MovableThrowerVec = absl::InlinedVector<MovableThrower, kInlinedCapacity>;
+
+using ThrowAllocThrowerVec =
+ absl::InlinedVector<Thrower, kInlinedCapacity, ThrowAlloc>;
+using ThrowAllocMovableThrowerVec =
+ absl::InlinedVector<MovableThrower, kInlinedCapacity, ThrowAlloc>;
+
+// In GCC, if an element of a `std::initializer_list` throws during
+// construction, the elements constructed before it are not destroyed. This
+// causes incorrect exception safety test failures. Thus,
+// `testing::nothrow_ctor` is required. See:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66139
+#define ABSL_INTERNAL_MAKE_INIT_LIST(T, N) \
+ (N > kInlinedCapacity \
+ ? std::initializer_list<T>{T(0, testing::nothrow_ctor), \
+ T(1, testing::nothrow_ctor), \
+ T(2, testing::nothrow_ctor), \
+ T(3, testing::nothrow_ctor), \
+ T(4, testing::nothrow_ctor), \
+ T(5, testing::nothrow_ctor), \
+ T(6, testing::nothrow_ctor), \
+ T(7, testing::nothrow_ctor)} \
+ \
+ : std::initializer_list<T>{T(0, testing::nothrow_ctor), \
+ T(1, testing::nothrow_ctor)})
+static_assert((kLargeSize == 8 || kSmallSize == 2),
+ "Must update ABSL_INTERNAL_MAKE_INIT_LIST(...).");
+
+template <typename TheVecT, size_t... TheSizes>
+class TestParams {
+ public:
+ using VecT = TheVecT;
+ constexpr static size_t GetSizeAt(size_t i) { return kSizes[1 + i]; }
+
+ private:
+ constexpr static size_t kSizes[1 + sizeof...(TheSizes)] = {1, TheSizes...};
+};
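// (The extra leading 1 in `kSizes` keeps the array non-empty even when
// `TheSizes...` is an empty pack, as in the NoSize cases below; `GetSizeAt()`
// compensates by indexing at `1 + i`.)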
+
+using NoSizeTestParams =
+ ::testing::Types<TestParams<ThrowerVec>, TestParams<MovableThrowerVec>,
+ TestParams<ThrowAllocThrowerVec>,
+ TestParams<ThrowAllocMovableThrowerVec>>;
+
+using OneSizeTestParams =
+ ::testing::Types<TestParams<ThrowerVec, kLargeSize>,
+ TestParams<ThrowerVec, kSmallSize>,
+ TestParams<MovableThrowerVec, kLargeSize>,
+ TestParams<MovableThrowerVec, kSmallSize>,
+ TestParams<ThrowAllocThrowerVec, kLargeSize>,
+ TestParams<ThrowAllocThrowerVec, kSmallSize>,
+ TestParams<ThrowAllocMovableThrowerVec, kLargeSize>,
+ TestParams<ThrowAllocMovableThrowerVec, kSmallSize>>;
+
+using TwoSizeTestParams = ::testing::Types<
+ TestParams<ThrowerVec, kLargeSize, kLargeSize>,
+ TestParams<ThrowerVec, kLargeSize, kSmallSize>,
+ TestParams<ThrowerVec, kSmallSize, kLargeSize>,
+ TestParams<ThrowerVec, kSmallSize, kSmallSize>,
+ TestParams<MovableThrowerVec, kLargeSize, kLargeSize>,
+ TestParams<MovableThrowerVec, kLargeSize, kSmallSize>,
+ TestParams<MovableThrowerVec, kSmallSize, kLargeSize>,
+ TestParams<MovableThrowerVec, kSmallSize, kSmallSize>,
+ TestParams<ThrowAllocThrowerVec, kLargeSize, kLargeSize>,
+ TestParams<ThrowAllocThrowerVec, kLargeSize, kSmallSize>,
+ TestParams<ThrowAllocThrowerVec, kSmallSize, kLargeSize>,
+ TestParams<ThrowAllocThrowerVec, kSmallSize, kSmallSize>,
+ TestParams<ThrowAllocMovableThrowerVec, kLargeSize, kLargeSize>,
+ TestParams<ThrowAllocMovableThrowerVec, kLargeSize, kSmallSize>,
+ TestParams<ThrowAllocMovableThrowerVec, kSmallSize, kLargeSize>,
+ TestParams<ThrowAllocMovableThrowerVec, kSmallSize, kSmallSize>>;
+
+template <typename>
+struct NoSizeTest : ::testing::Test {};
+TYPED_TEST_SUITE(NoSizeTest, NoSizeTestParams);
+
+template <typename>
+struct OneSizeTest : ::testing::Test {};
+TYPED_TEST_SUITE(OneSizeTest, OneSizeTestParams);
+
+template <typename>
+struct TwoSizeTest : ::testing::Test {};
+TYPED_TEST_SUITE(TwoSizeTest, TwoSizeTestParams);
+
+template <typename VecT>
+bool InlinedVectorInvariants(VecT* vec) {
+ if (*vec != *vec) return false;
+ if (vec->size() > vec->capacity()) return false;
+ if (vec->size() > vec->max_size()) return false;
+ if (vec->capacity() > vec->max_size()) return false;
+ if (vec->data() != std::addressof(vec->at(0))) return false;
+ if (vec->data() != vec->begin()) return false;
+ if (*vec->data() != *vec->begin()) return false;
+ if (vec->begin() > vec->end()) return false;
+ if ((vec->end() - vec->begin()) != vec->size()) return false;
+ if (std::distance(vec->begin(), vec->end()) != vec->size()) return false;
+ return true;
+}
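// These invariants are wired in through `WithContracts(...)` below: whenever a
// tested operation exits via an exception, the tester re-checks them against
// the vector, so returning false flags a state the exception left broken.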
+
+// A contract function that always returns `false` is correct here, if opaque:
+// it expresses that, as a contract, certain operations should not throw at
+// all. The function only executes once an exception has been thrown, so
+// reaching it means the test should fail.
+// TODO(johnsoncj): Add `testing::NoThrowGuarantee` to the framework
+template <typename VecT>
+bool NoThrowGuarantee(VecT* /* vec */) {
+ return false;
+}
+
+TYPED_TEST(NoSizeTest, DefaultConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using allocator_type = typename VecT::allocator_type;
+
+ testing::TestThrowingCtor<VecT>();
+
+ testing::TestThrowingCtor<VecT>(allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, SizeConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using allocator_type = typename VecT::allocator_type;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ testing::TestThrowingCtor<VecT>(size);
+
+ testing::TestThrowingCtor<VecT>(size, allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, SizeRefConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ using allocator_type = typename VecT::allocator_type;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ testing::TestThrowingCtor<VecT>(size, value_type{});
+
+ testing::TestThrowingCtor<VecT>(size, value_type{}, allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, InitializerListConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ using allocator_type = typename VecT::allocator_type;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ testing::TestThrowingCtor<VecT>(
+ ABSL_INTERNAL_MAKE_INIT_LIST(value_type, size));
+
+ testing::TestThrowingCtor<VecT>(
+ ABSL_INTERNAL_MAKE_INIT_LIST(value_type, size), allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, RangeConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ using allocator_type = typename VecT::allocator_type;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ std::array<value_type, size> arr{};
+
+ testing::TestThrowingCtor<VecT>(arr.begin(), arr.end());
+
+ testing::TestThrowingCtor<VecT>(arr.begin(), arr.end(), allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, CopyConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using allocator_type = typename VecT::allocator_type;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ VecT other_vec{size};
+
+ testing::TestThrowingCtor<VecT>(other_vec);
+
+ testing::TestThrowingCtor<VecT>(other_vec, allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, MoveConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using allocator_type = typename VecT::allocator_type;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ if (!absl::allocator_is_nothrow<allocator_type>::value) {
+ testing::TestThrowingCtor<VecT>(VecT{size});
+
+ testing::TestThrowingCtor<VecT>(VecT{size}, allocator_type{});
+ }
+}
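// (The `allocator_is_nothrow` guard above mirrors the move constructor's
// noexcept specification: with a nothrow allocator the move cannot throw,
// leaving nothing for `TestThrowingCtor` to exercise.)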
+
+TYPED_TEST(TwoSizeTest, Assign) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ constexpr static auto from_size = TypeParam::GetSizeAt(0);
+ constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{from_size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ *vec = ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size);
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ VecT other_vec{to_size};
+ *vec = other_vec;
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ VecT other_vec{to_size};
+ *vec = std::move(other_vec);
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ value_type val{};
+ vec->assign(to_size, val);
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->assign(ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size));
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ std::array<value_type, to_size> arr{};
+ vec->assign(arr.begin(), arr.end());
+ }));
+}
+
+TYPED_TEST(TwoSizeTest, Resize) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ constexpr static auto from_size = TypeParam::GetSizeAt(0);
+ constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{from_size})
+ .WithContracts(InlinedVectorInvariants<VecT>,
+ testing::strong_guarantee);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->resize(to_size); //
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->resize(to_size, value_type{}); //
+ }));
+}
+
+TYPED_TEST(OneSizeTest, Insert) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ constexpr static auto from_size = TypeParam::GetSizeAt(0);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{from_size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ vec->insert(it, value_type{});
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ vec->insert(it, value_type{});
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->end();
+ vec->insert(it, value_type{});
+ }));
+}
+
+TYPED_TEST(TwoSizeTest, Insert) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ constexpr static auto from_size = TypeParam::GetSizeAt(0);
+ constexpr static auto count = TypeParam::GetSizeAt(1);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{from_size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ vec->insert(it, count, value_type{});
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ vec->insert(it, count, value_type{});
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->end();
+ vec->insert(it, count, value_type{});
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count));
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count));
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->end();
+ vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count));
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ std::array<value_type, count> arr{};
+ vec->insert(it, arr.begin(), arr.end());
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ std::array<value_type, count> arr{};
+ vec->insert(it, arr.begin(), arr.end());
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->end();
+ std::array<value_type, count> arr{};
+ vec->insert(it, arr.begin(), arr.end());
+ }));
+}
+
+TYPED_TEST(OneSizeTest, EmplaceBack) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ VecT full_vec{size};
+ full_vec.resize(full_vec.capacity());
+
+ VecT nonfull_vec{size};
+ nonfull_vec.reserve(size + 1);
+
+ auto tester = testing::MakeExceptionSafetyTester().WithContracts(
+ InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.WithInitialValue(nonfull_vec).Test([](VecT* vec) {
+ vec->emplace_back(); //
+ }));
+
+ EXPECT_TRUE(tester.WithInitialValue(full_vec).Test([](VecT* vec) {
+ vec->emplace_back(); //
+ }));
+}
+
+TYPED_TEST(OneSizeTest, PopBack) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{size})
+ .WithContracts(NoThrowGuarantee<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->pop_back(); //
+ }));
+}
+
+TYPED_TEST(OneSizeTest, Erase) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ vec->erase(it);
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ vec->erase(it);
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() - 1);
+ vec->erase(it);
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ vec->erase(it, it + 1);
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ vec->erase(it, it + 1);
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() - 1);
+ vec->erase(it, it + 1);
+ }));
+}
+
+TYPED_TEST(OneSizeTest, Clear) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{size})
+ .WithContracts(NoThrowGuarantee<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->clear(); //
+ }));
+}
+
+TYPED_TEST(TwoSizeTest, Reserve) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto from_size = TypeParam::GetSizeAt(0);
+ constexpr static auto to_capacity = TypeParam::GetSizeAt(1);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{from_size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->reserve(to_capacity); //
+ }));
+}
+
+TYPED_TEST(OneSizeTest, ShrinkToFit) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->shrink_to_fit(); //
+ }));
+}
+
+TYPED_TEST(TwoSizeTest, Swap) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto from_size = TypeParam::GetSizeAt(0);
+ constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{from_size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ VecT other_vec{to_size};
+ vec->swap(other_vec);
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ using std::swap;
+ VecT other_vec{to_size};
+ swap(*vec, other_vec);
+ }));
+}
+
+} // namespace
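
For orientation, the pattern every test above instantiates, written out once. This is an illustrative sketch, assuming the `testing::MakeExceptionSafetyTester` API from absl/base/internal/exception_safety_testing.h and the `VecT`/`value_type`/`InlinedVectorInvariants` aliases defined earlier in this test file:

    // Illustrative sketch (not from the patch). Test() re-runs the lambda,
    // forcing each potentially throwing point to throw in turn, and checks
    // the supplied contracts whenever an exception escapes.
    auto tester = testing::MakeExceptionSafetyTester()
                      .WithInitialValue(VecT{3})
                      .WithContracts(InlinedVectorInvariants<VecT>);
    EXPECT_TRUE(tester.Test([](VecT* vec) {
      vec->push_back(value_type{});  // any mutation that may throw
    }));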
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 3a1ea8ac..bada4fec 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -1,10 +1,10 @@
-// Copyright 2017 The Abseil Authors.
+// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -30,6 +30,7 @@
#include "absl/base/internal/exception_testing.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
+#include "absl/container/internal/counting_allocator.h"
#include "absl/container/internal/test_instance_tracker.h"
#include "absl/hash/hash_testing.h"
#include "absl/memory/memory.h"
@@ -37,6 +38,7 @@
namespace {
+using absl::container_internal::CountingAllocator;
using absl::test_internal::CopyableMovableInstance;
using absl::test_internal::CopyableOnlyInstance;
using absl::test_internal::InstanceTracker;
@@ -68,18 +70,15 @@ MATCHER_P(ValueIs, e, "") {
// test_instance_tracker.h.
template <typename T>
class InstanceTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(InstanceTest);
+TYPED_TEST_SUITE_P(InstanceTest);
// A simple reference counted class to make sure that the proper elements are
// destroyed in the erase(begin, end) test.
class RefCounted {
public:
- RefCounted(int value, int* count) : value_(value), count_(count) {
- Ref();
- }
+ RefCounted(int value, int* count) : value_(value), count_(count) { Ref(); }
- RefCounted(const RefCounted& v)
- : value_(v.value_), count_(v.count_) {
+ RefCounted(const RefCounted& v) : value_(v.value_), count_(v.count_) {
Ref();
}
@@ -138,57 +137,6 @@ static IntVec Fill(int len, int offset = 0) {
return v;
}
-// This is a stateful allocator, but the state lives outside of the
-// allocator (in whatever test is using the allocator). This is odd
-// but helps in tests where the allocator is propagated into nested
-// containers - that chain of allocators uses the same state and is
-// thus easier to query for aggregate allocation information.
-template <typename T>
-class CountingAllocator : public std::allocator<T> {
- public:
- using Alloc = std::allocator<T>;
- using pointer = typename Alloc::pointer;
- using size_type = typename Alloc::size_type;
-
- CountingAllocator() : bytes_used_(nullptr) {}
- explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
-
- template <typename U>
- CountingAllocator(const CountingAllocator<U>& x)
- : Alloc(x), bytes_used_(x.bytes_used_) {}
-
- pointer allocate(size_type n,
- std::allocator<void>::const_pointer hint = nullptr) {
- assert(bytes_used_ != nullptr);
- *bytes_used_ += n * sizeof(T);
- return Alloc::allocate(n, hint);
- }
-
- void deallocate(pointer p, size_type n) {
- Alloc::deallocate(p, n);
- assert(bytes_used_ != nullptr);
- *bytes_used_ -= n * sizeof(T);
- }
-
- template<typename U>
- class rebind {
- public:
- using other = CountingAllocator<U>;
- };
-
- friend bool operator==(const CountingAllocator& a,
- const CountingAllocator& b) {
- return a.bytes_used_ == b.bytes_used_;
- }
-
- friend bool operator!=(const CountingAllocator& a,
- const CountingAllocator& b) {
- return !(a == b);
- }
-
- int64_t* bytes_used_;
-};
-
TEST(IntVec, SimpleOps) {
for (int len = 0; len < 20; len++) {
IntVec v;
@@ -239,6 +187,12 @@ TEST(IntVec, SimpleOps) {
}
}
+TEST(IntVec, PopBackNoOverflow) {
+ IntVec v = {1};
+ v.pop_back();
+ EXPECT_EQ(v.size(), 0);
+}
+
TEST(IntVec, AtThrows) {
IntVec v = {1, 2, 3};
EXPECT_EQ(v.at(2), 3);
@@ -333,7 +287,7 @@ TEST(RefCountedVec, EraseBeginEnd) {
}
// Check that the elements at the end are preserved.
- for (int i = erase_end; i< len; ++i) {
+ for (int i = erase_end; i < len; ++i) {
EXPECT_EQ(1, counts[i]);
}
}
@@ -595,10 +549,10 @@ TEST(IntVec, Resize) {
static const int kResizeElem = 1000000;
for (int k = 0; k < 10; k++) {
// Enlarging resize
- v.resize(len+k, kResizeElem);
- EXPECT_EQ(len+k, v.size());
- EXPECT_LE(len+k, v.capacity());
- for (int i = 0; i < len+k; i++) {
+ v.resize(len + k, kResizeElem);
+ EXPECT_EQ(len + k, v.size());
+ EXPECT_LE(len + k, v.capacity());
+ for (int i = 0; i < len + k; i++) {
if (i < len) {
EXPECT_EQ(i, v[i]);
} else {
@@ -909,7 +863,7 @@ TYPED_TEST_P(InstanceTest, Swap) {
auto min_len = std::min(l1, l2);
auto max_len = std::max(l1, l2);
for (int i = 0; i < l1; i++) a.push_back(Instance(i));
- for (int i = 0; i < l2; i++) b.push_back(Instance(100+i));
+ for (int i = 0; i < l2; i++) b.push_back(Instance(100 + i));
EXPECT_EQ(tracker.instances(), l1 + l2);
tracker.ResetCopiesMovesSwaps();
{
@@ -977,7 +931,7 @@ TEST(IntVec, EqualAndNotEqual) {
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
- b[i] = b[i] - 1; // Back to before
+ b[i] = b[i] - 1; // Back to before
EXPECT_TRUE(a == b);
EXPECT_FALSE(a != b);
}
@@ -1044,7 +998,7 @@ TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) {
// reserve() must not increase the number of initialized objects
SCOPED_TRACE("reserve");
- v.reserve(len+1000);
+ v.reserve(len + 1000);
EXPECT_EQ(tracker.instances(), len);
EXPECT_EQ(tracker.copies() + tracker.moves(), len);
@@ -1290,9 +1244,8 @@ void InstanceCountElemAssignWithAllocationTest() {
absl::InlinedVector<Instance, 2> v(original_contents.begin(),
original_contents.end());
v.assign(3, Instance(123));
- EXPECT_THAT(v,
- AllOf(SizeIs(3),
- ElementsAre(ValueIs(123), ValueIs(123), ValueIs(123))));
+ EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(ValueIs(123), ValueIs(123),
+ ValueIs(123))));
EXPECT_LE(v.size(), v.capacity());
}
}
@@ -1571,8 +1524,8 @@ TYPED_TEST_P(InstanceTest, InitializerListAssign) {
SCOPED_TRACE(original_size);
absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
v.assign({Instance(3), Instance(4), Instance(5)});
- EXPECT_THAT(v, AllOf(SizeIs(3),
- ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
+ EXPECT_THAT(
+ v, AllOf(SizeIs(3), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
EXPECT_LE(3, v.capacity());
}
}
@@ -1597,7 +1550,7 @@ TEST(DynamicVec, DynamicVecCompiles) {
TEST(AllocatorSupportTest, Constructors) {
using MyAlloc = CountingAllocator<int>;
using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
- const int ia[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
int64_t allocated = 0;
MyAlloc alloc(&allocated);
{ AllocVec ABSL_ATTRIBUTE_UNUSED v; }
@@ -1613,7 +1566,7 @@ TEST(AllocatorSupportTest, Constructors) {
TEST(AllocatorSupportTest, CountAllocations) {
using MyAlloc = CountingAllocator<int>;
using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
- const int ia[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
int64_t allocated = 0;
MyAlloc alloc(&allocated);
{
@@ -1677,8 +1630,8 @@ TEST(AllocatorSupportTest, SwapBothAllocated) {
int64_t allocated1 = 0;
int64_t allocated2 = 0;
{
- const int ia1[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
- const int ia2[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+ const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ const int ia2[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
MyAlloc a1(&allocated1);
MyAlloc a2(&allocated2);
AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
@@ -1702,8 +1655,8 @@ TEST(AllocatorSupportTest, SwapOneAllocated) {
int64_t allocated1 = 0;
int64_t allocated2 = 0;
{
- const int ia1[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
- const int ia2[] = { 0, 1, 2, 3 };
+ const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ const int ia2[] = {0, 1, 2, 3};
MyAlloc a1(&allocated1);
MyAlloc a2(&allocated2);
AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
@@ -1722,67 +1675,53 @@ TEST(AllocatorSupportTest, SwapOneAllocated) {
EXPECT_THAT(allocated2, 0);
}
-TEST(AllocatorSupportTest, ScopedAllocatorWorks) {
+TEST(AllocatorSupportTest, ScopedAllocatorWorksInlined) {
using StdVector = std::vector<int, CountingAllocator<int>>;
- using MyAlloc =
- std::scoped_allocator_adaptor<CountingAllocator<StdVector>>;
- using AllocVec = absl::InlinedVector<StdVector, 4, MyAlloc>;
-
- // MSVC 2017's std::vector allocates different amounts of memory in debug
- // versus opt mode.
- int64_t test_allocated = 0;
- StdVector v(CountingAllocator<int>{&test_allocated});
- // The amount of memory allocated by a default constructed vector<int>
- auto default_std_vec_allocated = test_allocated;
- v.push_back(1);
- // The amound of memory allocated by a copy-constructed vector<int> with one
- // element.
- int64_t one_element_std_vec_copy_allocated = test_allocated;
+ using Alloc = CountingAllocator<StdVector>;
+ using ScopedAlloc = std::scoped_allocator_adaptor<Alloc>;
+ using AllocVec = absl::InlinedVector<StdVector, 1, ScopedAlloc>;
- int64_t allocated = 0;
- AllocVec vec(MyAlloc{CountingAllocator<StdVector>{&allocated}});
- EXPECT_EQ(allocated, 0);
+ int64_t total_allocated_byte_count = 0;
- // This default constructs a vector<int>, but the allocator should pass itself
- // into the vector<int>, so check allocation compared to that.
- // The absl::InlinedVector does not allocate any memory.
- // The vector<int> may allocate any memory.
- auto expected = default_std_vec_allocated;
- vec.resize(1);
- EXPECT_EQ(allocated, expected);
-
- // We make vector<int> allocate memory.
- // It must go through the allocator even though we didn't construct the
- // vector directly. This assumes that vec[0] doesn't need to grow its
- // allocation.
- expected += sizeof(int);
- vec[0].push_back(1);
- EXPECT_EQ(allocated, expected);
-
- // Another allocating vector.
- expected += one_element_std_vec_copy_allocated;
- vec.push_back(vec[0]);
- EXPECT_EQ(allocated, expected);
-
- // Overflow the inlined memory.
- // The absl::InlinedVector will now allocate.
- expected += sizeof(StdVector) * 8 + default_std_vec_allocated * 3;
- vec.resize(5);
- EXPECT_EQ(allocated, expected);
-
- // Adding one more in external mode should also work.
- expected += one_element_std_vec_copy_allocated;
- vec.push_back(vec[0]);
- EXPECT_EQ(allocated, expected);
-
- // And extending these should still work. This assumes that vec[0] does not
- // need to grow its allocation.
- expected += sizeof(int);
- vec[0].push_back(1);
- EXPECT_EQ(allocated, expected);
-
- vec.clear();
- EXPECT_EQ(allocated, 0);
+ AllocVec inlined_case(ScopedAlloc(Alloc(+&total_allocated_byte_count)));
+
+ // Called only once to remain inlined
+ inlined_case.emplace_back();
+
+ int64_t absl_responsible_for_count = total_allocated_byte_count;
+ EXPECT_EQ(absl_responsible_for_count, 0);
+
+ inlined_case[0].emplace_back();
+ EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);
+
+ inlined_case.clear();
+ inlined_case.shrink_to_fit();
+ EXPECT_EQ(total_allocated_byte_count, 0);
+}
+
+TEST(AllocatorSupportTest, ScopedAllocatorWorksAllocated) {
+ using StdVector = std::vector<int, CountingAllocator<int>>;
+ using Alloc = CountingAllocator<StdVector>;
+ using ScopedAlloc = std::scoped_allocator_adaptor<Alloc>;
+ using AllocVec = absl::InlinedVector<StdVector, 1, ScopedAlloc>;
+
+ int64_t total_allocated_byte_count = 0;
+
+ AllocVec allocated_case(ScopedAlloc(Alloc(+&total_allocated_byte_count)));
+
+ // Called twice to force the vector into the allocated (out-of-line) state
+ allocated_case.emplace_back();
+ allocated_case.emplace_back();
+
+ int64_t absl_responsible_for_count = total_allocated_byte_count;
+ EXPECT_GT(absl_responsible_for_count, 0);
+
+ allocated_case[1].emplace_back();
+ EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);
+
+ allocated_case.clear();
+ allocated_case.shrink_to_fit();
+ EXPECT_EQ(total_allocated_byte_count, 0);
}
TEST(AllocatorSupportTest, SizeAllocConstructor) {
@@ -1811,4 +1750,23 @@ TEST(AllocatorSupportTest, SizeAllocConstructor) {
}
}
+TEST(InlinedVectorTest, AbslHashValueWorks) {
+ using V = absl::InlinedVector<int, 4>;
+ std::vector<V> cases;
+
+ // Generate a variety of vectors; some of these are small enough for the
+ // inline space but are stored out of line.
+ for (int i = 0; i < 10; ++i) {
+ V v;
+ for (int j = 0; j < i; ++j) {
+ v.push_back(j);
+ }
+ cases.push_back(v);
+ v.resize(i % 4);
+ cases.push_back(v);
+ }
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases));
+}
+
} // anonymous namespace
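
The AbslHashValue test above exercises absl::Hash's extension point. As an illustrative sketch (the `Point` type is hypothetical), a user type opts in like this:

    struct Point {
      int x = 0, y = 0;
      friend bool operator==(const Point& a, const Point& b) {
        return a.x == b.x && a.y == b.y;
      }
      template <typename H>
      friend H AbslHashValue(H h, const Point& p) {
        return H::combine(std::move(h), p.x, p.y);  // hash all salient fields
      }
    };
    // absl::VerifyTypeImplementsAbslHashCorrectly(sample_values) then checks
    // that equal values produce equal hash expansions and that unequal
    // values produce distinguishable ones.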
diff --git a/absl/container/internal/common.h b/absl/container/internal/common.h
new file mode 100644
index 00000000..a02cd5c3
--- /dev/null
+++ b/absl/container/internal/common.h
@@ -0,0 +1,198 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+
+#include <cassert>
+#include <type_traits>
+
+#include "absl/meta/type_traits.h"
+#include "absl/types/optional.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+
+template <class, class = void>
+struct IsTransparent : std::false_type {};
+template <class T>
+struct IsTransparent<T, absl::void_t<typename T::is_transparent>>
+ : std::true_type {};
+
+template <bool is_transparent>
+struct KeyArg {
+ // Transparent. Forward `K`.
+ template <typename K, typename key_type>
+ using type = K;
+};
+
+template <>
+struct KeyArg<false> {
+ // Not transparent. Always use `key_type`.
+ template <typename K, typename key_type>
+ using type = key_type;
+};
+
+// The node_handle concept from C++17.
+// We specialize node_handle for sets and maps. node_handle_base holds the
+// common API of both.
+template <typename PolicyTraits, typename Alloc>
+class node_handle_base {
+ protected:
+ using slot_type = typename PolicyTraits::slot_type;
+
+ public:
+ using allocator_type = Alloc;
+
+ constexpr node_handle_base() {}
+ node_handle_base(node_handle_base&& other) noexcept {
+ *this = std::move(other);
+ }
+ ~node_handle_base() { destroy(); }
+ node_handle_base& operator=(node_handle_base&& other) noexcept {
+ destroy();
+ if (!other.empty()) {
+ alloc_ = other.alloc_;
+ PolicyTraits::transfer(alloc(), slot(), other.slot());
+ other.reset();
+ }
+ return *this;
+ }
+
+ bool empty() const noexcept { return !alloc_; }
+ explicit operator bool() const noexcept { return !empty(); }
+ allocator_type get_allocator() const { return *alloc_; }
+
+ protected:
+ friend struct CommonAccess;
+
+ struct transfer_tag_t {};
+ node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s)
+ : alloc_(a) {
+ PolicyTraits::transfer(alloc(), slot(), s);
+ }
+
+ struct move_tag_t {};
+ node_handle_base(move_tag_t, const allocator_type& a, slot_type* s)
+ : alloc_(a) {
+ PolicyTraits::construct(alloc(), slot(), s);
+ }
+
+ void destroy() {
+ if (!empty()) {
+ PolicyTraits::destroy(alloc(), slot());
+ reset();
+ }
+ }
+
+ void reset() {
+ assert(alloc_.has_value());
+ alloc_ = absl::nullopt;
+ }
+
+ slot_type* slot() const {
+ assert(!empty());
+ return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
+ }
+ allocator_type* alloc() { return std::addressof(*alloc_); }
+
+ private:
+ absl::optional<allocator_type> alloc_;
+ mutable absl::aligned_storage_t<sizeof(slot_type), alignof(slot_type)>
+ slot_space_;
+};
+
+// For sets.
+template <typename Policy, typename PolicyTraits, typename Alloc,
+ typename = void>
+class node_handle : public node_handle_base<PolicyTraits, Alloc> {
+ using Base = typename node_handle::node_handle_base;
+
+ public:
+ using value_type = typename PolicyTraits::value_type;
+
+ constexpr node_handle() {}
+
+ value_type& value() const { return PolicyTraits::element(this->slot()); }
+
+ private:
+ friend struct CommonAccess;
+
+ using Base::Base;
+};
+
+// For maps.
+template <typename Policy, typename PolicyTraits, typename Alloc>
+class node_handle<Policy, PolicyTraits, Alloc,
+ absl::void_t<typename Policy::mapped_type>>
+ : public node_handle_base<PolicyTraits, Alloc> {
+ using Base = typename node_handle::node_handle_base;
+
+ public:
+ using key_type = typename Policy::key_type;
+ using mapped_type = typename Policy::mapped_type;
+
+ constexpr node_handle() {}
+
+ auto key() const -> decltype(PolicyTraits::key(this->slot())) {
+ return PolicyTraits::key(this->slot());
+ }
+
+ mapped_type& mapped() const {
+ return PolicyTraits::value(&PolicyTraits::element(this->slot()));
+ }
+
+ private:
+ friend struct CommonAccess;
+
+ using Base::Base;
+};
+
+// Provide access to non-public node-handle functions.
+struct CommonAccess {
+ template <typename Node>
+ static auto GetSlot(const Node& node) -> decltype(node.slot()) {
+ return node.slot();
+ }
+
+ template <typename Node>
+ static void Reset(Node* node) {
+ node->reset();
+ }
+
+ template <typename T, typename... Args>
+ static T Transfer(Args&&... args) {
+ return T(typename T::transfer_tag_t{}, std::forward<Args>(args)...);
+ }
+
+ template <typename T, typename... Args>
+ static T Move(Args&&... args) {
+ return T(typename T::move_tag_t{}, std::forward<Args>(args)...);
+ }
+};
+
+// Implement the insert_return_type<> concept of C++17.
+template <class Iterator, class NodeType>
+struct InsertReturnType {
+ Iterator position;
+ bool inserted;
+ NodeType node;
+};
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_
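
The node-handle API these classes implement for Abseil's containers mirrors the C++17 standard one, which makes the workflow easy to sketch with std::map (illustrative only):

    #include <map>
    #include <string>
    #include <utility>

    std::map<int, std::string> src{{1, "one"}, {2, "two"}};
    std::map<int, std::string> dst;

    auto node = src.extract(1);   // src releases ownership of the element
    node.key() = 42;              // the key is mutable through the handle
    dst.insert(std::move(node));  // moved in; key and value are never copied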
diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
index 29fe7c12..fbace496 100644
--- a/absl/container/internal/compressed_tuple.h
+++ b/absl/container/internal/compressed_tuple.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -27,27 +27,28 @@
// const T2& t2 = value.get<2>();
// ...
//
-// http://en.cppreference.com/w/cpp/language/ebo
+// https://en.cppreference.com/w/cpp/language/ebo
#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+#include <initializer_list>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/utility/utility.h"
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && !defined(__NVCC__)
// We need to mark these classes with this declspec to ensure that the
// empty base optimization in CompressedTuple happens.
#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
-#else // _MSC_VER
+#else
#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
-#endif // _MSC_VER
+#endif
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <typename... Ts>
@@ -76,57 +77,110 @@ constexpr bool IsFinal() {
#endif
}
+// We can't use EBCO on other CompressedTuples because that would mean that we
+// derive from multiple Storage<> instantiations with the same I parameter,
+// and potentially from multiple identical Storage<> instantiations. So anytime
+// we use type inheritance rather than encapsulation, we mark
+// CompressedTupleImpl with the uses_inheritance tag to make this easy to
+// detect.
+struct uses_inheritance {};
+
template <typename T>
constexpr bool ShouldUseBase() {
- return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>();
+ return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
+ !std::is_base_of<uses_inheritance, T>::value;
}
// The storage class provides two specializations:
// - For empty classes, it stores T as a base class.
// - For everything else, it stores T as a member.
-template <typename D, size_t I, bool = ShouldUseBase<ElemT<D, I>>()>
+template <typename T, size_t I,
+#if defined(_MSC_VER)
+ bool UseBase =
+ ShouldUseBase<typename std::enable_if<true, T>::type>()>
+#else
+ bool UseBase = ShouldUseBase<T>()>
+#endif
struct Storage {
- using T = ElemT<D, I>;
T value;
constexpr Storage() = default;
- explicit constexpr Storage(T&& v) : value(absl::forward<T>(v)) {}
- constexpr const T& get() const { return value; }
- T& get() { return value; }
+ template <typename V>
+ explicit constexpr Storage(absl::in_place_t, V&& v)
+ : value(absl::forward<V>(v)) {}
+ constexpr const T& get() const& { return value; }
+ T& get() & { return value; }
+ constexpr const T&& get() const&& { return absl::move(*this).value; }
+ T&& get() && { return std::move(*this).value; }
};
-template <typename D, size_t I>
-struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<D, I, true>
- : ElemT<D, I> {
- using T = internal_compressed_tuple::ElemT<D, I>;
+template <typename T, size_t I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
constexpr Storage() = default;
- explicit constexpr Storage(T&& v) : T(absl::forward<T>(v)) {}
- constexpr const T& get() const { return *this; }
- T& get() { return *this; }
+
+ template <typename V>
+ explicit constexpr Storage(absl::in_place_t, V&& v)
+ : T(absl::forward<V>(v)) {}
+
+ constexpr const T& get() const& { return *this; }
+ T& get() & { return *this; }
+ constexpr const T&& get() const&& { return absl::move(*this); }
+ T&& get() && { return std::move(*this); }
};
-template <typename D, typename I>
+template <typename D, typename I, bool ShouldAnyUseBase>
struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
-template <typename... Ts, size_t... I>
-struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
- CompressedTupleImpl<CompressedTuple<Ts...>, absl::index_sequence<I...>>
+template <typename... Ts, size_t... I, bool ShouldAnyUseBase>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence<I...>, ShouldAnyUseBase>
// We use the dummy identity function through std::integral_constant to
// convince MSVC of accepting and expanding I in that context. Without it
// you would get:
// error C3548: 'I': parameter pack cannot be used in this context
- : Storage<CompressedTuple<Ts...>,
- std::integral_constant<size_t, I>::value>... {
+ : uses_inheritance,
+ Storage<Ts, std::integral_constant<size_t, I>::value>... {
constexpr CompressedTupleImpl() = default;
- explicit constexpr CompressedTupleImpl(Ts&&... args)
- : Storage<CompressedTuple<Ts...>, I>(absl::forward<Ts>(args))... {}
+ template <typename... Vs>
+ explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
+ : Storage<Ts, I>(absl::in_place, absl::forward<Vs>(args))... {}
+ friend CompressedTuple<Ts...>;
};
+template <typename... Ts, size_t... I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence<I...>, false>
+ // We use the dummy identity function as above...
+ : Storage<Ts, std::integral_constant<size_t, I>::value, false>... {
+ constexpr CompressedTupleImpl() = default;
+ template <typename... Vs>
+ explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
+ : Storage<Ts, I, false>(absl::in_place, absl::forward<Vs>(args))... {}
+ friend CompressedTuple<Ts...>;
+};
+
+std::false_type Or(std::initializer_list<std::false_type>);
+std::true_type Or(std::initializer_list<bool>);
+
+// MSVC requires this to be done separately rather than within the declaration
+// of CompressedTuple below.
+template <typename... Ts>
+constexpr bool ShouldAnyUseBase() {
+ return decltype(
+ Or({std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
+}
+
+template <typename T, typename V>
+using TupleMoveConstructible = typename std::conditional<
+ std::is_reference<T>::value, std::is_convertible<V, T>,
+ std::is_constructible<T, V&&>>::type;
+
} // namespace internal_compressed_tuple
// Helper class to perform the Empty Base Class Optimization.
// Ts can contain classes and non-classes, empty or not. For the ones that
// are empty classes, we perform the CompressedTuple. If all types in Ts are
-// empty classes, then CompressedTuple<Ts...> is itself an empty class.
+// empty classes, then CompressedTuple<Ts...> is itself an empty class. (This
+// does not apply when one or more of those empty classes is itself an empty
+// CompressedTuple.)
//
// To access the members, use member .get<N>() function.
//
@@ -138,28 +192,62 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
// const T2& t2 = value.get<2>();
// ...
//
-// http://en.cppreference.com/w/cpp/language/ebo
+// https://en.cppreference.com/w/cpp/language/ebo
template <typename... Ts>
class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
: private internal_compressed_tuple::CompressedTupleImpl<
- CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>> {
+ CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>,
+ internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
private:
template <int I>
using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
+ template <int I>
+ using StorageT = internal_compressed_tuple::Storage<ElemT<I>, I>;
+
public:
+ // There seems to be a bug in MSVC in which using '=default' here will
+ // cause the compiler to ignore the body of other constructors. The
+ // workaround is to explicitly implement the default constructor.
+#if defined(_MSC_VER)
+ constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {}
+#else
constexpr CompressedTuple() = default;
- explicit constexpr CompressedTuple(Ts... base)
- : CompressedTuple::CompressedTupleImpl(absl::forward<Ts>(base)...) {}
+#endif
+ explicit constexpr CompressedTuple(const Ts&... base)
+ : CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {}
+
+ template <typename... Vs,
+ absl::enable_if_t<
+ absl::conjunction<
+ // Ensure we are not hiding default copy/move constructors.
+ absl::negation<std::is_same<void(CompressedTuple),
+ void(absl::decay_t<Vs>...)>>,
+ internal_compressed_tuple::TupleMoveConstructible<
+ Ts, Vs&&>...>::value,
+ bool> = true>
+ explicit constexpr CompressedTuple(Vs&&... base)
+ : CompressedTuple::CompressedTupleImpl(absl::in_place,
+ absl::forward<Vs>(base)...) {}
+
+ template <int I>
+ ElemT<I>& get() & {
+ return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
+ }
+
+ template <int I>
+ constexpr const ElemT<I>& get() const& {
+ return StorageT<I>::get();
+ }
template <int I>
- ElemT<I>& get() {
- return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+ ElemT<I>&& get() && {
+ return std::move(*this).StorageT<I>::get();
}
template <int I>
- constexpr const ElemT<I>& get() const {
- return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+ constexpr const ElemT<I>&& get() const&& {
+ return absl::move(*this).StorageT<I>::get();
}
};
@@ -169,7 +257,7 @@ template <>
class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
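
As an illustrative sketch of what the empty-base optimization performed here buys (the `EmptyDeleter` functor is hypothetical; the accompanying test file makes the same check with `Empty<N>`):

    struct EmptyDeleter {
      void operator()(int* p) const { delete p; }
    };

    // Stored as a base, the empty class contributes no bytes, so the tuple
    // is the size of the pointer alone on compilers that perform EBO.
    using Tup = absl::container_internal::CompressedTuple<int*, EmptyDeleter>;
    static_assert(sizeof(Tup) == sizeof(int*), "EmptyDeleter adds no storage");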
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
index 2b5ed4a4..ec893b90 100644
--- a/absl/container/internal/compressed_tuple_test.cc
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,18 +14,26 @@
#include "absl/container/internal/compressed_tuple.h"
+#include <memory>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/memory/memory.h"
+#include "absl/types/any.h"
+#include "absl/types/optional.h"
+#include "absl/utility/utility.h"
-namespace absl {
-inline namespace lts_2018_12_18 {
-namespace container_internal {
-namespace {
+// These are declared at global scope purely so that error messages
+// are smaller and easier to understand.
+enum class CallType { kConstRef, kConstMove };
template <int>
-struct Empty {};
+struct Empty {
+ constexpr CallType value() const& { return CallType::kConstRef; }
+ constexpr CallType value() const&& { return CallType::kConstMove; }
+};
template <typename T>
struct NotEmpty {
@@ -38,6 +46,15 @@ struct TwoValues {
U value2;
};
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+namespace {
+
+using absl::test_internal::CopyableMovableInstance;
+using absl::test_internal::InstanceTracker;
+
TEST(CompressedTupleTest, Sizeof) {
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>>));
@@ -53,6 +70,141 @@ TEST(CompressedTupleTest, Sizeof) {
sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>, Empty<1>>));
}
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) {
+ InstanceTracker tracker;
+ CompressedTuple<CopyableMovableInstance> x1(CopyableMovableInstance(1));
+ EXPECT_EQ(tracker.instances(), 1);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_LE(tracker.moves(), 1);
+ EXPECT_EQ(x1.get<0>().value(), 1);
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionMove) {
+ InstanceTracker tracker;
+
+ CopyableMovableInstance i1(1);
+ CompressedTuple<CopyableMovableInstance> x1(std::move(i1));
+ EXPECT_EQ(tracker.instances(), 2);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_LE(tracker.moves(), 1);
+ EXPECT_EQ(x1.get<0>().value(), 1);
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionMixedTypes) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+ CopyableMovableInstance i2(2);
+ Empty<0> empty;
+ CompressedTuple<CopyableMovableInstance, CopyableMovableInstance&, Empty<0>>
+ x1(std::move(i1), i2, empty);
+ EXPECT_EQ(x1.get<0>().value(), 1);
+ EXPECT_EQ(x1.get<1>().value(), 2);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+}
+
+struct IncompleteType;
+CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>>
+MakeWithIncomplete(CopyableMovableInstance i1,
+ IncompleteType& t, // NOLINT
+ Empty<0> empty) {
+ return CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>>{
+ std::move(i1), t, empty};
+}
+
+struct IncompleteType {};
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionWithIncompleteType) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+ Empty<0> empty;
+ struct DerivedType : IncompleteType { int value = 0; };
+ DerivedType fd;
+ fd.value = 7;
+
+ CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>> x1 =
+ MakeWithIncomplete(std::move(i1), fd, empty);
+
+ EXPECT_EQ(x1.get<0>().value(), 1);
+ EXPECT_EQ(static_cast<DerivedType&>(x1.get<1>()).value, 7);
+
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 2);
+}
+
+TEST(CompressedTupleTest,
+ OneMoveOnRValueConstructionMixedTypes_BraceInitPoisonPillExpected) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+ CopyableMovableInstance i2(2);
+ CompressedTuple<CopyableMovableInstance, CopyableMovableInstance&, Empty<0>>
+ x1(std::move(i1), i2, {}); // NOLINT
+ EXPECT_EQ(x1.get<0>().value(), 1);
+ EXPECT_EQ(x1.get<1>().value(), 2);
+ EXPECT_EQ(tracker.instances(), 3);
+ // We are forced into the `const Ts&...` constructor (invoking copies)
+ // because we need it to deduce the type of `{}`.
+ // std::tuple also has this behavior.
+ // Note, this test is proof that this is expected behavior, but it is not
+ // _desired_ behavior.
+ EXPECT_EQ(tracker.copies(), 1);
+ EXPECT_EQ(tracker.moves(), 0);
+}
+
+TEST(CompressedTupleTest, OneCopyOnLValueConstruction) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+
+ CompressedTuple<CopyableMovableInstance> x1(i1);
+ EXPECT_EQ(tracker.copies(), 1);
+ EXPECT_EQ(tracker.moves(), 0);
+
+ tracker.ResetCopiesMovesSwaps();
+
+ CopyableMovableInstance i2(2);
+ const CopyableMovableInstance& i2_ref = i2;
+ CompressedTuple<CopyableMovableInstance> x2(i2_ref);
+ EXPECT_EQ(tracker.copies(), 1);
+ EXPECT_EQ(tracker.moves(), 0);
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueAccess) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+ CompressedTuple<CopyableMovableInstance> x(std::move(i1));
+ tracker.ResetCopiesMovesSwaps();
+
+ CopyableMovableInstance i2 = std::move(x).get<0>();
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+}
+
+TEST(CompressedTupleTest, OneCopyOnLValueAccess) {
+ InstanceTracker tracker;
+
+ CompressedTuple<CopyableMovableInstance> x(CopyableMovableInstance(0));
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+
+ CopyableMovableInstance t = x.get<0>();
+ EXPECT_EQ(tracker.copies(), 1);
+ EXPECT_EQ(tracker.moves(), 1);
+}
+
+TEST(CompressedTupleTest, ZeroCopyOnRefAccess) {
+ InstanceTracker tracker;
+
+ CompressedTuple<CopyableMovableInstance> x(CopyableMovableInstance(0));
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+
+ CopyableMovableInstance& t1 = x.get<0>();
+ const CopyableMovableInstance& t2 = x.get<0>();
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+ EXPECT_EQ(t1.value(), 0);
+ EXPECT_EQ(t2.value(), 0);
+}
+
TEST(CompressedTupleTest, Access) {
struct S {
std::string x;
@@ -113,9 +265,14 @@ TEST(CompressedTupleTest, Nested) {
EXPECT_EQ(4 * sizeof(char),
sizeof(CompressedTuple<CompressedTuple<char, char>,
CompressedTuple<char, char>>));
- EXPECT_TRUE(
- (std::is_empty<CompressedTuple<CompressedTuple<Empty<0>>,
- CompressedTuple<Empty<1>>>>::value));
+ EXPECT_TRUE((std::is_empty<CompressedTuple<Empty<0>, Empty<1>>>::value));
+
+ // Make sure everything still works when things are nested.
+ struct CT_Empty : CompressedTuple<Empty<0>> {};
+ CompressedTuple<Empty<0>, CT_Empty> nested_empty;
+ auto contained = nested_empty.get<0>();
+ auto nested = nested_empty.get<1>().get<0>();
+ EXPECT_TRUE((std::is_same<decltype(contained), decltype(nested)>::value));
}
TEST(CompressedTupleTest, Reference) {
@@ -141,15 +298,103 @@ TEST(CompressedTupleTest, NoElements) {
EXPECT_TRUE(std::is_empty<CompressedTuple<>>::value);
}
+TEST(CompressedTupleTest, MoveOnlyElements) {
+ CompressedTuple<std::unique_ptr<std::string>> str_tup(
+ absl::make_unique<std::string>("str"));
+
+ CompressedTuple<CompressedTuple<std::unique_ptr<std::string>>,
+ std::unique_ptr<int>>
+ x(std::move(str_tup), absl::make_unique<int>(5));
+
+ EXPECT_EQ(*x.get<0>().get<0>(), "str");
+ EXPECT_EQ(*x.get<1>(), 5);
+
+ std::unique_ptr<std::string> x0 = std::move(x.get<0>()).get<0>();
+ std::unique_ptr<int> x1 = std::move(x).get<1>();
+
+ EXPECT_EQ(*x0, "str");
+ EXPECT_EQ(*x1, 5);
+}
+
+TEST(CompressedTupleTest, MoveConstructionMoveOnlyElements) {
+ CompressedTuple<std::unique_ptr<std::string>> base(
+ absl::make_unique<std::string>("str"));
+ EXPECT_EQ(*base.get<0>(), "str");
+
+ CompressedTuple<std::unique_ptr<std::string>> copy(std::move(base));
+ EXPECT_EQ(*copy.get<0>(), "str");
+}
+
+TEST(CompressedTupleTest, AnyElements) {
+ any a(std::string("str"));
+ CompressedTuple<any, any&> x(any(5), a);
+ EXPECT_EQ(absl::any_cast<int>(x.get<0>()), 5);
+ EXPECT_EQ(absl::any_cast<std::string>(x.get<1>()), "str");
+
+ a = 0.5f;
+ EXPECT_EQ(absl::any_cast<float>(x.get<1>()), 0.5);
+
+ // Ensure copy construction works in the face of a type with a universal
+ // implicit constructor.
+ CompressedTuple<absl::any> c{}, d(c); // NOLINT
+}
+
TEST(CompressedTupleTest, Constexpr) {
- constexpr CompressedTuple<int, double, CompressedTuple<int>> x(
- 7, 1.25, CompressedTuple<int>(5));
+ struct NonTrivialStruct {
+ constexpr NonTrivialStruct() = default;
+ constexpr int value() const { return v; }
+ int v = 5;
+ };
+ struct TrivialStruct {
+ TrivialStruct() = default;
+ constexpr int value() const { return v; }
+ int v;
+ };
+ constexpr CompressedTuple<int, double, CompressedTuple<int>, Empty<0>> x(
+ 7, 1.25, CompressedTuple<int>(5), {});
constexpr int x0 = x.get<0>();
constexpr double x1 = x.get<1>();
constexpr int x2 = x.get<2>().get<0>();
+ constexpr CallType x3 = x.get<3>().value();
+
EXPECT_EQ(x0, 7);
EXPECT_EQ(x1, 1.25);
EXPECT_EQ(x2, 5);
+ EXPECT_EQ(x3, CallType::kConstRef);
+
+#if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 4
+ constexpr CompressedTuple<Empty<0>, TrivialStruct, int> trivial = {};
+ constexpr CallType trivial0 = trivial.get<0>().value();
+ constexpr int trivial1 = trivial.get<1>().value();
+ constexpr int trivial2 = trivial.get<2>();
+
+ EXPECT_EQ(trivial0, CallType::kConstRef);
+ EXPECT_EQ(trivial1, 0);
+ EXPECT_EQ(trivial2, 0);
+#endif
+
+ constexpr CompressedTuple<Empty<0>, NonTrivialStruct, absl::optional<int>>
+ non_trivial = {};
+ constexpr CallType non_trivial0 = non_trivial.get<0>().value();
+ constexpr int non_trivial1 = non_trivial.get<1>().value();
+ constexpr absl::optional<int> non_trivial2 = non_trivial.get<2>();
+
+ EXPECT_EQ(non_trivial0, CallType::kConstRef);
+ EXPECT_EQ(non_trivial1, 5);
+ EXPECT_EQ(non_trivial2, absl::nullopt);
+
+ static constexpr char data[] = "DEF";
+ constexpr CompressedTuple<const char*> z(data);
+ constexpr const char* z1 = z.get<0>();
+ EXPECT_EQ(std::string(z1), std::string(data));
+
+#if defined(__clang__)
+ // An apparent bug in earlier versions of gcc claims these are ambiguous.
+ constexpr int x2m = absl::move(x.get<2>()).get<0>();
+ constexpr CallType x3m = absl::move(x).get<3>().value();
+ EXPECT_EQ(x2m, 5);
+ EXPECT_EQ(x3m, CallType::kConstMove);
+#endif
}
#if defined(__clang__) || defined(__GNUC__)
@@ -164,5 +409,5 @@ TEST(CompressedTupleTest, EmptyFinalClass) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
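
The ref-qualified get() overloads exercised by MoveOnlyElements allow a member to be moved out of an expiring tuple; a minimal illustrative sketch:

    CompressedTuple<std::unique_ptr<int>> t(absl::make_unique<int>(7));
    std::unique_ptr<int> p = std::move(t).get<0>();  // selects the && overload
    assert(p != nullptr && *p == 7);                 // t's member is moved-from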
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index ddccbe05..eb6d7eb7 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -34,7 +34,7 @@
#include "absl/utility/utility.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
// Allocates at least n bytes aligned to the specified alignment.
@@ -287,13 +287,48 @@ struct IsLayoutCompatible {
} // namespace memory_internal
-// If kMutableKeys is false, only the value member is accessed.
+// The internal storage type for key-value containers like flat_hash_map.
//
-// If kMutableKeys is true, key is accessed through all slots while value and
-// mutable_value are accessed only via INITIALIZED slots. Slots are created and
-// destroyed via mutable_value so that the key can be moved later.
+// It is convenient for the value_type of a flat_hash_map<K, V> to be
+// pair<const K, V>; the "const K" prevents accidental modification of the key
+// when dealing with the reference returned from find() and similar methods.
+// However, this creates other problems; we want to be able to emplace(K, V)
+// efficiently with move operations, and similarly be able to move a
+// pair<K, V> in insert().
+//
+// The solution is this union, which aliases the const and non-const versions
+// of the pair. This also allows flat_hash_map<const K, V> to work, even though
+// that has the same efficiency issues with move in emplace() and insert() -
+// but people do it anyway.
+//
+// If kMutableKeys is false, only the value member can be accessed.
+//
+// If kMutableKeys is true, key can be accessed through all slots while value
+// and mutable_value must be accessed only via INITIALIZED slots. Slots are
+// created and destroyed via mutable_value so that the key can be moved later.
+//
+// Accessing one of the union fields while the other is active is safe as
+// long as they are layout-compatible, which is guaranteed by the definition of
+// kMutableKeys. For C++11, the relevant section of the standard is
+// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19)
template <class K, class V>
-union slot_type {
+union map_slot_type {
+ map_slot_type() {}
+ ~map_slot_type() = delete;
+ using value_type = std::pair<const K, V>;
+ using mutable_value_type = std::pair<K, V>;
+
+ value_type value;
+ mutable_value_type mutable_value;
+ K key;
+};
+
+template <class K, class V>
+struct map_slot_policy {
+ using slot_type = map_slot_type<K, V>;
+ using value_type = std::pair<const K, V>;
+ using mutable_value_type = std::pair<K, V>;
+
private:
static void emplace(slot_type* slot) {
// The construction of union doesn't do anything at runtime but it allows us
@@ -303,19 +338,17 @@ union slot_type {
// If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
// or the other via slot_type. We are also free to access the key via
// slot_type::key in this case.
- using kMutableKeys =
- std::integral_constant<bool,
- memory_internal::IsLayoutCompatible<K, V>::value>;
+ using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;
public:
- slot_type() {}
- ~slot_type() = delete;
- using value_type = std::pair<const K, V>;
- using mutable_value_type = std::pair<K, V>;
+ static value_type& element(slot_type* slot) { return slot->value; }
+ static const value_type& element(const slot_type* slot) {
+ return slot->value;
+ }
- value_type value;
- mutable_value_type mutable_value;
- K key;
+ static const K& key(const slot_type* slot) {
+ return kMutableKeys::value ? slot->key : slot->value.first;
+ }
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
@@ -401,7 +434,7 @@ union slot_type {
};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
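
An illustrative sketch of the aliasing the map_slot_type comment describes, with the layout-compatibility precondition simply assumed (the real code verifies it via IsLayoutCompatible before enabling kMutableKeys):

    using MutableView = std::pair<int, int>;
    union Slot {
      Slot() {}
      ~Slot() {}
      std::pair<const int, int> value;  // the view handed to users (const key)
      MutableView mutable_value;        // the view the table moves through
    };

    Slot s;
    new (&s.mutable_value) MutableView(1, 2);  // construct via the mutable view
    assert(s.value.first == 1);                // read via the const-key view
    s.mutable_value.~MutableView();            // destroy via the mutable view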
diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc
index da87ca20..ea9568dc 100644
--- a/absl/container/internal/container_memory_test.cc
+++ b/absl/container/internal/container_memory_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,7 +23,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -186,5 +186,5 @@ TEST(DecomposePair, NotDecomposable) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/counting_allocator.h b/absl/container/internal/counting_allocator.h
new file mode 100644
index 00000000..94a457ca
--- /dev/null
+++ b/absl/container/internal/counting_allocator.h
@@ -0,0 +1,81 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers - that chain of allocators uses the same state and is
+// thus easier to query for aggregate allocation information.
+template <typename T>
+class CountingAllocator : public std::allocator<T> {
+ public:
+ using Alloc = std::allocator<T>;
+ using pointer = typename Alloc::pointer;
+ using size_type = typename Alloc::size_type;
+
+ CountingAllocator() : bytes_used_(nullptr) {}
+ explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
+
+ template <typename U>
+ CountingAllocator(const CountingAllocator<U>& x)
+ : Alloc(x), bytes_used_(x.bytes_used_) {}
+
+ pointer allocate(size_type n,
+ std::allocator<void>::const_pointer hint = nullptr) {
+ assert(bytes_used_ != nullptr);
+ *bytes_used_ += n * sizeof(T);
+ return Alloc::allocate(n, hint);
+ }
+
+ void deallocate(pointer p, size_type n) {
+ Alloc::deallocate(p, n);
+ assert(bytes_used_ != nullptr);
+ *bytes_used_ -= n * sizeof(T);
+ }
+
+ template<typename U>
+ class rebind {
+ public:
+ using other = CountingAllocator<U>;
+ };
+
+ friend bool operator==(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return a.bytes_used_ == b.bytes_used_;
+ }
+
+ friend bool operator!=(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return !(a == b);
+ }
+
+ int64_t* bytes_used_;
+};
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
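
Typical usage, mirroring the inlined_vector tests earlier in this patch; because the counter lives outside the allocator, rebound copies inside nested containers all report into the same tally (illustrative sketch):

    int64_t allocated = 0;
    CountingAllocator<int> alloc(&allocated);
    {
      std::vector<int, CountingAllocator<int>> v(alloc);
      v.assign({1, 2, 3});
      assert(allocated >= static_cast<int64_t>(3 * sizeof(int)));
    }
    assert(allocated == 0);  // all bytes returned through the same counter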
diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h
index 72c75fa0..2155076d 100644
--- a/absl/container/internal/hash_function_defaults.h
+++ b/absl/container/internal/hash_function_defaults.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -39,8 +39,8 @@
// equal functions are still bound to T. This is important because some type U
// can be hashed by/tested for equality differently depending on T. A notable
// example is `const char*`. `const char*` is treated as a c-style string when
-// the hash function is hash<string> but as a pointer when the hash function is
-// hash<void*>.
+// the hash function is hash<std::string> but as a pointer when the hash
+// function is hash<void*>.
//
#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
@@ -56,7 +56,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
// The hash of an object of type T is computed by using absl::Hash.
@@ -84,6 +84,7 @@ struct StringHashEq {
}
};
};
+
template <>
struct HashEq<std::string> : StringHashEq {};
template <>
@@ -139,7 +140,7 @@ template <class T>
using hash_default_eq = typename container_internal::HashEq<T>::Eq;
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
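
An illustrative sketch of the distinction the header comment draws for const char*: under the string defaults, two distinct buffers holding equal text compare equal (this assumes the Eq functor is transparent over absl::string_view, as StringHashEq defines it):

    char a[] = "key";
    char b[] = "key";  // a distinct buffer with equal contents

    absl::container_internal::hash_default_eq<std::string> eq;
    assert(eq(a, b));  // compared as strings: equal
    assert(a != b);    // compared as pointers: distinct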
diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc
index 4610843a..ce6133f8 100644
--- a/absl/container/internal/hash_function_defaults_test.cc
+++ b/absl/container/internal/hash_function_defaults_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,7 +22,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -78,14 +78,14 @@ struct EqString : ::testing::Test {
hash_default_eq<T> key_eq;
};
-TYPED_TEST_CASE(EqString, StringTypes);
+TYPED_TEST_SUITE(EqString, StringTypes);
template <class T>
struct HashString : ::testing::Test {
hash_default_hash<T> hasher;
};
-TYPED_TEST_CASE(HashString, StringTypes);
+TYPED_TEST_SUITE(HashString, StringTypes);
TYPED_TEST(EqString, Works) {
auto eq = this->key_eq;
@@ -122,14 +122,14 @@ struct EqPointer : ::testing::Test {
hash_default_eq<T> key_eq;
};
-TYPED_TEST_CASE(EqPointer, PointerTypes);
+TYPED_TEST_SUITE(EqPointer, PointerTypes);
template <class T>
struct HashPointer : ::testing::Test {
hash_default_hash<T> hasher;
};
-TYPED_TEST_CASE(HashPointer, PointerTypes);
+TYPED_TEST_SUITE(HashPointer, PointerTypes);
TYPED_TEST(EqPointer, Works) {
int dummy;
@@ -203,15 +203,11 @@ TYPED_TEST(HashPointer, Works) {
EXPECT_NE(hash(&dummy), hash(cuptr));
}
-// Cartesian product of (string, std::string, absl::string_view)
-// with (string, std::string, absl::string_view, const char*).
+// Cartesian product of (std::string, absl::string_view)
+// with (std::string, absl::string_view, const char*).
using StringTypesCartesianProduct = Types<
// clang-format off
- std::pair<std::string, std::string>,
- std::pair<std::string, absl::string_view>,
- std::pair<std::string, const char*>,
-
std::pair<absl::string_view, std::string>,
std::pair<absl::string_view, absl::string_view>,
std::pair<absl::string_view, const char*>>;
@@ -249,11 +245,11 @@ TYPED_TEST_P(StringLikeTest, HashEq) {
EXPECT_NE(this->hash(this->a1), this->hash(this->b2));
}
-TYPED_TEST_CASE(StringLikeTest, StringTypesCartesianProduct);
+TYPED_TEST_SUITE(StringLikeTest, StringTypesCartesianProduct);
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
enum Hash : size_t {
@@ -284,7 +280,7 @@ struct hash<Hashable<H>> {
} // namespace std
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -299,5 +295,5 @@ TEST(Delegate, HashDispatch) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/hash_generator_testing.cc b/absl/container/internal/hash_generator_testing.cc
index aef41d72..36b2571b 100644
--- a/absl/container/internal/hash_generator_testing.cc
+++ b/absl/container/internal/hash_generator_testing.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,7 +17,7 @@
#include <deque>
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace hash_internal {
namespace {
@@ -70,5 +70,5 @@ absl::string_view Generator<absl::string_view>::operator()() const {
} // namespace hash_internal
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/hash_generator_testing.h b/absl/container/internal/hash_generator_testing.h
index 65e88964..27962c35 100644
--- a/absl/container/internal/hash_generator_testing.h
+++ b/absl/container/internal/hash_generator_testing.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -31,7 +31,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace hash_internal {
namespace generator_internal {
@@ -146,7 +146,7 @@ using GeneratedType = decltype(
} // namespace hash_internal
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
diff --git a/absl/container/internal/hash_policy_testing.h b/absl/container/internal/hash_policy_testing.h
index 9c310ad4..8f0d2a52 100644
--- a/absl/container/internal/hash_policy_testing.h
+++ b/absl/container/internal/hash_policy_testing.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -30,7 +30,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace hash_testing_internal {
@@ -163,7 +163,7 @@ auto keys(const Set& s)
}
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions
diff --git a/absl/container/internal/hash_policy_testing_test.cc b/absl/container/internal/hash_policy_testing_test.cc
index 00c436b3..8fd1df00 100644
--- a/absl/container/internal/hash_policy_testing_test.cc
+++ b/absl/container/internal/hash_policy_testing_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,7 +17,7 @@
#include "gtest/gtest.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -41,5 +41,5 @@ TEST(_, Hash) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h
index 41e26212..3d87e821 100644
--- a/absl/container/internal/hash_policy_traits.h
+++ b/absl/container/internal/hash_policy_traits.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,7 +23,7 @@
#include "absl/meta/type_traits.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
// Defines how slots are initialized/destroyed/moved.
@@ -185,7 +185,7 @@ struct hash_policy_traits {
};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
diff --git a/absl/container/internal/hash_policy_traits_test.cc b/absl/container/internal/hash_policy_traits_test.cc
index 07cecdfa..edfaf63e 100644
--- a/absl/container/internal/hash_policy_traits_test.cc
+++ b/absl/container/internal/hash_policy_traits_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -22,7 +22,7 @@
#include "gtest/gtest.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -140,5 +140,5 @@ TEST_F(Test, with_transfer) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/hashtable_debug.h b/absl/container/internal/hashtable_debug.h
index b6a43512..1d1a9c28 100644
--- a/absl/container/internal/hashtable_debug.h
+++ b/absl/container/internal/hashtable_debug.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -38,7 +38,7 @@
#include "absl/container/internal/hashtable_debug_hooks.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
// Returns the number of probes required to lookup `key`. Returns 0 for a
@@ -61,7 +61,7 @@ std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
size_t num_probes = GetHashtableDebugNumProbes(
container,
absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
- v.resize(std::max(v.size(), num_probes + 1));
+ v.resize((std::max)(v.size(), num_probes + 1));
v[num_probes]++;
}
return v;
@@ -104,7 +104,7 @@ size_t LowerBoundAllocatedByteSize(size_t num_elements) {
}
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
diff --git a/absl/container/internal/hashtable_debug_hooks.h b/absl/container/internal/hashtable_debug_hooks.h
index 50ba6ba5..7b95fcef 100644
--- a/absl/container/internal/hashtable_debug_hooks.h
+++ b/absl/container/internal/hashtable_debug_hooks.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -24,7 +24,7 @@
#include <vector>
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace hashtable_debug_internal {
@@ -77,7 +77,7 @@ struct HashtableDebugAccess {
} // namespace hashtable_debug_internal
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
new file mode 100644
index 00000000..2338045d
--- /dev/null
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -0,0 +1,310 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include <atomic>
+#include <cassert>
+#include <cmath>
+#include <functional>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/memory/memory.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+constexpr int HashtablezInfo::kMaxStackDepth;
+
+namespace {
+ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{false};
+ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
+ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20};
+
+// Returns the next pseudo-random value.
+// pRNG is: aX+b mod c with a = 0x5DEECE66D, b = 0xB, c = 1<<48
+// This is the lrand48 generator.
+uint64_t NextRandom(uint64_t rnd) {
+ const uint64_t prng_mult = uint64_t{0x5DEECE66D};
+ const uint64_t prng_add = 0xB;
+ const uint64_t prng_mod_power = 48;
+ const uint64_t prng_mod_mask = ~(~uint64_t{0} << prng_mod_power);
+ return (prng_mult * rnd + prng_add) & prng_mod_mask;
+}
+
+// Generates a geometric variable with the specified mean.
+// This is done by generating a random number between 0 and 1 and applying
+// the inverse cumulative distribution function for an exponential.
+// Specifically: Let m be the inverse of the sample period, then
+// the probability distribution function is m*exp(-mx) so the CDF is
+// p = 1 - exp(-mx), so
+// q = 1 - p = exp(-mx)
+// log_e(q) = -mx
+// -log_e(q)/m = x
+// log_2(q) * (-log_e(2) * 1/m) = x
+// In the code, q is actually in the range 1 to 2**26, hence the -26 below
+//
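+// Worked example (illustrative numbers, not taken from running code): with
+// mean = 1 << 10 and a draw of q = 2**25 (a uniform value of one half),
+// interval = (log_2(q) - 26) * (-log_e(2) * mean)
+//          = (25 - 26) * (-0.6931 * 1024), which is roughly 710.
+//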
+int64_t GetGeometricVariable(int64_t mean) {
+#if ABSL_HAVE_THREAD_LOCAL
+ thread_local
+#else // ABSL_HAVE_THREAD_LOCAL
+  // SampleSlow, and hence GetGeometricVariable, is guarded by a single mutex
+  // when there are no thread locals. Thus, a single global rng is acceptable
+  // for that case.
+ static
+#endif // ABSL_HAVE_THREAD_LOCAL
+ uint64_t rng = []() {
+ // We don't get well distributed numbers from this so we call
+ // NextRandom() a bunch to mush the bits around. We use a global_rand
+ // to handle the case where the same thread (by memory address) gets
+ // created and destroyed repeatedly.
+ ABSL_CONST_INIT static std::atomic<uint32_t> global_rand(0);
+ uint64_t r = reinterpret_cast<uint64_t>(&rng) +
+ global_rand.fetch_add(1, std::memory_order_relaxed);
+ for (int i = 0; i < 20; ++i) {
+ r = NextRandom(r);
+ }
+ return r;
+ }();
+
+ rng = NextRandom(rng);
+
+ // Take the top 26 bits as the random number
+  // (This plus the 1<<58 sampling bound gives a max possible step of
+ // 5194297183973780480 bytes.)
+ const uint64_t prng_mod_power = 48; // Number of bits in prng
+  // The uint32_t cast is to prevent a (hard-to-reproduce) NaN under
+  // PIII debug builds for some binaries.
+ double q = static_cast<uint32_t>(rng >> (prng_mod_power - 26)) + 1.0;
+ // Put the computed p-value through the CDF of a geometric.
+ double interval = (log2(q) - 26) * (-std::log(2.0) * mean);
+
+ // Very large values of interval overflow int64_t. If we happen to
+ // hit such improbable condition, we simply cheat and clamp interval
+ // to largest supported value.
+ if (interval > static_cast<double>(std::numeric_limits<int64_t>::max() / 2)) {
+ return std::numeric_limits<int64_t>::max() / 2;
+ }
+
+ // Small values of interval are equivalent to just sampling next time.
+ if (interval < 1) {
+ return 1;
+ }
+ return static_cast<int64_t>(interval);
+}
+
+} // namespace
+
+HashtablezSampler& HashtablezSampler::Global() {
+ static auto* sampler = new HashtablezSampler();
+ return *sampler;
+}
+
+HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback(
+ DisposeCallback f) {
+ return dispose_.exchange(f, std::memory_order_relaxed);
+}
+
+HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::~HashtablezInfo() = default;
+
+void HashtablezInfo::PrepareForSampling() {
+ capacity.store(0, std::memory_order_relaxed);
+ size.store(0, std::memory_order_relaxed);
+ num_erases.store(0, std::memory_order_relaxed);
+ max_probe_length.store(0, std::memory_order_relaxed);
+ total_probe_length.store(0, std::memory_order_relaxed);
+ hashes_bitwise_or.store(0, std::memory_order_relaxed);
+ hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
+
+ create_time = absl::Now();
+ // The inliner makes hardcoded skip_count difficult (especially when combined
+ // with LTO). We use the ability to exclude stacks by regex when encoding
+ // instead.
+ depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
+ /* skip_count= */ 0);
+ dead = nullptr;
+}
+
+HashtablezSampler::HashtablezSampler()
+ : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
+ absl::MutexLock l(&graveyard_.init_mu);
+ graveyard_.dead = &graveyard_;
+}
+
+HashtablezSampler::~HashtablezSampler() {
+ HashtablezInfo* s = all_.load(std::memory_order_acquire);
+ while (s != nullptr) {
+ HashtablezInfo* next = s->next;
+ delete s;
+ s = next;
+ }
+}
+
+void HashtablezSampler::PushNew(HashtablezInfo* sample) {
+ sample->next = all_.load(std::memory_order_relaxed);
+ while (!all_.compare_exchange_weak(sample->next, sample,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ }
+}
+
+void HashtablezSampler::PushDead(HashtablezInfo* sample) {
+ if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
+ dispose(*sample);
+ }
+
+ absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+ absl::MutexLock sample_lock(&sample->init_mu);
+ sample->dead = graveyard_.dead;
+ graveyard_.dead = sample;
+}
+
+HashtablezInfo* HashtablezSampler::PopDead() {
+ absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+
+ // The list is circular, so eventually it collapses down to
+ // graveyard_.dead == &graveyard_
+ // when it is empty.
+ HashtablezInfo* sample = graveyard_.dead;
+ if (sample == &graveyard_) return nullptr;
+
+ absl::MutexLock sample_lock(&sample->init_mu);
+ graveyard_.dead = sample->dead;
+ sample->PrepareForSampling();
+ return sample;
+}
+
+HashtablezInfo* HashtablezSampler::Register() {
+ int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
+ if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) {
+ size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+ dropped_samples_.fetch_add(1, std::memory_order_relaxed);
+ return nullptr;
+ }
+
+ HashtablezInfo* sample = PopDead();
+ if (sample == nullptr) {
+ // Resurrection failed. Hire a new warlock.
+ sample = new HashtablezInfo();
+ PushNew(sample);
+ }
+
+ return sample;
+}
+
+void HashtablezSampler::Unregister(HashtablezInfo* sample) {
+ PushDead(sample);
+ size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+}
+
+int64_t HashtablezSampler::Iterate(
+ const std::function<void(const HashtablezInfo& stack)>& f) {
+ HashtablezInfo* s = all_.load(std::memory_order_acquire);
+ while (s != nullptr) {
+ absl::MutexLock l(&s->init_mu);
+ if (s->dead == nullptr) {
+ f(*s);
+ }
+ s = s->next;
+ }
+
+ return dropped_samples_.load(std::memory_order_relaxed);
+}
+
+HashtablezInfo* SampleSlow(int64_t* next_sample) {
+ if (kAbslContainerInternalSampleEverything) {
+ *next_sample = 1;
+ return HashtablezSampler::Global().Register();
+ }
+
+ bool first = *next_sample < 0;
+ *next_sample = GetGeometricVariable(
+ g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+
+  // g_hashtablez_enabled can be dynamically flipped, so we need to set a
+  // threshold low enough that we will start sampling in a reasonable time; we
+  // just use the default sampling rate.
+ if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr;
+
+ // We will only be negative on our first count, so we should just retry in
+ // that case.
+ if (first) {
+ if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
+ return SampleSlow(next_sample);
+ }
+
+ return HashtablezSampler::Global().Register();
+}
+
+#if ABSL_PER_THREAD_TLS == 1
+ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+#endif // ABSL_PER_THREAD_TLS == 1
+
+void UnsampleSlow(HashtablezInfo* info) {
+ HashtablezSampler::Global().Unregister(info);
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+ size_t distance_from_desired) {
+  // SwissTables probe in groups of 16, so scale this to count group probes
+  // rather than the raw offset from the desired slot.
+ size_t probe_length = distance_from_desired;
+#if SWISSTABLE_HAVE_SSE2
+ probe_length /= 16;
+#else
+ probe_length /= 8;
+#endif
+
+ info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
+ info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
+ info->max_probe_length.store(
+ std::max(info->max_probe_length.load(std::memory_order_relaxed),
+ probe_length),
+ std::memory_order_relaxed);
+ info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed);
+ info->size.fetch_add(1, std::memory_order_relaxed);
+}
+
+void SetHashtablezEnabled(bool enabled) {
+ g_hashtablez_enabled.store(enabled, std::memory_order_release);
+}
+
+void SetHashtablezSampleParameter(int32_t rate) {
+ if (rate > 0) {
+ g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
+ } else {
+ ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld",
+ static_cast<long long>(rate)); // NOLINT(runtime/int)
+ }
+}
+
+void SetHashtablezMaxSamples(int32_t max) {
+ if (max > 0) {
+ g_hashtablez_max_samples.store(max, std::memory_order_release);
+ } else {
+ ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
+ static_cast<long long>(max)); // NOLINT(runtime/int)
+ }
+}
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
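For intuition, a minimal self-contained sketch of the interval computation above; `kSeed` and `kMean` are illustrative values, not names from this patch, and the clamping of very large/small intervals is omitted:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kA = 0x5DEECE66D, kB = 0xB;  // NextRandom() constants
      const uint64_t kSeed = 0x123456789abcdef0;  // illustrative seed
      const int64_t kMean = 1 << 10;              // default sample parameter
      uint64_t rng = (kA * kSeed + kB) & ((uint64_t{1} << 48) - 1);
      double q = static_cast<uint32_t>(rng >> (48 - 26)) + 1.0;
      double interval = (std::log2(q) - 26) * (-std::log(2.0) * kMean);
      std::printf("next sample in ~%.0f Sample() calls\n", interval);
    }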
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
new file mode 100644
index 00000000..f17c425c
--- /dev/null
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -0,0 +1,290 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: hashtablez_sampler.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the API for a low-level library to sample hashtables
+// and collect runtime statistics about them.
+//
+// `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which
+// store information about a single sample.
+//
+// `Record*` methods store information into samples.
+// `Sample()` and `Unsample()` make use of a single global sampler with
+// properties controlled by the flags hashtablez_enabled,
+// hashtablez_sample_rate, and hashtablez_max_samples.
+//
+// WARNING
+//
+// Using this sampling API may cause sampled Swiss tables to use the global
+// allocator (operator `new`) in addition to any custom allocator. If you
+// are using a table in an unusual circumstance where allocation or calling a
+// Linux syscall is unacceptable, this could interfere.
+//
+// This utility is internal-only. Use at your own risk.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+
+// Stores information about a sampled hashtable. All mutations to this *must*
+// be made through `Record*` functions below. All reads from this *must* only
+// occur in the callback to `HashtablezSampler::Iterate`.
+struct HashtablezInfo {
+ // Constructs the object but does not fill in any fields.
+ HashtablezInfo();
+ ~HashtablezInfo();
+ HashtablezInfo(const HashtablezInfo&) = delete;
+ HashtablezInfo& operator=(const HashtablezInfo&) = delete;
+
+ // Puts the object into a clean state, fills in the logically `const` members,
+ // blocking for any readers that are currently sampling the object.
+ void PrepareForSampling() EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+
+ // These fields are mutated by the various Record* APIs and need to be
+ // thread-safe.
+ std::atomic<size_t> capacity;
+ std::atomic<size_t> size;
+ std::atomic<size_t> num_erases;
+ std::atomic<size_t> max_probe_length;
+ std::atomic<size_t> total_probe_length;
+ std::atomic<size_t> hashes_bitwise_or;
+ std::atomic<size_t> hashes_bitwise_and;
+
+ // `HashtablezSampler` maintains intrusive linked lists for all samples. See
+ // comments on `HashtablezSampler::all_` for details on these. `init_mu`
+ // guards the ability to restore the sample to a pristine state. This
+ // prevents races with sampling and resurrecting an object.
+ absl::Mutex init_mu;
+ HashtablezInfo* next;
+ HashtablezInfo* dead GUARDED_BY(init_mu);
+
+  // All of the fields below are set by `PrepareForSampling`; they must not be
+  // mutated in `Record*` functions. They are logically `const` in that sense.
+ // These are guarded by init_mu, but that is not externalized to clients, who
+ // can only read them during `HashtablezSampler::Iterate` which will hold the
+ // lock.
+ static constexpr int kMaxStackDepth = 64;
+ absl::Time create_time;
+ int32_t depth;
+ void* stack[kMaxStackDepth];
+};
+
+inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
+#if SWISSTABLE_HAVE_SSE2
+ total_probe_length /= 16;
+#else
+ total_probe_length /= 8;
+#endif
+ info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
+}
+
+inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+ size_t capacity) {
+ info->size.store(size, std::memory_order_relaxed);
+ info->capacity.store(capacity, std::memory_order_relaxed);
+ if (size == 0) {
+ // This is a clear, reset the total/num_erases too.
+ RecordRehashSlow(info, 0);
+ }
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+ size_t distance_from_desired);
+
+inline void RecordEraseSlow(HashtablezInfo* info) {
+ info->size.fetch_sub(1, std::memory_order_relaxed);
+ info->num_erases.fetch_add(1, std::memory_order_relaxed);
+}
+
+HashtablezInfo* SampleSlow(int64_t* next_sample);
+void UnsampleSlow(HashtablezInfo* info);
+
+class HashtablezInfoHandle {
+ public:
+ explicit HashtablezInfoHandle() : info_(nullptr) {}
+ explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
+ ~HashtablezInfoHandle() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ UnsampleSlow(info_);
+ }
+
+ HashtablezInfoHandle(const HashtablezInfoHandle&) = delete;
+ HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;
+
+ HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
+ : info_(absl::exchange(o.info_, nullptr)) {}
+ HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept {
+ if (ABSL_PREDICT_FALSE(info_ != nullptr)) {
+ UnsampleSlow(info_);
+ }
+ info_ = absl::exchange(o.info_, nullptr);
+ return *this;
+ }
+
+ inline void RecordStorageChanged(size_t size, size_t capacity) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordStorageChangedSlow(info_, size, capacity);
+ }
+
+ inline void RecordRehash(size_t total_probe_length) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordRehashSlow(info_, total_probe_length);
+ }
+
+ inline void RecordInsert(size_t hash, size_t distance_from_desired) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordInsertSlow(info_, hash, distance_from_desired);
+ }
+
+ inline void RecordErase() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordEraseSlow(info_);
+ }
+
+ friend inline void swap(HashtablezInfoHandle& lhs,
+ HashtablezInfoHandle& rhs) {
+ std::swap(lhs.info_, rhs.info_);
+ }
+
+ private:
+ friend class HashtablezInfoHandlePeer;
+ HashtablezInfo* info_;
+};
+
+#if ABSL_PER_THREAD_TLS == 1
+extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+#endif // ABSL_PER_THREAD_TLS
+
+// Returns an RAII sampling handle that manages registration and unregistration
+// with the global sampler.
+inline HashtablezInfoHandle Sample() {
+#if ABSL_PER_THREAD_TLS == 0
+ static auto* mu = new absl::Mutex;
+ static int64_t global_next_sample = 0;
+ absl::MutexLock l(mu);
+#endif  // ABSL_PER_THREAD_TLS == 0
+
+ if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+ return HashtablezInfoHandle(nullptr);
+ }
+ return HashtablezInfoHandle(SampleSlow(&global_next_sample));
+}
+
+// Holds samples and their associated stack traces, with a soft sample-count
+// limit set by `SetHashtablezMaxSamples()`.
+//
+// Thread safe.
+class HashtablezSampler {
+ public:
+ // Returns a global Sampler.
+ static HashtablezSampler& Global();
+
+ HashtablezSampler();
+ ~HashtablezSampler();
+
+ // Registers for sampling. Returns an opaque registration info.
+ HashtablezInfo* Register();
+
+ // Unregisters the sample.
+ void Unregister(HashtablezInfo* sample);
+
+  // The dispose callback is invoked on each sample at the moment it is
+  // unregistered. It only affects samples that are unregistered after the
+  // callback has been set. Returns the previous callback.
+ using DisposeCallback = void (*)(const HashtablezInfo&);
+ DisposeCallback SetDisposeCallback(DisposeCallback f);
+
+  // Iterates over all the registered `HashtablezInfo`s, returning the number
+  // of samples that have been dropped.
+ int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
+
+ private:
+ void PushNew(HashtablezInfo* sample);
+ void PushDead(HashtablezInfo* sample);
+ HashtablezInfo* PopDead();
+
+ std::atomic<size_t> dropped_samples_;
+ std::atomic<size_t> size_estimate_;
+
+ // Intrusive lock free linked lists for tracking samples.
+ //
+ // `all_` records all samples (they are never removed from this list) and is
+ // terminated with a `nullptr`.
+ //
+ // `graveyard_.dead` is a circular linked list. When it is empty,
+  // `graveyard_.dead == &graveyard_`. The list is circular so that
+ // every item on it (even the last) has a non-null dead pointer. This allows
+ // `Iterate` to determine if a given sample is live or dead using only
+ // information on the sample itself.
+ //
+ // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
+ // looks like this (G is the Graveyard):
+ //
+ // +---+ +---+ +---+ +---+ +---+
+ // all -->| A |--->| B |--->| C |--->| D |--->| E |
+ // | | | | | | | | | |
+ // +---+ | | +->| |-+ | | +->| |-+ | |
+ // | G | +---+ | +---+ | +---+ | +---+ | +---+
+ // | | | | | |
+ // | | --------+ +--------+ |
+ // +---+ |
+ // ^ |
+ // +--------------------------------------+
+ //
+ std::atomic<HashtablezInfo*> all_;
+ HashtablezInfo graveyard_;
+
+ std::atomic<DisposeCallback> dispose_;
+};
+
+// Enables or disables sampling for Swiss tables.
+void SetHashtablezEnabled(bool enabled);
+
+// Sets the rate at which Swiss tables will be sampled.
+void SetHashtablezSampleParameter(int32_t rate);
+
+// Sets a soft max for the number of samples that will be kept.
+void SetHashtablezMaxSamples(int32_t max);
+
+// Configuration override.
+// This allows process-wide sampling without depending on order of
+// initialization of static storage duration objects.
+// The definition of this constant is weak, which allows us to inject a
+// different value for it at link time.
+extern "C" const bool kAbslContainerInternalSampleEverything;
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
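A hedged sketch of how a client container might drive this API, using only the names declared above (`MyTable` and `ReadSamples` are hypothetical):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include "absl/container/internal/hashtablez_sampler.h"

    namespace ci = absl::container_internal;

    class MyTable {  // hypothetical container
     public:
      MyTable() : infoz_(ci::Sample()) { infoz_.RecordStorageChanged(0, 16); }
      void Insert(size_t hash) {
        infoz_.RecordInsert(hash, /*distance_from_desired=*/0);
      }

     private:
      ci::HashtablezInfoHandle infoz_;  // unregisters the sample on destruction
    };

    int64_t ReadSamples() {
      // Reads of a sample are only valid inside the Iterate callback.
      return ci::HashtablezSampler::Global().Iterate(
          [](const ci::HashtablezInfo& info) {
            (void)info.size.load(std::memory_order_relaxed);
          });
    }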
diff --git a/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
new file mode 100644
index 00000000..d3f41c7c
--- /dev/null
+++ b/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
@@ -0,0 +1,29 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include "absl/base/attributes.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+
+// See hashtablez_sampler.h for details.
+extern "C" ABSL_ATTRIBUTE_WEAK const bool
+ kAbslContainerInternalSampleEverything = false;
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
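Because the definition above is weak, a client can enable sampling of every table by linking a strong definition into the binary; a sketch of such a translation unit (the file name is illustrative):

    // sample_everything_override.cc -- illustrative file name.
    // The strong definition overrides the weak `false` default at link time.
    extern "C" const bool kAbslContainerInternalSampleEverything = true;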
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
new file mode 100644
index 00000000..bdae75f3
--- /dev/null
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -0,0 +1,357 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include <atomic>
+#include <limits>
+#include <random>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/synchronization/blocking_counter.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/synchronization/notification.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+#if SWISSTABLE_HAVE_SSE2
+constexpr int kProbeLength = 16;
+#else
+constexpr int kProbeLength = 8;
+#endif
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+class HashtablezInfoHandlePeer {
+ public:
+ static bool IsSampled(const HashtablezInfoHandle& h) {
+ return h.info_ != nullptr;
+ }
+
+ static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
+};
+
+namespace {
+using ::absl::synchronization_internal::ThreadPool;
+using ::testing::IsEmpty;
+using ::testing::UnorderedElementsAre;
+
+std::vector<size_t> GetSizes(HashtablezSampler* s) {
+ std::vector<size_t> res;
+ s->Iterate([&](const HashtablezInfo& info) {
+ res.push_back(info.size.load(std::memory_order_acquire));
+ });
+ return res;
+}
+
+HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
+ auto* info = s->Register();
+ assert(info != nullptr);
+ info->size.store(size);
+ return info;
+}
+
+TEST(HashtablezInfoTest, PrepareForSampling) {
+ absl::Time test_start = absl::Now();
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+
+ EXPECT_EQ(info.capacity.load(), 0);
+ EXPECT_EQ(info.size.load(), 0);
+ EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.max_probe_length.load(), 0);
+ EXPECT_EQ(info.total_probe_length.load(), 0);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+ EXPECT_GE(info.create_time, test_start);
+
+ info.capacity.store(1, std::memory_order_relaxed);
+ info.size.store(1, std::memory_order_relaxed);
+ info.num_erases.store(1, std::memory_order_relaxed);
+ info.max_probe_length.store(1, std::memory_order_relaxed);
+ info.total_probe_length.store(1, std::memory_order_relaxed);
+ info.hashes_bitwise_or.store(1, std::memory_order_relaxed);
+ info.hashes_bitwise_and.store(1, std::memory_order_relaxed);
+ info.create_time = test_start - absl::Hours(20);
+
+ info.PrepareForSampling();
+ EXPECT_EQ(info.capacity.load(), 0);
+ EXPECT_EQ(info.size.load(), 0);
+ EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.max_probe_length.load(), 0);
+ EXPECT_EQ(info.total_probe_length.load(), 0);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+ EXPECT_GE(info.create_time, test_start);
+}
+
+TEST(HashtablezInfoTest, RecordStorageChanged) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ RecordStorageChangedSlow(&info, 17, 47);
+ EXPECT_EQ(info.size.load(), 17);
+ EXPECT_EQ(info.capacity.load(), 47);
+ RecordStorageChangedSlow(&info, 20, 20);
+ EXPECT_EQ(info.size.load(), 20);
+ EXPECT_EQ(info.capacity.load(), 20);
+}
+
+TEST(HashtablezInfoTest, RecordInsert) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ EXPECT_EQ(info.max_probe_length.load(), 0);
+ RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
+ EXPECT_EQ(info.max_probe_length.load(), 6);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00);
+ RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength);
+ EXPECT_EQ(info.max_probe_length.load(), 6);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00);
+ RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength);
+ EXPECT_EQ(info.max_probe_length.load(), 12);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00);
+}
+
+TEST(HashtablezInfoTest, RecordErase) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.size.load(), 0);
+ RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
+ EXPECT_EQ(info.size.load(), 1);
+ RecordEraseSlow(&info);
+ EXPECT_EQ(info.size.load(), 0);
+ EXPECT_EQ(info.num_erases.load(), 1);
+}
+
+TEST(HashtablezInfoTest, RecordRehash) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ RecordInsertSlow(&info, 0x1, 0);
+ RecordInsertSlow(&info, 0x2, kProbeLength);
+ RecordInsertSlow(&info, 0x4, kProbeLength);
+ RecordInsertSlow(&info, 0x8, 2 * kProbeLength);
+ EXPECT_EQ(info.size.load(), 4);
+ EXPECT_EQ(info.total_probe_length.load(), 4);
+
+ RecordEraseSlow(&info);
+ RecordEraseSlow(&info);
+ EXPECT_EQ(info.size.load(), 2);
+ EXPECT_EQ(info.total_probe_length.load(), 4);
+ EXPECT_EQ(info.num_erases.load(), 2);
+
+ RecordRehashSlow(&info, 3 * kProbeLength);
+ EXPECT_EQ(info.size.load(), 2);
+ EXPECT_EQ(info.total_probe_length.load(), 3);
+ EXPECT_EQ(info.num_erases.load(), 0);
+}
+
+TEST(HashtablezSamplerTest, SmallSampleParameter) {
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+
+ for (int i = 0; i < 1000; ++i) {
+ int64_t next_sample = 0;
+ HashtablezInfo* sample = SampleSlow(&next_sample);
+ EXPECT_GT(next_sample, 0);
+ EXPECT_NE(sample, nullptr);
+ UnsampleSlow(sample);
+ }
+}
+
+TEST(HashtablezSamplerTest, LargeSampleParameter) {
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
+
+ for (int i = 0; i < 1000; ++i) {
+ int64_t next_sample = 0;
+ HashtablezInfo* sample = SampleSlow(&next_sample);
+ EXPECT_GT(next_sample, 0);
+ EXPECT_NE(sample, nullptr);
+ UnsampleSlow(sample);
+ }
+}
+
+TEST(HashtablezSamplerTest, Sample) {
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+ int64_t num_sampled = 0;
+ int64_t total = 0;
+ double sample_rate = 0.0;
+ for (int i = 0; i < 1000000; ++i) {
+ HashtablezInfoHandle h = Sample();
+ ++total;
+ if (HashtablezInfoHandlePeer::IsSampled(h)) {
+ ++num_sampled;
+ }
+ sample_rate = static_cast<double>(num_sampled) / total;
+ if (0.005 < sample_rate && sample_rate < 0.015) break;
+ }
+ EXPECT_NEAR(sample_rate, 0.01, 0.005);
+}
+
+TEST(HashtablezSamplerTest, Handle) {
+ auto& sampler = HashtablezSampler::Global();
+ HashtablezInfoHandle h(sampler.Register());
+ auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
+ info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
+
+ bool found = false;
+ sampler.Iterate([&](const HashtablezInfo& h) {
+ if (&h == info) {
+ EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
+ found = true;
+ }
+ });
+ EXPECT_TRUE(found);
+
+ h = HashtablezInfoHandle();
+ found = false;
+ sampler.Iterate([&](const HashtablezInfo& h) {
+ if (&h == info) {
+      // This will only happen if some other thread has resurrected the info
+      // the old handle was using.
+ if (h.hashes_bitwise_and.load() == 0x12345678) {
+ found = true;
+ }
+ }
+ });
+ EXPECT_FALSE(found);
+}
+
+TEST(HashtablezSamplerTest, Registration) {
+ HashtablezSampler sampler;
+ auto* info1 = Register(&sampler, 1);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));
+
+ auto* info2 = Register(&sampler, 2);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
+ info1->size.store(3);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));
+
+ sampler.Unregister(info1);
+ sampler.Unregister(info2);
+}
+
+TEST(HashtablezSamplerTest, Unregistration) {
+ HashtablezSampler sampler;
+ std::vector<HashtablezInfo*> infos;
+ for (size_t i = 0; i < 3; ++i) {
+ infos.push_back(Register(&sampler, i));
+ }
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));
+
+ sampler.Unregister(infos[1]);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));
+
+ infos.push_back(Register(&sampler, 3));
+ infos.push_back(Register(&sampler, 4));
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
+ sampler.Unregister(infos[3]);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));
+
+ sampler.Unregister(infos[0]);
+ sampler.Unregister(infos[2]);
+ sampler.Unregister(infos[4]);
+ EXPECT_THAT(GetSizes(&sampler), IsEmpty());
+}
+
+TEST(HashtablezSamplerTest, MultiThreaded) {
+ HashtablezSampler sampler;
+ Notification stop;
+ ThreadPool pool(10);
+
+ for (int i = 0; i < 10; ++i) {
+ pool.Schedule([&sampler, &stop]() {
+ std::random_device rd;
+ std::mt19937 gen(rd());
+
+ std::vector<HashtablezInfo*> infoz;
+ while (!stop.HasBeenNotified()) {
+ if (infoz.empty()) {
+ infoz.push_back(sampler.Register());
+ }
+ switch (std::uniform_int_distribution<>(0, 2)(gen)) {
+ case 0: {
+ infoz.push_back(sampler.Register());
+ break;
+ }
+ case 1: {
+ size_t p =
+ std::uniform_int_distribution<>(0, infoz.size() - 1)(gen);
+ HashtablezInfo* info = infoz[p];
+ infoz[p] = infoz.back();
+ infoz.pop_back();
+ sampler.Unregister(info);
+ break;
+ }
+ case 2: {
+ absl::Duration oldest = absl::ZeroDuration();
+ sampler.Iterate([&](const HashtablezInfo& info) {
+ oldest = std::max(oldest, absl::Now() - info.create_time);
+ });
+ ASSERT_GE(oldest, absl::ZeroDuration());
+ break;
+ }
+ }
+ }
+ });
+ }
+ // The threads will hammer away. Give it a little bit of time for tsan to
+ // spot errors.
+ absl::SleepFor(absl::Seconds(3));
+ stop.Notify();
+}
+
+TEST(HashtablezSamplerTest, Callback) {
+ HashtablezSampler sampler;
+
+ auto* info1 = Register(&sampler, 1);
+ auto* info2 = Register(&sampler, 2);
+
+ static const HashtablezInfo* expected;
+
+ auto callback = [](const HashtablezInfo& info) {
+ // We can't use `info` outside of this callback because the object will be
+ // disposed as soon as we return from here.
+ EXPECT_EQ(&info, expected);
+ };
+
+ // Set the callback.
+ EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
+ expected = info1;
+ sampler.Unregister(info1);
+
+ // Unset the callback.
+ EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
+ expected = nullptr; // no more calls.
+ sampler.Unregister(info2);
+}
+
+} // namespace
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
diff --git a/absl/container/internal/have_sse.h b/absl/container/internal/have_sse.h
new file mode 100644
index 00000000..43414418
--- /dev/null
+++ b/absl/container/internal/have_sse.h
@@ -0,0 +1,49 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Shared config probing for SSE instructions used in Swiss tables.
+#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+
+#ifndef SWISSTABLE_HAVE_SSE2
+#if defined(__SSE2__) || \
+ (defined(_MSC_VER) && \
+ (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
+#define SWISSTABLE_HAVE_SSE2 1
+#else
+#define SWISSTABLE_HAVE_SSE2 0
+#endif
+#endif
+
+#ifndef SWISSTABLE_HAVE_SSSE3
+#ifdef __SSSE3__
+#define SWISSTABLE_HAVE_SSSE3 1
+#else
+#define SWISSTABLE_HAVE_SSSE3 0
+#endif
+#endif
+
+#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2
+#error "Bad configuration!"
+#endif
+
+#if SWISSTABLE_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#if SWISSTABLE_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#endif // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
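Each macro above is wrapped in `#ifndef`, so a build may pre-define it (for example, passing -DSWISSTABLE_HAVE_SSE2=0 to the compiler) to force the portable code paths. Downstream code keys the probe-group width off these macros, along the lines of this sketch:

    #include "absl/container/internal/have_sse.h"

    // Mirrors how the sampler scales probe lengths: groups of 16 with SSE2,
    // groups of 8 without.
    #if SWISSTABLE_HAVE_SSE2
    constexpr int kGroupWidth = 16;
    #else
    constexpr int kGroupWidth = 8;
    #endif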
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
new file mode 100644
index 00000000..123e04c9
--- /dev/null
+++ b/absl/container/internal/inlined_vector.h
@@ -0,0 +1,895 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "absl/base/macros.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/types/span.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace inlined_vector_internal {
+
+template <typename Iterator>
+using IsAtLeastForwardIterator = std::is_convertible<
+ typename std::iterator_traits<Iterator>::iterator_category,
+ std::forward_iterator_tag>;
+
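+// `IsMemcpyOk` is true when the allocator is the default `std::allocator` and
+// the value type is trivially copy constructible, copy assignable, and
+// destructible, so elements may be relocated with raw memory copies.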
+template <typename AllocatorType>
+using IsMemcpyOk = absl::conjunction<
+ std::is_same<std::allocator<typename AllocatorType::value_type>,
+ AllocatorType>,
+ absl::is_trivially_copy_constructible<typename AllocatorType::value_type>,
+ absl::is_trivially_copy_assignable<typename AllocatorType::value_type>,
+ absl::is_trivially_destructible<typename AllocatorType::value_type>>;
+
+template <typename AllocatorType, typename ValueType, typename SizeType>
+void DestroyElements(AllocatorType* alloc_ptr, ValueType* destroy_first,
+ SizeType destroy_size) {
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ if (destroy_first != nullptr) {
+ for (auto i = destroy_size; i != 0;) {
+ --i;
+ AllocatorTraits::destroy(*alloc_ptr, destroy_first + i);
+ }
+
+#ifndef NDEBUG
+ // Overwrite unused memory with `0xab` so we can catch uninitialized usage.
+ //
+ // Cast to `void*` to tell the compiler that we don't care that we might be
+ // scribbling on a vtable pointer.
+ auto* memory_ptr = static_cast<void*>(destroy_first);
+ auto memory_size = sizeof(ValueType) * destroy_size;
+ std::memset(memory_ptr, 0xab, memory_size);
+#endif // NDEBUG
+ }
+}
+
+template <typename AllocatorType, typename ValueType, typename ValueAdapter,
+ typename SizeType>
+void ConstructElements(AllocatorType* alloc_ptr, ValueType* construct_first,
+ ValueAdapter* values_ptr, SizeType construct_size) {
+ for (SizeType i = 0; i < construct_size; ++i) {
+ ABSL_INTERNAL_TRY {
+ values_ptr->ConstructNext(alloc_ptr, construct_first + i);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ inlined_vector_internal::DestroyElements(alloc_ptr, construct_first, i);
+ ABSL_INTERNAL_RETHROW;
+ }
+ }
+}
+
+template <typename ValueType, typename ValueAdapter, typename SizeType>
+void AssignElements(ValueType* assign_first, ValueAdapter* values_ptr,
+ SizeType assign_size) {
+ for (SizeType i = 0; i < assign_size; ++i) {
+ values_ptr->AssignNext(assign_first + i);
+ }
+}
+
+template <typename AllocatorType>
+struct StorageView {
+ using pointer = typename AllocatorType::pointer;
+ using size_type = typename AllocatorType::size_type;
+
+ pointer data;
+ size_type size;
+ size_type capacity;
+};
+
+template <typename AllocatorType, typename Iterator>
+class IteratorValueAdapter {
+ using pointer = typename AllocatorType::pointer;
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ public:
+ explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}
+
+ void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
+ AllocatorTraits::construct(*alloc_ptr, construct_at, *it_);
+ ++it_;
+ }
+
+ void AssignNext(pointer assign_at) {
+ *assign_at = *it_;
+ ++it_;
+ }
+
+ private:
+ Iterator it_;
+};
+
+template <typename AllocatorType>
+class CopyValueAdapter {
+ using pointer = typename AllocatorType::pointer;
+ using const_pointer = typename AllocatorType::const_pointer;
+ using const_reference = typename AllocatorType::const_reference;
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ public:
+ explicit CopyValueAdapter(const_reference v) : ptr_(std::addressof(v)) {}
+
+ void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
+ AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_);
+ }
+
+ void AssignNext(pointer assign_at) { *assign_at = *ptr_; }
+
+ private:
+ const_pointer ptr_;
+};
+
+template <typename AllocatorType>
+class DefaultValueAdapter {
+ using pointer = typename AllocatorType::pointer;
+ using value_type = typename AllocatorType::value_type;
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ public:
+ explicit DefaultValueAdapter() {}
+
+ void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
+ AllocatorTraits::construct(*alloc_ptr, construct_at);
+ }
+
+ void AssignNext(pointer assign_at) { *assign_at = value_type(); }
+};
+
+template <typename AllocatorType>
+class AllocationTransaction {
+ using value_type = typename AllocatorType::value_type;
+ using pointer = typename AllocatorType::pointer;
+ using size_type = typename AllocatorType::size_type;
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ public:
+ explicit AllocationTransaction(AllocatorType* alloc_ptr)
+ : alloc_data_(*alloc_ptr, nullptr) {}
+
+ ~AllocationTransaction() {
+ if (DidAllocate()) {
+ AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity());
+ }
+ }
+
+ AllocationTransaction(const AllocationTransaction&) = delete;
+ void operator=(const AllocationTransaction&) = delete;
+
+ AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
+ pointer& GetData() { return alloc_data_.template get<1>(); }
+ size_type& GetCapacity() { return capacity_; }
+
+ bool DidAllocate() { return GetData() != nullptr; }
+ pointer Allocate(size_type capacity) {
+ GetData() = AllocatorTraits::allocate(GetAllocator(), capacity);
+ GetCapacity() = capacity;
+ return GetData();
+ }
+
+ private:
+ container_internal::CompressedTuple<AllocatorType, pointer> alloc_data_;
+ size_type capacity_ = 0;
+};
+
+template <typename AllocatorType>
+class ConstructionTransaction {
+ using pointer = typename AllocatorType::pointer;
+ using size_type = typename AllocatorType::size_type;
+
+ public:
+ explicit ConstructionTransaction(AllocatorType* alloc_ptr)
+ : alloc_data_(*alloc_ptr, nullptr) {}
+
+ ~ConstructionTransaction() {
+ if (DidConstruct()) {
+ inlined_vector_internal::DestroyElements(std::addressof(GetAllocator()),
+ GetData(), GetSize());
+ }
+ }
+
+ ConstructionTransaction(const ConstructionTransaction&) = delete;
+ void operator=(const ConstructionTransaction&) = delete;
+
+ AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
+ pointer& GetData() { return alloc_data_.template get<1>(); }
+ size_type& GetSize() { return size_; }
+
+ bool DidConstruct() { return GetData() != nullptr; }
+ template <typename ValueAdapter>
+ void Construct(pointer data, ValueAdapter* values_ptr, size_type size) {
+ inlined_vector_internal::ConstructElements(std::addressof(GetAllocator()),
+ data, values_ptr, size);
+ GetData() = data;
+ GetSize() = size;
+ }
+ void Commit() {
+ GetData() = nullptr;
+ GetSize() = 0;
+ }
+
+ private:
+ container_internal::CompressedTuple<AllocatorType, pointer> alloc_data_;
+ size_type size_ = 0;
+};
+
+template <typename T, size_t N, typename A>
+class Storage {
+ public:
+ using allocator_type = A;
+ using value_type = typename allocator_type::value_type;
+ using pointer = typename allocator_type::pointer;
+ using const_pointer = typename allocator_type::const_pointer;
+ using reference = typename allocator_type::reference;
+ using const_reference = typename allocator_type::const_reference;
+ using rvalue_reference = typename allocator_type::value_type&&;
+ using size_type = typename allocator_type::size_type;
+ using difference_type = typename allocator_type::difference_type;
+ using iterator = pointer;
+ using const_iterator = const_pointer;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using MoveIterator = std::move_iterator<iterator>;
+ using AllocatorTraits = absl::allocator_traits<allocator_type>;
+ using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<allocator_type>;
+
+ using StorageView = inlined_vector_internal::StorageView<allocator_type>;
+
+ template <typename Iterator>
+ using IteratorValueAdapter =
+ inlined_vector_internal::IteratorValueAdapter<allocator_type, Iterator>;
+ using CopyValueAdapter =
+ inlined_vector_internal::CopyValueAdapter<allocator_type>;
+ using DefaultValueAdapter =
+ inlined_vector_internal::DefaultValueAdapter<allocator_type>;
+
+ using AllocationTransaction =
+ inlined_vector_internal::AllocationTransaction<allocator_type>;
+ using ConstructionTransaction =
+ inlined_vector_internal::ConstructionTransaction<allocator_type>;
+
+ static size_type NextCapacity(size_type current_capacity) {
+ return current_capacity * 2;
+ }
+
+ static size_type ComputeCapacity(size_type current_capacity,
+ size_type requested_capacity) {
+ return (std::max)(NextCapacity(current_capacity), requested_capacity);
+ }
+
+ // ---------------------------------------------------------------------------
+ // Storage Constructors and Destructor
+ // ---------------------------------------------------------------------------
+
+ Storage() : metadata_() {}
+
+ explicit Storage(const allocator_type& alloc)
+ : metadata_(alloc, /* empty and inlined */ 0) {}
+
+ ~Storage() {
+ pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize());
+ DeallocateIfAllocated();
+ }
+
+ // ---------------------------------------------------------------------------
+ // Storage Member Accessors
+ // ---------------------------------------------------------------------------
+
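+  // The low bit of the `metadata_` size slot tags whether the storage is heap
+  // allocated; the element count lives in the remaining high bits, hence the
+  // shifts and masks in the accessors and mutators below.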
+ size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
+
+ const size_type& GetSizeAndIsAllocated() const {
+ return metadata_.template get<1>();
+ }
+
+ size_type GetSize() const { return GetSizeAndIsAllocated() >> 1; }
+
+ bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
+
+ pointer GetAllocatedData() { return data_.allocated.allocated_data; }
+
+ const_pointer GetAllocatedData() const {
+ return data_.allocated.allocated_data;
+ }
+
+ pointer GetInlinedData() {
+ return reinterpret_cast<pointer>(
+ std::addressof(data_.inlined.inlined_data[0]));
+ }
+
+ const_pointer GetInlinedData() const {
+ return reinterpret_cast<const_pointer>(
+ std::addressof(data_.inlined.inlined_data[0]));
+ }
+
+ size_type GetAllocatedCapacity() const {
+ return data_.allocated.allocated_capacity;
+ }
+
+ size_type GetInlinedCapacity() const { return static_cast<size_type>(N); }
+
+ StorageView MakeStorageView() {
+ return GetIsAllocated()
+ ? StorageView{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()}
+ : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()};
+ }
+
+ allocator_type* GetAllocPtr() {
+ return std::addressof(metadata_.template get<0>());
+ }
+
+ const allocator_type* GetAllocPtr() const {
+ return std::addressof(metadata_.template get<0>());
+ }
+
+ // ---------------------------------------------------------------------------
+ // Storage Member Mutators
+ // ---------------------------------------------------------------------------
+
+ template <typename ValueAdapter>
+ void Initialize(ValueAdapter values, size_type new_size);
+
+ template <typename ValueAdapter>
+ void Assign(ValueAdapter values, size_type new_size);
+
+ template <typename ValueAdapter>
+ void Resize(ValueAdapter values, size_type new_size);
+
+ template <typename ValueAdapter>
+ iterator Insert(const_iterator pos, ValueAdapter values,
+ size_type insert_count);
+
+ template <typename... Args>
+ reference EmplaceBack(Args&&... args);
+
+ iterator Erase(const_iterator from, const_iterator to);
+
+ void Reserve(size_type requested_capacity);
+
+ void ShrinkToFit();
+
+ void Swap(Storage* other_storage_ptr);
+
+ void SetIsAllocated() {
+ GetSizeAndIsAllocated() |= static_cast<size_type>(1);
+ }
+
+ void UnsetIsAllocated() {
+ GetSizeAndIsAllocated() &= ((std::numeric_limits<size_type>::max)() - 1);
+ }
+
+ void SetSize(size_type size) {
+ GetSizeAndIsAllocated() =
+ (size << 1) | static_cast<size_type>(GetIsAllocated());
+ }
+
+ void SetAllocatedSize(size_type size) {
+ GetSizeAndIsAllocated() = (size << 1) | static_cast<size_type>(1);
+ }
+
+ void SetInlinedSize(size_type size) {
+ GetSizeAndIsAllocated() = size << static_cast<size_type>(1);
+ }
+
+ void AddSize(size_type count) {
+ GetSizeAndIsAllocated() += count << static_cast<size_type>(1);
+ }
+
+ void SubtractSize(size_type count) {
+ assert(count <= GetSize());
+
+ GetSizeAndIsAllocated() -= count << static_cast<size_type>(1);
+ }
+
+ void SetAllocatedData(pointer data, size_type capacity) {
+ data_.allocated.allocated_data = data;
+ data_.allocated.allocated_capacity = capacity;
+ }
+
+ void AcquireAllocatedData(AllocationTransaction* allocation_tx_ptr) {
+ SetAllocatedData(allocation_tx_ptr->GetData(),
+ allocation_tx_ptr->GetCapacity());
+ allocation_tx_ptr->GetData() = nullptr;
+ allocation_tx_ptr->GetCapacity() = 0;
+ }
+
+ void MemcpyFrom(const Storage& other_storage) {
+ assert(IsMemcpyOk::value || other_storage.GetIsAllocated());
+
+ GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
+ data_ = other_storage.data_;
+ }
+
+ void DeallocateIfAllocated() {
+ if (GetIsAllocated()) {
+ AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(),
+ GetAllocatedCapacity());
+ }
+ }
+
+ private:
+ using Metadata =
+ container_internal::CompressedTuple<allocator_type, size_type>;
+
+ struct Allocated {
+ pointer allocated_data;
+ size_type allocated_capacity;
+ };
+
+ struct Inlined {
+ using InlinedDataElement =
+ absl::aligned_storage_t<sizeof(value_type), alignof(value_type)>;
+ InlinedDataElement inlined_data[N];
+ };
+
+ union Data {
+ Allocated allocated;
+ Inlined inlined;
+ };
+
+ Metadata metadata_;
+ Data data_;
+};
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
+ -> void {
+ // Only callable from constructors!
+ assert(!GetIsAllocated());
+ assert(GetSize() == 0);
+
+ pointer construct_data;
+
+ if (new_size > GetInlinedCapacity()) {
+  // Because this is only called from the `InlinedVector` constructors, it's
+  // safe to take ownership of the allocation while the size is still `0`. If
+  // `ConstructElements(...)` throws, deallocation is handled by `~Storage()`.
+ size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size);
+ pointer new_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
+
+ SetAllocatedData(new_data, new_capacity);
+ SetIsAllocated();
+
+ construct_data = new_data;
+ } else {
+ construct_data = GetInlinedData();
+ }
+
+ inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
+ &values, new_size);
+
+ // Since the initial size was guaranteed to be `0` and the allocated bit is
+ // already correct for either case, *adding* `new_size` gives us the correct
+ // result faster than setting it directly.
+ AddSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
+ StorageView storage_view = MakeStorageView();
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ absl::Span<value_type> assign_loop;
+ absl::Span<value_type> construct_loop;
+ absl::Span<value_type> destroy_loop;
+
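+  // Partition the work into elements that can be copy-assigned in place,
+  // elements that must be constructed in uninitialized space, and elements
+  // that must be destroyed.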
+ if (new_size > storage_view.capacity) {
+ size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+
+ construct_loop = {new_data, new_size};
+ destroy_loop = {storage_view.data, storage_view.size};
+ } else if (new_size > storage_view.size) {
+ assign_loop = {storage_view.data, storage_view.size};
+ construct_loop = {storage_view.data + storage_view.size,
+ new_size - storage_view.size};
+ } else {
+ assign_loop = {storage_view.data, new_size};
+ destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
+ }
+
+ inlined_vector_internal::AssignElements(assign_loop.data(), &values,
+ assign_loop.size());
+
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), construct_loop.data(), &values, construct_loop.size());
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
+ destroy_loop.size());
+
+ if (allocation_tx.DidAllocate()) {
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+ SetIsAllocated();
+ }
+
+ SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
+ StorageView storage_view = MakeStorageView();
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+ ConstructionTransaction construction_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ absl::Span<value_type> construct_loop;
+ absl::Span<value_type> move_construct_loop;
+ absl::Span<value_type> destroy_loop;
+
+ if (new_size > storage_view.capacity) {
+ size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+ construct_loop = {new_data + storage_view.size,
+ new_size - storage_view.size};
+ move_construct_loop = {new_data, storage_view.size};
+ destroy_loop = {storage_view.data, storage_view.size};
+ } else if (new_size > storage_view.size) {
+ construct_loop = {storage_view.data + storage_view.size,
+ new_size - storage_view.size};
+ } else {
+ destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
+ }
+
+ construction_tx.Construct(construct_loop.data(), &values,
+ construct_loop.size());
+
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), move_construct_loop.data(), &move_values,
+ move_construct_loop.size());
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
+ destroy_loop.size());
+
+ construction_tx.Commit();
+ if (allocation_tx.DidAllocate()) {
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+ SetIsAllocated();
+ }
+
+ SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Insert(const_iterator pos, ValueAdapter values,
+ size_type insert_count) -> iterator {
+ StorageView storage_view = MakeStorageView();
+
+ size_type insert_index =
+ std::distance(const_iterator(storage_view.data), pos);
+ size_type insert_end_index = insert_index + insert_count;
+ size_type new_size = storage_view.size + insert_count;
+
+ if (new_size > storage_view.capacity) {
+ AllocationTransaction allocation_tx(GetAllocPtr());
+ ConstructionTransaction construction_tx(GetAllocPtr());
+ ConstructionTransaction move_construction_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+
+ construction_tx.Construct(new_data + insert_index, &values, insert_count);
+
+ move_construction_tx.Construct(new_data, &move_values, insert_index);
+
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), new_data + insert_end_index, &move_values,
+ storage_view.size - insert_index);
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+
+ construction_tx.Commit();
+ move_construction_tx.Commit();
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+
+ SetAllocatedSize(new_size);
+ return iterator(new_data + insert_index);
+ } else {
+ size_type move_construction_destination_index =
+ (std::max)(insert_end_index, storage_view.size);
+
+ ConstructionTransaction move_construction_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_construction_values(
+ MoveIterator(storage_view.data +
+ (move_construction_destination_index - insert_count)));
+ absl::Span<value_type> move_construction = {
+ storage_view.data + move_construction_destination_index,
+ new_size - move_construction_destination_index};
+
+ pointer move_assignment_values = storage_view.data + insert_index;
+ absl::Span<value_type> move_assignment = {
+ storage_view.data + insert_end_index,
+ move_construction_destination_index - insert_end_index};
+
+ absl::Span<value_type> insert_assignment = {move_assignment_values,
+ move_construction.size()};
+
+ absl::Span<value_type> insert_construction = {
+ insert_assignment.data() + insert_assignment.size(),
+ insert_count - insert_assignment.size()};
+
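+ // For example, inserting 2 values at index 1 into {a, b, c} with spare
+ // capacity move-constructs {b, c} into slots [3, 5) and assigns the new
+ // values over slots [1, 3). When the insertion reaches past the old end,
+ // the trailing new values are constructed rather than assigned.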
+ move_construction_tx.Construct(move_construction.data(),
+ &move_construction_values,
+ move_construction.size());
+
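+ // Move-assign backwards (as `std::move_backward(...)` would) because the
+ // source and destination ranges may overlap.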
+ for (pointer destination = move_assignment.data() + move_assignment.size(),
+ last_destination = move_assignment.data(),
+ source = move_assignment_values + move_assignment.size();
+ ;) {
+ --destination;
+ --source;
+ if (destination < last_destination) break;
+ *destination = std::move(*source);
+ }
+
+ inlined_vector_internal::AssignElements(insert_assignment.data(), &values,
+ insert_assignment.size());
+
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), insert_construction.data(), &values,
+ insert_construction.size());
+
+ move_construction_tx.Commit();
+
+ AddSize(insert_count);
+ return iterator(storage_view.data + insert_index);
+ }
+}
+
+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
+ StorageView storage_view = MakeStorageView();
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ pointer construct_data;
+
+ if (storage_view.size == storage_view.capacity) {
+ size_type new_capacity = NextCapacity(storage_view.capacity);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+
+ construct_data = new_data;
+ } else {
+ construct_data = storage_view.data;
+ }
+
+ pointer end = construct_data + storage_view.size;
+
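+ // Construct the new element before relocating the old ones so that `args`
+ // may alias an existing element; the old elements are moved and destroyed
+ // only afterwards.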
+ AllocatorTraits::construct(*GetAllocPtr(), end, std::forward<Args>(args)...);
+
+ if (allocation_tx.DidAllocate()) {
+ ABSL_INTERNAL_TRY {
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), allocation_tx.GetData(), &move_values,
+ storage_view.size);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ AllocatorTraits::destroy(*GetAllocPtr(), end);
+ ABSL_INTERNAL_RETHROW;
+ }
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+ SetIsAllocated();
+ }
+
+ AddSize(1);
+ return *end;
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Erase(const_iterator from, const_iterator to)
+ -> iterator {
+ assert(from != to);
+
+ StorageView storage_view = MakeStorageView();
+
+ size_type erase_size = std::distance(from, to);
+ size_type erase_index =
+ std::distance(const_iterator(storage_view.data), from);
+ size_type erase_end_index = erase_index + erase_size;
+
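+ // For example, erasing [1, 3) from {a, b, c, d, e} move-assigns {d, e}
+ // onto slots [1, 3) and then destroys the last two slots.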
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data + erase_end_index));
+
+ inlined_vector_internal::AssignElements(storage_view.data + erase_index,
+ &move_values,
+ storage_view.size - erase_end_index);
+
+ inlined_vector_internal::DestroyElements(
+ GetAllocPtr(), storage_view.data + (storage_view.size - erase_size),
+ erase_size);
+
+ SubtractSize(erase_size);
+ return iterator(storage_view.data + erase_index);
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
+ StorageView storage_view = MakeStorageView();
+
+ if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ size_type new_capacity =
+ ComputeCapacity(storage_view.capacity, requested_capacity);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+
+ inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data,
+ &move_values, storage_view.size);
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+ SetIsAllocated();
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::ShrinkToFit() -> void {
+ // May only be called on allocated instances!
+ assert(GetIsAllocated());
+
+ StorageView storage_view{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()};
+
+ if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ pointer construct_data;
+
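+ // If the elements fit back into the inline buffer, move them there;
+ // otherwise allocate a new buffer of exactly `size` elements.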
+ if (storage_view.size > GetInlinedCapacity()) {
+ size_type new_capacity = storage_view.size;
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+
+ construct_data = new_data;
+ } else {
+ construct_data = GetInlinedData();
+ }
+
+ ABSL_INTERNAL_TRY {
+ inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
+ &move_values, storage_view.size);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ SetAllocatedData(storage_view.data, storage_view.capacity);
+ ABSL_INTERNAL_RETHROW;
+ }
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+
+ AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
+ storage_view.capacity);
+
+ if (allocation_tx.DidAllocate()) {
+ AcquireAllocatedData(&allocation_tx);
+ } else {
+ UnsetIsAllocated();
+ }
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
+ using std::swap;
+ assert(this != other_storage_ptr);
+
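+ // Three cases: both allocated (swap the buffers), both inline (elementwise
+ // swap plus a tail of move-constructions), and mixed (move the inline
+ // elements into the allocated side's inline buffer, then hand the
+ // allocation to the formerly inline side).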
+ if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
+ swap(data_.allocated, other_storage_ptr->data_.allocated);
+ } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
+ Storage* small_ptr = this;
+ Storage* large_ptr = other_storage_ptr;
+ if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
+
+ for (size_type i = 0; i < small_ptr->GetSize(); ++i) {
+ swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
+ }
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize()));
+
+ inlined_vector_internal::ConstructElements(
+ large_ptr->GetAllocPtr(),
+ small_ptr->GetInlinedData() + small_ptr->GetSize(), &move_values,
+ large_ptr->GetSize() - small_ptr->GetSize());
+
+ inlined_vector_internal::DestroyElements(
+ large_ptr->GetAllocPtr(),
+ large_ptr->GetInlinedData() + small_ptr->GetSize(),
+ large_ptr->GetSize() - small_ptr->GetSize());
+ } else {
+ Storage* allocated_ptr = this;
+ Storage* inlined_ptr = other_storage_ptr;
+ if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
+
+ StorageView allocated_storage_view{allocated_ptr->GetAllocatedData(),
+ allocated_ptr->GetSize(),
+ allocated_ptr->GetAllocatedCapacity()};
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(inlined_ptr->GetInlinedData()));
+
+ ABSL_INTERNAL_TRY {
+ inlined_vector_internal::ConstructElements(
+ inlined_ptr->GetAllocPtr(), allocated_ptr->GetInlinedData(),
+ &move_values, inlined_ptr->GetSize());
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ allocated_ptr->SetAllocatedData(allocated_storage_view.data,
+ allocated_storage_view.capacity);
+ ABSL_INTERNAL_RETHROW;
+ }
+
+ inlined_vector_internal::DestroyElements(inlined_ptr->GetAllocPtr(),
+ inlined_ptr->GetInlinedData(),
+ inlined_ptr->GetSize());
+
+ inlined_ptr->SetAllocatedData(allocated_storage_view.data,
+ allocated_storage_view.capacity);
+ }
+
+ swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
+ swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr());
+}
+
+} // namespace inlined_vector_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
index f11a6ad2..3924b8aa 100644
--- a/absl/container/internal/layout.h
+++ b/absl/container/internal/layout.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -188,7 +188,7 @@
#endif
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
// A type wrapper that instructs `Layout` to use the specific alignment for the
@@ -644,7 +644,8 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
std::string DebugString() const {
const auto offsets = Offsets();
const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>()...};
- const std::string types[] = {adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
+ const std::string types[] = {
+ adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
for (size_t i = 0; i != NumOffsets - 1; ++i) {
absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
@@ -734,7 +735,7 @@ class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc
index b9f98471..44d84607 100644
--- a/absl/container/internal/layout_test.cc
+++ b/absl/container/internal/layout_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -28,7 +28,7 @@
#include "absl/types/span.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -46,7 +46,7 @@ Expected Type(Actual val) {
return val;
}
-// Helper class to test different size and alignments.
+// Helper classes to test different sizes and alignments.
struct alignas(8) Int128 {
uint64_t a, b;
friend bool operator==(Int128 lhs, Int128 rhs) {
@@ -58,6 +58,14 @@ struct alignas(8) Int128 {
}
};
+// int64_t is *not* 8-byte aligned on all platforms!
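+// (On 32-bit x86 ABIs, for example, alignof(int64_t) is typically 4, which
+// is why the tests below use this wrapper instead.)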
+struct alignas(8) Int64 {
+ int64_t a;
+ friend bool operator==(Int64 lhs, Int64 rhs) {
+ return lhs.a == rhs.a;
+ }
+};
+
// Properties of types that this test relies on.
static_assert(sizeof(int8_t) == 1, "");
static_assert(alignof(int8_t) == 1, "");
@@ -65,6 +73,8 @@ static_assert(sizeof(int16_t) == 2, "");
static_assert(alignof(int16_t) == 2, "");
static_assert(sizeof(int32_t) == 4, "");
static_assert(alignof(int32_t) == 4, "");
+static_assert(sizeof(Int64) == 8, "");
+static_assert(alignof(Int64) == 8, "");
static_assert(sizeof(Int128) == 16, "");
static_assert(alignof(Int128) == 8, "");
@@ -1282,14 +1292,14 @@ TEST(Layout, OverAligned) {
TEST(Layout, Alignment) {
static_assert(Layout<int8_t>::Alignment() == 1, "");
static_assert(Layout<int32_t>::Alignment() == 4, "");
- static_assert(Layout<int64_t>::Alignment() == 8, "");
+ static_assert(Layout<Int64>::Alignment() == 8, "");
static_assert(Layout<Aligned<int8_t, 64>>::Alignment() == 64, "");
- static_assert(Layout<int8_t, int32_t, int64_t>::Alignment() == 8, "");
- static_assert(Layout<int8_t, int64_t, int32_t>::Alignment() == 8, "");
- static_assert(Layout<int32_t, int8_t, int64_t>::Alignment() == 8, "");
- static_assert(Layout<int32_t, int64_t, int8_t>::Alignment() == 8, "");
- static_assert(Layout<int64_t, int8_t, int32_t>::Alignment() == 8, "");
- static_assert(Layout<int64_t, int32_t, int8_t>::Alignment() == 8, "");
+ static_assert(Layout<int8_t, int32_t, Int64>::Alignment() == 8, "");
+ static_assert(Layout<int8_t, Int64, int32_t>::Alignment() == 8, "");
+ static_assert(Layout<int32_t, int8_t, Int64>::Alignment() == 8, "");
+ static_assert(Layout<int32_t, Int64, int8_t>::Alignment() == 8, "");
+ static_assert(Layout<Int64, int8_t, int32_t>::Alignment() == 8, "");
+ static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, "");
}
TEST(Layout, ConstexprPartial) {
@@ -1324,7 +1334,7 @@ void ExpectPoisoned(const unsigned char (&buf)[N],
}
TEST(Layout, PoisonPadding) {
- using L = Layout<int8_t, int64_t, int32_t, Int128>;
+ using L = Layout<int8_t, Int64, int32_t, Int128>;
constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize();
{
@@ -1553,5 +1563,5 @@ TEST(CompactString, Works) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/node_hash_policy.h b/absl/container/internal/node_hash_policy.h
index e8d89f63..d7581360 100644
--- a/absl/container/internal/node_hash_policy.h
+++ b/absl/container/internal/node_hash_policy.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -40,7 +40,7 @@
#include <utility>
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class Reference, class Policy>
@@ -84,7 +84,7 @@ struct node_hash_policy {
};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
diff --git a/absl/container/internal/node_hash_policy_test.cc b/absl/container/internal/node_hash_policy_test.cc
index a73c7bba..d53b7364 100644
--- a/absl/container/internal/node_hash_policy_test.cc
+++ b/absl/container/internal/node_hash_policy_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,7 +21,7 @@
#include "absl/container/internal/hash_policy_traits.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -65,5 +65,5 @@ TEST_F(NodeTest, transfer) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/raw_hash_map.h b/absl/container/internal/raw_hash_map.h
index 53d4619a..00caa373 100644
--- a/absl/container/internal/raw_hash_map.h
+++ b/absl/container/internal/raw_hash_map.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,11 +19,12 @@
#include <type_traits>
#include <utility>
+#include "absl/base/internal/throw_delegate.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class Policy, class Hash, class Eq, class Alloc>
@@ -40,8 +41,8 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
using MappedConstReference = decltype(P::value(
std::addressof(std::declval<typename raw_hash_map::const_reference>())));
- using KeyArgImpl = container_internal::KeyArg<IsTransparent<Eq>::value &&
- IsTransparent<Hash>::value>;
+ using KeyArgImpl =
+ KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
public:
using key_type = typename Policy::key_type;
@@ -137,14 +138,20 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
template <class K = key_type, class P = Policy>
MappedReference<P> at(const key_arg<K>& key) {
auto it = this->find(key);
- if (it == this->end()) std::abort();
+ if (it == this->end()) {
+ base_internal::ThrowStdOutOfRange(
+ "absl::container_internal::raw_hash_map<>::at");
+ }
return Policy::value(&*it);
}
template <class K = key_type, class P = Policy>
MappedConstReference<P> at(const key_arg<K>& key) const {
auto it = this->find(key);
- if (it == this->end()) std::abort();
+ if (it == this->end()) {
+ base_internal::ThrowStdOutOfRange(
+ "absl::container_internal::raw_hash_map<>::at");
+ }
return Policy::value(&*it);
}
@@ -181,7 +188,7 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 4e690dac..02e74e21 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,7 +20,7 @@
#include "absl/base/config.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
constexpr size_t Group::kWidth;
@@ -44,5 +44,5 @@ bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
}
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 0c42e4ae..7b379d4f 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -91,36 +91,6 @@
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
-#ifndef SWISSTABLE_HAVE_SSE2
-#if defined(__SSE2__) || \
- (defined(_MSC_VER) && \
- (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
-#define SWISSTABLE_HAVE_SSE2 1
-#else
-#define SWISSTABLE_HAVE_SSE2 0
-#endif
-#endif
-
-#ifndef SWISSTABLE_HAVE_SSSE3
-#ifdef __SSSE3__
-#define SWISSTABLE_HAVE_SSSE3 1
-#else
-#define SWISSTABLE_HAVE_SSSE3 0
-#endif
-#endif
-
-#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2
-#error "Bad configuration!"
-#endif
-
-#if SWISSTABLE_HAVE_SSE2
-#include <emmintrin.h>
-#endif
-
-#if SWISSTABLE_HAVE_SSSE3
-#include <tmmintrin.h>
-#endif
-
#include <algorithm>
#include <cmath>
#include <cstdint>
@@ -135,18 +105,20 @@
#include "absl/base/internal/bits.h"
#include "absl/base/internal/endian.h"
#include "absl/base/port.h"
+#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
+#include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/container/internal/have_sse.h"
#include "absl/container/internal/layout.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
-#include "absl/types/optional.h"
#include "absl/utility/utility.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <size_t Width>
@@ -194,12 +166,6 @@ struct IsDecomposable<
std::declval<Ts>()...))>,
Policy, Hash, Eq, Ts...> : std::true_type {};
-template <class, class = void>
-struct IsTransparent : std::false_type {};
-template <class T>
-struct IsTransparent<T, absl::void_t<typename T::is_transparent>>
- : std::true_type {};
-
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
constexpr bool IsNoThrowSwappable() {
@@ -385,7 +351,7 @@ struct GroupSse2Impl {
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
#else
- return Match(kEmpty);
+ return Match(static_cast<h2_t>(kEmpty));
#endif
}
@@ -481,9 +447,7 @@ using Group = GroupPortableImpl;
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
-inline bool IsValidCapacity(size_t n) {
- return ((n + 1) & n) == 0 && n >= Group::kWidth - 1;
-}
+inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
// PRECONDITION:
// IsValidCapacity(capacity)
@@ -505,152 +469,32 @@ inline void ConvertDeletedToEmptyAndFullToDeleted(
ctrl[capacity] = kSentinel;
}
-// Rounds up the capacity to the next power of 2 minus 1 and ensures it is
-// greater or equal to Group::kWidth - 1.
+// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
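+// For example, NormalizeCapacity(4) == 7 and NormalizeCapacity(8) == 15.
+// Valid capacities are of the form 2^k - 1, which keeps the wrap-around
+// masks used during probing cheap.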
inline size_t NormalizeCapacity(size_t n) {
- constexpr size_t kMinCapacity = Group::kWidth - 1;
- return n <= kMinCapacity
- ? kMinCapacity
- : (std::numeric_limits<size_t>::max)() >> LeadingZeros(n);
+ return n ? ~size_t{} >> LeadingZeros(n) : 1;
}
-// The node_handle concept from C++17.
-// We specialize node_handle for sets and maps. node_handle_base holds the
-// common API of both.
-template <typename Policy, typename Alloc>
-class node_handle_base {
- protected:
- using PolicyTraits = hash_policy_traits<Policy>;
- using slot_type = typename PolicyTraits::slot_type;
-
- public:
- using allocator_type = Alloc;
-
- constexpr node_handle_base() {}
- node_handle_base(node_handle_base&& other) noexcept {
- *this = std::move(other);
- }
- ~node_handle_base() { destroy(); }
- node_handle_base& operator=(node_handle_base&& other) {
- destroy();
- if (!other.empty()) {
- alloc_ = other.alloc_;
- PolicyTraits::transfer(alloc(), slot(), other.slot());
- other.reset();
- }
- return *this;
- }
-
- bool empty() const noexcept { return !alloc_; }
- explicit operator bool() const noexcept { return !empty(); }
- allocator_type get_allocator() const { return *alloc_; }
-
- protected:
- template <typename, typename, typename, typename>
- friend class raw_hash_set;
-
- node_handle_base(const allocator_type& a, slot_type* s) : alloc_(a) {
- PolicyTraits::transfer(alloc(), slot(), s);
- }
-
- void destroy() {
- if (!empty()) {
- PolicyTraits::destroy(alloc(), slot());
- reset();
- }
- }
-
- void reset() {
- assert(alloc_.has_value());
- alloc_ = absl::nullopt;
- }
-
- slot_type* slot() const {
- assert(!empty());
- return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
- }
- allocator_type* alloc() { return std::addressof(*alloc_); }
-
- private:
- absl::optional<allocator_type> alloc_;
- mutable absl::aligned_storage_t<sizeof(slot_type), alignof(slot_type)>
- slot_space_;
-};
-
-// For sets.
-template <typename Policy, typename Alloc, typename = void>
-class node_handle : public node_handle_base<Policy, Alloc> {
- using Base = typename node_handle::node_handle_base;
-
- public:
- using value_type = typename Base::PolicyTraits::value_type;
-
- constexpr node_handle() {}
-
- value_type& value() const {
- return Base::PolicyTraits::element(this->slot());
- }
-
- private:
- template <typename, typename, typename, typename>
- friend class raw_hash_set;
-
- node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
-};
-
-// For maps.
-template <typename Policy, typename Alloc>
-class node_handle<Policy, Alloc, absl::void_t<typename Policy::mapped_type>>
- : public node_handle_base<Policy, Alloc> {
- using Base = typename node_handle::node_handle_base;
-
- public:
- using key_type = typename Policy::key_type;
- using mapped_type = typename Policy::mapped_type;
-
- constexpr node_handle() {}
-
- auto key() const -> decltype(Base::PolicyTraits::key(this->slot())) {
- return Base::PolicyTraits::key(this->slot());
- }
-
- mapped_type& mapped() const {
- return Base::PolicyTraits::value(
- &Base::PolicyTraits::element(this->slot()));
+// We use 7/8ths as the maximum load factor.
+// For 16-wide groups, that gives an average of two empty slots per group.
+inline size_t CapacityToGrowth(size_t capacity) {
+ assert(IsValidCapacity(capacity));
+ // `capacity*7/8`
+ if (Group::kWidth == 8 && capacity == 7) {
+ // x-x/8 does not work when x==7.
+ return 6;
}
-
- private:
- template <typename, typename, typename, typename>
- friend class raw_hash_set;
-
- node_handle(const Alloc& a, typename Base::slot_type* s) : Base(a, s) {}
-};
-
-// Implement the insert_return_type<> concept of C++17.
-template <class Iterator, class NodeType>
-struct insert_return_type {
- Iterator position;
- bool inserted;
- NodeType node;
-};
-
-// Helper trait to allow or disallow arbitrary keys when the hash and
-// eq functions are transparent.
-// It is very important that the inner template is an alias and that the type it
-// produces is not a dependent type. Otherwise, type deduction would fail.
-template <bool is_transparent>
-struct KeyArg {
- // Transparent. Forward `K`.
- template <typename K, typename key_type>
- using type = K;
-};
-
-template <>
-struct KeyArg<false> {
- // Not transparent. Always use `key_type`.
- template <typename K, typename key_type>
- using type = key_type;
-};
+ return capacity - capacity / 8;
+}
+// From a desired "growth" to a lower bound of the necessary capacity.
+// The result might not be a valid capacity and may require NormalizeCapacity().
+inline size_t GrowthToLowerboundCapacity(size_t growth) {
+ // `growth*8/7`
+ if (Group::kWidth == 8 && growth == 7) {
+ // x+(x-1)/7 does not work when x==7.
+ return 8;
+ }
+ return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
+}
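+
+// For example, with integer division CapacityToGrowth(15) == 15 - 15/8 == 14
+// and GrowthToLowerboundCapacity(14) == 14 + 13/7 == 15, so the two
+// functions round-trip. The Group::kWidth == 8 special cases above keep this
+// consistent at the boundary: CapacityToGrowth(7) == 6 and
+// GrowthToLowerboundCapacity(7) == 8.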
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
@@ -666,14 +510,14 @@ struct KeyArg<false> {
// if they are equal, false if they are not. If two keys compare equal, then
// their hash values as defined by Hash MUST be equal.
//
-// Allocator: an Allocator [http://devdocs.io/cpp/concept/allocator] with which
+// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which
// the storage of the hashtable will be allocated and the elements will be
// constructed and destroyed.
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set {
using PolicyTraits = hash_policy_traits<Policy>;
- using KeyArgImpl = container_internal::KeyArg<IsTransparent<Eq>::value &&
- IsTransparent<Hash>::value>;
+ using KeyArgImpl =
+ KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
public:
using init_type = typename PolicyTraits::init_type;
@@ -814,7 +658,11 @@ class raw_hash_set {
}
ctrl_t* ctrl_ = nullptr;
- slot_type* slot_;
+ // To avoid uninitialized member warnings, put slot_ in an anonymous union.
+ // The member is not initialized on singular and end iterators.
+ union {
+ slot_type* slot_;
+ };
};
class const_iterator {
@@ -854,7 +702,8 @@ class raw_hash_set {
iterator inner_;
};
- using node_type = container_internal::node_handle<Policy, Alloc>;
+ using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
+ using insert_return_type = InsertReturnType<iterator, node_type>;
raw_hash_set() noexcept(
std::is_nothrow_default_constructible<hasher>::value&&
@@ -867,7 +716,7 @@ class raw_hash_set {
: ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
if (bucket_count) {
capacity_ = NormalizeCapacity(bucket_count);
- growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
+ reset_growth_left();
initialize_slots();
}
}
@@ -909,8 +758,8 @@ class raw_hash_set {
// that accept std::initializer_list<T> and std::initializer_list<init_type>.
// This is advantageous for performance.
//
- // // Turns {"abc", "def"} into std::initializer_list<std::string>, then copies
- // // the strings into the set.
+ // // Turns {"abc", "def"} into std::initializer_list<std::string>, then
+ // // copies the strings into the set.
// std::unordered_set<std::string> s = {"abc", "def"};
//
// // Turns {"abc", "def"} into std::initializer_list<const char*>, then
@@ -973,9 +822,10 @@ class raw_hash_set {
// than a full `insert`.
for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
- const size_t i = find_first_non_full(hash);
- set_ctrl(i, H2(hash));
- emplace_at(i, v);
+ auto target = find_first_non_full(hash);
+ set_ctrl(target.offset, H2(hash));
+ emplace_at(target.offset, v);
+ infoz_.RecordInsert(hash, target.probe_length);
}
size_ = that.size();
growth_left() -= that.size();
@@ -989,6 +839,7 @@ class raw_hash_set {
slots_(absl::exchange(that.slots_, nullptr)),
size_(absl::exchange(that.size_, 0)),
capacity_(absl::exchange(that.capacity_, 0)),
+ infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())),
// Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called.
@@ -1009,6 +860,7 @@ class raw_hash_set {
std::swap(size_, that.size_);
std::swap(capacity_, that.capacity_);
std::swap(growth_left(), that.growth_left());
+ std::swap(infoz_, that.infoz_);
} else {
reserve(that.size());
// Note: this will copy elements of dense_set and unordered_set instead of
@@ -1058,7 +910,7 @@ class raw_hash_set {
size_t capacity() const { return capacity_; }
size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
- void clear() {
+ ABSL_ATTRIBUTE_REINITIALIZES void clear() {
// Iterating over this container is O(bucket_count()). When bucket_count()
// is much greater than size(), iteration becomes prohibitively expensive.
// For clear() it is more important to reuse the allocated array when the
@@ -1076,9 +928,10 @@ class raw_hash_set {
}
size_ = 0;
reset_ctrl();
- growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor);
+ reset_growth_left();
}
assert(empty());
+ infoz_.RecordStorageChanged(0, capacity_);
}
// This overload kicks in when the argument is an rvalue of insertable and
@@ -1117,7 +970,7 @@ class raw_hash_set {
// This overload kicks in when the argument is an rvalue of init_type. Its
// purpose is to handle brace-init-list arguments.
//
- // flat_hash_set<std::string, int> s;
+ // flat_hash_map<std::string, int> s;
// s.insert({"abc", 42});
std::pair<iterator, bool> insert(init_type&& value) {
return emplace(std::move(value));
@@ -1158,13 +1011,14 @@ class raw_hash_set {
insert(ilist.begin(), ilist.end());
}
- insert_return_type<iterator, node_type> insert(node_type&& node) {
+ insert_return_type insert(node_type&& node) {
if (!node) return {end(), false, node_type()};
- const auto& elem = PolicyTraits::element(node.slot());
+ const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
auto res = PolicyTraits::apply(
- InsertSlot<false>{*this, std::move(*node.slot())}, elem);
+ InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
+ elem);
if (res.second) {
- node.reset();
+ CommonAccess::Reset(&node);
return {res.first, true, node_type()};
} else {
return {res.first, false, std::move(node)};
@@ -1328,7 +1182,8 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- node_type node(alloc_ref(), position.inner_.slot_);
+ auto node =
+ CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
return node;
}
@@ -1353,6 +1208,7 @@ class raw_hash_set {
swap(growth_left(), that.growth_left());
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
+ swap(infoz_, that.infoz_);
if (AllocTraits::propagate_on_container_swap::value) {
swap(alloc_ref(), that.alloc_ref());
} else {
@@ -1363,17 +1219,21 @@ class raw_hash_set {
void rehash(size_t n) {
if (n == 0 && capacity_ == 0) return;
- if (n == 0 && size_ == 0) return destroy_slots();
- auto m = NormalizeCapacity(std::max(n, NumSlotsFast(size())));
+ if (n == 0 && size_ == 0) {
+ destroy_slots();
+ infoz_.RecordStorageChanged(0, 0);
+ return;
+ }
+ // bitor is a faster way of doing `max` here. We will round up to the next
+ // power-of-2-minus-1, so bitor is good enough.
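+ // (For example, max(5, 3) == 5 and (5 | 3) == 7; both normalize to 7.)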
+ auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
// n == 0 unconditionally rehashes as per the standard.
if (n == 0 || m > capacity_) {
resize(m);
}
}
- void reserve(size_t n) {
- rehash(NumSlotsFast(n));
- }
+ void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
// Extension API: support for heterogeneous keys.
//
@@ -1551,13 +1411,6 @@ class raw_hash_set {
slot_type&& slot;
};
- // Computes std::ceil(n / kMaxLoadFactor). Faster than calling std::ceil.
- static inline size_t NumSlotsFast(size_t n) {
- return static_cast<size_t>(
- (n * kMaxLoadFactorDenominator + (kMaxLoadFactorNumerator - 1)) /
- kMaxLoadFactorNumerator);
- }
-
// "erases" the object from the container, except that it doesn't actually
// destroy the object. It only updates all the metadata of the class.
// This can be used in conjunction with Policy::transfer to move the object to
@@ -1580,17 +1433,34 @@ class raw_hash_set {
set_ctrl(index, was_never_full ? kEmpty : kDeleted);
growth_left() += was_never_full;
+ infoz_.RecordErase();
}
void initialize_slots() {
assert(capacity_);
+ // Folks with custom allocators often make unwarranted assumptions about the
+ // behavior of their classes vis-a-vis trivial destructibility and what
+ // calls they will or won't make. Avoid sampling for people with custom
+ // allocators to get us out of this mess. This is not a hard guarantee but
+ // a workaround while we plan the exact guarantee we want to provide.
+ //
+ // People are often sloppy with the exact type of their allocator (sometimes
+ // it has an extra const or is missing the pair, but rebinds made it work
+ // anyway). To avoid the ambiguity, we work off SlotAlloc which we have
+ // bound more carefully.
+ if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
+ slots_ == nullptr) {
+ infoz_ = Sample();
+ }
+
auto layout = MakeLayout(capacity_);
char* mem = static_cast<char*>(
Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
slots_ = layout.template Pointer<1>(mem);
reset_ctrl();
- growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+ reset_growth_left();
+ infoz_.RecordStorageChanged(size_, capacity_);
}
void destroy_slots() {
@@ -1619,11 +1489,14 @@ class raw_hash_set {
capacity_ = new_capacity;
initialize_slots();
+ size_t total_probe_length = 0;
for (size_t i = 0; i != old_capacity; ++i) {
if (IsFull(old_ctrl[i])) {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(old_slots + i));
- size_t new_i = find_first_non_full(hash);
+ auto target = find_first_non_full(hash);
+ size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
set_ctrl(new_i, H2(hash));
PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
}
@@ -1635,10 +1508,12 @@ class raw_hash_set {
Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
layout.AllocSize());
}
+ infoz_.RecordRehash(total_probe_length);
}
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_));
+ assert(!is_small());
// Algorithm:
// - mark all DELETED slots as EMPTY
// - mark all FULL slots as DELETED
@@ -1658,12 +1533,15 @@ class raw_hash_set {
ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
raw;
+ size_t total_probe_length = 0;
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
for (size_t i = 0; i != capacity_; ++i) {
if (!IsDeleted(ctrl_[i])) continue;
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(slots_ + i));
- size_t new_i = find_first_non_full(hash);
+ auto target = find_first_non_full(hash);
+ size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
// Verify if the old and new i fall within the same group wrt the hash.
// If they do, we don't need to move the object as it already falls in the
@@ -1695,13 +1573,14 @@ class raw_hash_set {
--i; // repeat
}
}
- growth_left() = static_cast<size_t>(capacity_ * kMaxLoadFactor) - size_;
+ reset_growth_left();
+ infoz_.RecordRehash(total_probe_length);
}
void rehash_and_grow_if_necessary() {
if (capacity_ == 0) {
- resize(Group::kWidth - 1);
- } else if (size() <= kMaxLoadFactor / 2 * capacity_) {
+ resize(1);
+ } else if (size() <= CapacityToGrowth(capacity()) / 2) {
// Squash DELETED without growing if there is enough capacity.
drop_deletes_without_resize();
} else {
@@ -1736,24 +1615,26 @@ class raw_hash_set {
// - the input is already a set
// - there are enough slots
// - the element with the hash is not in the table
- size_t find_first_non_full(size_t hash) {
+ struct FindInfo {
+ size_t offset;
+ size_t probe_length;
+ };
+ FindInfo find_first_non_full(size_t hash) {
auto seq = probe(hash);
while (true) {
Group g{ctrl_ + seq.offset()};
auto mask = g.MatchEmptyOrDeleted();
if (mask) {
#if !defined(NDEBUG)
- // We want to force small tables to have random entries too, so
- // in debug build we will randomly insert in either the front or back of
+ // We want to add entropy even when ASLR is not enabled.
+ // In debug builds we will randomly insert at either the front or back of
// the group.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
- if (ShouldInsertBackwards(hash, ctrl_))
- return seq.offset(mask.HighestBitSet());
- else
- return seq.offset(mask.LowestBitSet());
-#else
- return seq.offset(mask.LowestBitSet());
+ if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
+ return {seq.offset(mask.HighestBitSet()), seq.index()};
+ }
#endif
+ return {seq.offset(mask.LowestBitSet()), seq.index()};
}
assert(seq.index() < capacity_ && "full table!");
seq.next();
@@ -1792,15 +1673,17 @@ class raw_hash_set {
}
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
- size_t target = find_first_non_full(hash);
- if (ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(ctrl_[target]))) {
+ auto target = find_first_non_full(hash);
+ if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
+ !IsDeleted(ctrl_[target.offset]))) {
rehash_and_grow_if_necessary();
target = find_first_non_full(hash);
}
++size_;
- growth_left() -= IsEmpty(ctrl_[target]);
- set_ctrl(target, H2(hash));
- return target;
+ growth_left() -= IsEmpty(ctrl_[target.offset]);
+ set_ctrl(target.offset, H2(hash));
+ infoz_.RecordInsert(hash, target.probe_length);
+ return target.offset;
}
// Constructs the value in the space pointed by the iterator. This only works
@@ -1838,6 +1721,10 @@ class raw_hash_set {
SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
}
+ void reset_growth_left() {
+ growth_left() = CapacityToGrowth(capacity()) - size_;
+ }
+
// Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at
// the end too.
void set_ctrl(size_t i, ctrl_t h) {
@@ -1850,11 +1737,28 @@ class raw_hash_set {
}
ctrl_[i] = h;
- ctrl_[((i - Group::kWidth) & capacity_) + Group::kWidth] = h;
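+ // For `i < Group::kWidth` the expression below resolves to
+ // `ctrl_[i + capacity_ + 1]`; for larger `i` it simply rewrites `ctrl_[i]`.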
+ ctrl_[((i - Group::kWidth) & capacity_) + 1 +
+ ((Group::kWidth - 1) & capacity_)] = h;
}
size_t& growth_left() { return settings_.template get<0>(); }
+ // The representation of the object has two modes:
+ // - small: For capacities < kWidth-1
+ // - large: For the rest.
+ //
+ // Differences:
+ // - In small mode we are able to use the whole capacity. The extra control
+ // bytes give us at least one "empty" control byte to stop the iteration.
+ // This is important to make 1 a valid capacity.
+ //
+ // - In small mode only the first `capacity()` control bytes after the
+ // sentinel are valid. The rest contain dummy kEmpty values that do not
+ // represent a real slot. This is important to take into account on
+ // find_first_non_full(), where we never try ShouldInsertBackwards() for
+ // small tables.
+ bool is_small() const { return capacity_ < Group::kWidth - 1; }
+
hasher& hash_ref() { return settings_.template get<1>(); }
const hasher& hash_ref() const { return settings_.template get<1>(); }
key_equal& eq_ref() { return settings_.template get<2>(); }
@@ -1864,12 +1768,6 @@ class raw_hash_set {
return settings_.template get<3>();
}
- // On average each group has 2 empty slot (for the vectorized case).
- static constexpr int64_t kMaxLoadFactorNumerator = 14;
- static constexpr int64_t kMaxLoadFactorDenominator = 16;
- static constexpr float kMaxLoadFactor =
- 1.0 * kMaxLoadFactorNumerator / kMaxLoadFactorDenominator;
-
// TODO(alkis): Investigate removing some of these fields:
// - ctrl/slots can be derived from each other
// - size can be moved into the slot array
@@ -1877,6 +1775,7 @@ class raw_hash_set {
slot_type* slots_ = nullptr; // [capacity * slot_type]
size_t size_ = 0; // number of full slots
size_t capacity_ = 0; // total number of slots
+ HashtablezInfoHandle infoz_;
absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
key_equal, allocator_type>
settings_{0, hasher{}, key_equal{}, allocator_type{}};
@@ -1929,10 +1828,9 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
}
static size_t LowerBoundAllocatedByteSize(size_t size) {
- size_t capacity = container_internal::NormalizeCapacity(
- std::ceil(size / Set::kMaxLoadFactor));
+ size_t capacity = GrowthToLowerboundCapacity(size);
if (capacity == 0) return 0;
- auto layout = Set::MakeLayout(capacity);
+ auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
size_t m = layout.AllocSize();
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
@@ -1944,7 +1842,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
} // namespace hashtable_debug_internal
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc
index f5779d62..5188b3ae 100644
--- a/absl/container/internal/raw_hash_set_allocator_test.cc
+++ b/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,7 +20,7 @@
#include "absl/container/internal/tracked.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -426,5 +426,5 @@ TEST_F(PropagateOnAll, Swap) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 302f9758..2783f5c4 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -35,7 +35,7 @@
#include "absl/strings/string_view.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
struct RawHashSetTestOnlyAccess {
@@ -49,18 +49,47 @@ namespace {
using ::testing::DoubleNear;
using ::testing::ElementsAre;
+using ::testing::Ge;
+using ::testing::Lt;
using ::testing::Optional;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
TEST(Util, NormalizeCapacity) {
- constexpr size_t kMinCapacity = Group::kWidth - 1;
- EXPECT_EQ(kMinCapacity, NormalizeCapacity(0));
- EXPECT_EQ(kMinCapacity, NormalizeCapacity(1));
- EXPECT_EQ(kMinCapacity, NormalizeCapacity(2));
- EXPECT_EQ(kMinCapacity, NormalizeCapacity(kMinCapacity));
- EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 1));
- EXPECT_EQ(kMinCapacity * 2 + 1, NormalizeCapacity(kMinCapacity + 2));
+ EXPECT_EQ(1, NormalizeCapacity(0));
+ EXPECT_EQ(1, NormalizeCapacity(1));
+ EXPECT_EQ(3, NormalizeCapacity(2));
+ EXPECT_EQ(3, NormalizeCapacity(3));
+ EXPECT_EQ(7, NormalizeCapacity(4));
+ EXPECT_EQ(7, NormalizeCapacity(7));
+ EXPECT_EQ(15, NormalizeCapacity(8));
+ EXPECT_EQ(15, NormalizeCapacity(15));
+ EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
+ EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
+}
+
+TEST(Util, GrowthAndCapacity) {
+ // Verify that NormalizeCapacity(GrowthToLowerboundCapacity(growth)) gives
+ // the minimum capacity that has enough growth.
+ for (size_t growth = 0; growth < 10000; ++growth) {
+ SCOPED_TRACE(growth);
+ size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
+ // The capacity is large enough for `growth`.
+ EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
+ if (growth != 0 && capacity > 1) {
+ // There is no smaller capacity that works.
+ EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
+ }
+ }
+
+ for (size_t capacity = Group::kWidth - 1; capacity < 10000;
+ capacity = 2 * capacity + 1) {
+ SCOPED_TRACE(capacity);
+ size_t growth = CapacityToGrowth(capacity);
+ EXPECT_THAT(growth, Lt(capacity));
+ EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
+ EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
+ }
}
TEST(Util, probe_seq) {
@@ -107,14 +136,14 @@ TEST(BitMask, WithShift) {
}
TEST(BitMask, LeadingTrailing) {
- EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).LeadingZeros()), 3);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b0001101001000000).TrailingZeros()), 6);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).LeadingZeros()), 15);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b0000000000000001).TrailingZeros()), 0);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).LeadingZeros()), 0);
- EXPECT_EQ((BitMask<uint32_t, 16>(0b1000000000000000).TrailingZeros()), 15);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);
EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
@@ -315,7 +344,25 @@ struct IntTable
: raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
std::equal_to<int64_t>, std::allocator<int64_t>> {
using Base = typename IntTable::raw_hash_set;
- IntTable() {}
+ using Base::Base;
+};
+
+template <typename T>
+struct CustomAlloc : std::allocator<T> {
+ CustomAlloc() {}
+
+ template <typename U>
+ CustomAlloc(const CustomAlloc<U>& other) {}
+
+ template <class U>
+ struct rebind {
+ using other = CustomAlloc<U>;
+ };
+};
+
+struct CustomAllocIntTable
+ : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, CustomAlloc<int64_t>> {
+ using Base = typename CustomAllocIntTable::raw_hash_set;
using Base::Base;
};
@@ -343,6 +390,7 @@ TEST(Table, EmptyFunctorOptimization) {
size_t size;
size_t capacity;
size_t growth_left;
+ void* infoz;
};
struct StatelessHash {
size_t operator()(absl::string_view) const { return 0; }
@@ -385,10 +433,11 @@ TEST(Table, Prefetch) {
t.prefetch(2);
// Do not run in debug mode, when prefetch is not implemented, or when
- // sanitizers are enabled.
-#if defined(NDEBUG) && defined(__GNUC__) && !defined(ADDRESS_SANITIZER) && \
- !defined(MEMORY_SANITIZER) && !defined(THREAD_SANITIZER) && \
- !defined(UNDEFINED_BEHAVIOR_SANITIZER)
+ // sanitizers are enabled, or on WebAssembly.
+#if defined(NDEBUG) && defined(__GNUC__) && defined(__x86_64__) && \
+ !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
+ !defined(THREAD_SANITIZER) && !defined(UNDEFINED_BEHAVIOR_SANITIZER) && \
+ !defined(__EMSCRIPTEN__)
const auto now = [] { return absl::base_internal::CycleClock::Now(); };
// Make size enough to not fit in L2 cache (16.7 Mb)
@@ -785,7 +834,7 @@ TEST(Table, EnsureNonQuadraticAsInRust) {
TEST(Table, ClearBug) {
IntTable t;
constexpr size_t capacity = container_internal::Group::kWidth - 1;
- constexpr size_t max_size = capacity / 2;
+ constexpr size_t max_size = capacity / 2 + 1;
for (size_t i = 0; i < max_size; ++i) {
t.insert(i);
}
@@ -816,6 +865,25 @@ TEST(Table, Erase) {
EXPECT_TRUE(t.find(0) == t.end());
}
+TEST(Table, EraseMaintainsValidIterator) {
+ IntTable t;
+ const int kNumElements = 100;
+ for (int i = 0; i < kNumElements; i++) {
+ EXPECT_TRUE(t.emplace(i).second);
+ }
+ EXPECT_EQ(t.size(), kNumElements);
+
+ int num_erase_calls = 0;
+ auto it = t.begin();
+ while (it != t.end()) {
+ t.erase(it++);
+ num_erase_calls++;
+ }
+
+ EXPECT_TRUE(t.empty());
+ EXPECT_EQ(num_erase_calls, kNumElements);
+}
+
// Collect N bad keys by following algorithm:
// 1. Create an empty table and reserve it to 2 * N.
// 2. Insert N random elements.
@@ -1014,7 +1082,7 @@ ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys
ExpectedStats XorSeedExpectedStats() {
constexpr bool kRandomizesInserts =
-#if NDEBUG
+#ifdef NDEBUG
false;
#else // NDEBUG
true;
@@ -1051,6 +1119,7 @@ ExpectedStats XorSeedExpectedStats() {
ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
return {};
}
+
TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
ProbeStatsPerSize stats;
std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
@@ -1107,7 +1176,7 @@ ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
ExpectedStats LinearTransformExpectedStats() {
constexpr bool kRandomizesInserts =
-#if NDEBUG
+#ifdef NDEBUG
false;
#else // NDEBUG
true;
@@ -1144,6 +1213,7 @@ ExpectedStats LinearTransformExpectedStats() {
ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
return {};
}
+
TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
ProbeStatsPerSize stats;
std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
@@ -1296,37 +1366,31 @@ TEST(Table, ConstructFromInitList) {
TEST(Table, CopyConstruct) {
IntTable t;
- t.max_load_factor(.321f);
t.emplace(0);
EXPECT_EQ(1, t.size());
{
IntTable u(t);
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find(0), 0);
}
{
IntTable u{t};
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find(0), 0);
}
{
IntTable u = t;
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find(0), 0);
}
}
TEST(Table, CopyConstructWithAlloc) {
StringTable t;
- t.max_load_factor(.321f);
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u(t, Alloc<std::pair<std::string, std::string>>());
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
@@ -1344,94 +1408,75 @@ TEST(Table, AllocWithExplicitCtor) {
TEST(Table, MoveConstruct) {
{
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u(std::move(t));
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
{
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u{std::move(t)};
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
{
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u = std::move(t);
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
}
TEST(Table, MoveConstructWithAlloc) {
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
TEST(Table, CopyAssign) {
StringTable t;
- t.max_load_factor(.321f);
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u;
u = t;
EXPECT_EQ(1, u.size());
- EXPECT_EQ(t.max_load_factor(), u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
TEST(Table, CopySelfAssign) {
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
t = *&t;
EXPECT_EQ(1, t.size());
- EXPECT_EQ(lf, t.max_load_factor());
EXPECT_THAT(*t.find("a"), Pair("a", "b"));
}
TEST(Table, MoveAssign) {
StringTable t;
- t.max_load_factor(.321f);
- const float lf = t.max_load_factor();
t.emplace("a", "b");
EXPECT_EQ(1, t.size());
StringTable u;
u = std::move(t);
EXPECT_EQ(1, u.size());
- EXPECT_EQ(lf, u.max_load_factor());
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
TEST(Table, Equality) {
StringTable t;
- std::vector<std::pair<std::string, std::string>> v = {{"a", "b"}, {"aa", "bb"}};
+ std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
+ {"aa", "bb"}};
t.insert(std::begin(v), std::end(v));
StringTable u = t;
EXPECT_EQ(u, t);
@@ -1439,20 +1484,24 @@ TEST(Table, Equality) {
TEST(Table, Equality2) {
StringTable t;
- std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"}, {"aa", "bb"}};
+ std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
+ {"aa", "bb"}};
t.insert(std::begin(v1), std::end(v1));
StringTable u;
- std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}};
+ std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+ {"aa", "aa"}};
u.insert(std::begin(v2), std::end(v2));
EXPECT_NE(u, t);
}
TEST(Table, Equality3) {
StringTable t;
- std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"}, {"bb", "bb"}};
+ std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
+ {"bb", "bb"}};
t.insert(std::begin(v1), std::end(v1));
StringTable u;
- std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"}, {"aa", "aa"}};
+ std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+ {"aa", "aa"}};
u.insert(std::begin(v2), std::end(v2));
EXPECT_NE(u, t);
}
@@ -1677,7 +1726,7 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(node.empty());
StringTable t2;
- auto res = t2.insert(std::move(node));
+ StringTable::insert_return_type res = t2.insert(std::move(node));
EXPECT_TRUE(res.inserted);
EXPECT_THAT(*res.position, Pair(k0, ""));
EXPECT_FALSE(res.node);
@@ -1707,80 +1756,74 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(node);
}
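
Spelling out `StringTable::insert_return_type` names the C++17 node-handle result type explicitly. A self-contained sketch of the same flow using std::unordered_map, whose node-handle API raw_hash_set mirrors, showing the three fields the test checks (requires -std=c++17):

    #include <cassert>
    #include <string>
    #include <unordered_map>

    int main() {
      std::unordered_map<std::string, std::string> t1 = {{"k0", ""}};
      auto node = t1.extract("k0");  // t1 no longer owns the element
      assert(!node.empty());

      std::unordered_map<std::string, std::string> t2;
      auto res = t2.insert(std::move(node));  // insert_return_type
      assert(res.inserted);
      assert(res.position->first == "k0");
      assert(!res.node);  // handle was consumed by the successful insert
    }
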
-StringTable MakeSimpleTable(size_t size) {
- StringTable t;
- for (size_t i = 0; i < size; ++i) t.emplace(std::string(1, 'A' + i), "");
+IntTable MakeSimpleTable(size_t size) {
+ IntTable t;
+ while (t.size() < size) t.insert(t.size());
return t;
}
-std::string OrderOfIteration(const StringTable& t) {
- std::string order;
- for (auto& p : t) order += p.first;
- return order;
+std::vector<int> OrderOfIteration(const IntTable& t) {
+ return {t.begin(), t.end()};
}
+// These IterationOrderChanges tests depend on non-deterministic behavior.
+// We are injecting non-determinism from the pointer of the table, but do so in
+// a way that only the page matters. We have to retry enough times to make sure
+// we are touching different memory pages to cause the ordering to change.
+// We also need to keep the old tables around to avoid getting the same memory
+// blocks over and over.
TEST(Table, IterationOrderChangesByInstance) {
- // Needs to be more than kWidth elements to be able to affect order.
- const StringTable reference = MakeSimpleTable(20);
-
- // Since order is non-deterministic we can't just try once and verify.
- // We'll try until we find that order changed. It should not take many tries
- // for that.
- // Important: we have to keep the old tables around. Otherwise tcmalloc will
- // just give us the same blocks and we would be doing the same order again.
- std::vector<StringTable> garbage;
- for (int i = 0; i < 10; ++i) {
- auto trial = MakeSimpleTable(20);
- if (OrderOfIteration(trial) != OrderOfIteration(reference)) {
- // We are done.
- return;
+ for (size_t size : {2, 6, 12, 20}) {
+ const auto reference_table = MakeSimpleTable(size);
+ const auto reference = OrderOfIteration(reference_table);
+
+ std::vector<IntTable> tables;
+ bool found_difference = false;
+ for (int i = 0; !found_difference && i < 5000; ++i) {
+ tables.push_back(MakeSimpleTable(size));
+ found_difference = OrderOfIteration(tables.back()) != reference;
+ }
+ if (!found_difference) {
+ FAIL()
+ << "Iteration order remained the same across many attempts with size "
+ << size;
}
- garbage.push_back(std::move(trial));
}
- FAIL();
}
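
For users, what these tests pin down is that iteration order is deliberately unstable across instances and rehashes, so it must never be relied on. A sketch of the usual normalization, assuming absl::flat_hash_set (any of these tables behaves the same; names are illustrative):

    #include <algorithm>
    #include <cstdio>
    #include <vector>
    #include "absl/container/flat_hash_set.h"

    // A deterministic view of non-deterministically ordered storage.
    std::vector<int> SortedContents(const absl::flat_hash_set<int>& s) {
      std::vector<int> v(s.begin(), s.end());
      std::sort(v.begin(), v.end());
      return v;
    }

    int main() {
      absl::flat_hash_set<int> s = {3, 1, 2};
      for (int x : SortedContents(s)) std::printf("%d ", x);  // always 1 2 3
    }
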
TEST(Table, IterationOrderChangesOnRehash) {
- // Since order is non-deterministic we can't just try once and verify.
- // We'll try until we find that order changed. It should not take many tries
- // for that.
- // Important: we have to keep the old tables around. Otherwise tcmalloc will
- // just give us the same blocks and we would be doing the same order again.
- std::vector<StringTable> garbage;
- for (int i = 0; i < 10; ++i) {
- // Needs to be more than kWidth elements to be able to affect order.
- StringTable t = MakeSimpleTable(20);
- const std::string reference = OrderOfIteration(t);
+ std::vector<IntTable> garbage;
+ for (int i = 0; i < 5000; ++i) {
+ auto t = MakeSimpleTable(20);
+ const auto reference = OrderOfIteration(t);
// Force rehash to the same size.
t.rehash(0);
- std::string trial = OrderOfIteration(t);
+ auto trial = OrderOfIteration(t);
if (trial != reference) {
// We are done.
return;
}
garbage.push_back(std::move(t));
}
- FAIL();
+ FAIL() << "Iteration order remained the same across many attempts.";
}
-TEST(Table, IterationOrderChangesForSmallTables) {
- // Since order is non-deterministic we can't just try once and verify.
- // We'll try until we find that order changed.
- // Important: we have to keep the old tables around. Otherwise tcmalloc will
- // just give us the same blocks and we would be doing the same order again.
- StringTable reference_table = MakeSimpleTable(5);
- const std::string reference = OrderOfIteration(reference_table);
- std::vector<StringTable> garbage;
- for (int i = 0; i < 50; ++i) {
- StringTable t = MakeSimpleTable(5);
- std::string trial = OrderOfIteration(t);
- if (trial != reference) {
- // We are done.
- return;
- }
- garbage.push_back(std::move(t));
- }
- FAIL() << "Iteration order remained the same across many attempts.";
+// Verify that pointers are invalidated as soon as a second element is inserted.
+// This prevents dependency on pointer stability on small tables.
+TEST(Table, UnstablePointers) {
+ IntTable table;
+
+ const auto addr = [&](int i) {
+ return reinterpret_cast<uintptr_t>(&*table.find(i));
+ };
+
+ table.insert(0);
+ const uintptr_t old_ptr = addr(0);
+
+ // This causes a rehash.
+ table.insert(1);
+
+ EXPECT_NE(old_ptr, addr(0));
}
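
The contract being enforced here: flat tables provide no pointer stability, so any insert may invalidate previously obtained element pointers. A sketch of the safe pattern (re-look-up after every mutation), assuming absl::flat_hash_map and an illustrative helper name:

    #include <cstdio>
    #include "absl/container/flat_hash_map.h"

    // The returned pointer is only valid until the next mutation of `m`.
    int* FindValue(absl::flat_hash_map<int, int>& m, int key) {
      auto it = m.find(key);
      return it == m.end() ? nullptr : &it->second;
    }

    int main() {
      absl::flat_hash_map<int, int> m;
      m[0] = 10;
      int* v = FindValue(m, 0);
      m[1] = 11;            // may rehash and invalidate v
      v = FindValue(m, 0);  // re-look-up instead of reusing the old pointer
      std::printf("%d\n", *v);
    }
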
// Confirm that we assert if we try to erase() end().
@@ -1799,9 +1842,52 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}
+TEST(RawHashSamplerTest, Sample) {
+ // Enable the feature even if the prod default is off.
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+
+ auto& sampler = HashtablezSampler::Global();
+ size_t start_size = 0;
+ start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+
+ std::vector<IntTable> tables;
+ for (int i = 0; i < 1000000; ++i) {
+ tables.emplace_back();
+ tables.back().insert(1);
+ }
+ size_t end_size = 0;
+ end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+
+ EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
+ 0.01, 0.005);
+}
+
+TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
+ // Enable the feature even if the prod default is off.
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+
+ auto& sampler = HashtablezSampler::Global();
+ size_t start_size = 0;
+ start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+
+ std::vector<CustomAllocIntTable> tables;
+ for (int i = 0; i < 1000000; ++i) {
+ tables.emplace_back();
+ tables.back().insert(1);
+ }
+ size_t end_size = 0;
+ end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+
+ EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
+ 0.00, 0.001);
+}
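
The 0.01 and 0.00 targets above follow from SetHashtablezSampleParameter(100) requesting roughly one sampled table per hundred, with custom-allocator tables excluded from sampling entirely. Treating sampling as i.i.d. Bernoulli with p = 1/100 over 10^6 tables (an approximation; the real sampler draws geometric skip intervals) puts the expected noise far inside the 0.005 band:

    #include <cmath>
    #include <cstdio>

    int main() {
      const double p = 0.01;  // requested sampling rate, 1 in 100
      const double n = 1e6;   // tables created by the test
      // stddev of the observed fraction under the Bernoulli approximation
      std::printf("%g\n", std::sqrt(p * (1 - p) / n));  // ~1e-4 << 0.005
    }
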
+
#ifdef ADDRESS_SANITIZER
TEST(Sanitizer, PoisoningUnused) {
IntTable t;
+ t.reserve(5);
// Insert something to force an allocation.
int64_t& v1 = *t.insert(0).first;
@@ -1826,5 +1912,5 @@ TEST(Sanitizer, PoisoningOnErase) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/test_instance_tracker.cc b/absl/container/internal/test_instance_tracker.cc
index 91441729..f4b283fd 100644
--- a/absl/container/internal/test_instance_tracker.cc
+++ b/absl/container/internal/test_instance_tracker.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,7 +15,7 @@
#include "absl/container/internal/test_instance_tracker.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace test_internal {
int BaseCountedInstance::num_instances_ = 0;
int BaseCountedInstance::num_live_instances_ = 0;
@@ -25,5 +25,5 @@ int BaseCountedInstance::num_swaps_ = 0;
int BaseCountedInstance::num_comparisons_ = 0;
} // namespace test_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/test_instance_tracker.h b/absl/container/internal/test_instance_tracker.h
index 060077d0..ab7f9f22 100644
--- a/absl/container/internal/test_instance_tracker.h
+++ b/absl/container/internal/test_instance_tracker.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,11 +18,13 @@
#include <cstdlib>
#include <ostream>
+#include "absl/types/compare.h"
+
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace test_internal {
-// A type that counts number of occurences of the type, the live occurrences of
+// A type that counts number of occurrences of the type, the live occurrences of
// the type, as well as the number of copies, moves, swaps, and comparisons that
// have occurred on the type. This is used as a base class for the copyable,
// copyable+movable, and movable types below that are used in actual tests. Use
@@ -97,6 +99,14 @@ class BaseCountedInstance {
return value_ >= x.value_;
}
+ absl::weak_ordering compare(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ < x.value_
+ ? absl::weak_ordering::less
+ : value_ == x.value_ ? absl::weak_ordering::equivalent
+ : absl::weak_ordering::greater;
+ }
+
int value() const {
if (!is_live_) std::abort();
return value_;
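
absl::weak_ordering, like C++20's std::weak_ordering, is consumed by comparing the result against literal 0 (the tracker test later asserts one.compare(two) < 0). A small runnable sketch under that assumption, with an illustrative type:

    #include <cstdio>
    #include "absl/types/compare.h"

    struct Cell {
      int v;
      absl::weak_ordering compare(const Cell& x) const {
        return v < x.v    ? absl::weak_ordering::less
               : v == x.v ? absl::weak_ordering::equivalent
                          : absl::weak_ordering::greater;
      }
    };

    int main() {
      Cell a{1}, b{2};
      if (a.compare(b) < 0) std::puts("a orders before b");
    }
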
@@ -258,7 +268,7 @@ class MovableOnlyInstance : public BaseCountedInstance {
};
} // namespace test_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
diff --git a/absl/container/internal/test_instance_tracker_test.cc b/absl/container/internal/test_instance_tracker_test.cc
index 0ae57636..1c6a4fa7 100644
--- a/absl/container/internal/test_instance_tracker_test.cc
+++ b/absl/container/internal/test_instance_tracker_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -174,6 +174,8 @@ TEST(TestInstanceTracker, Comparisons) {
EXPECT_EQ(5, tracker.comparisons());
EXPECT_FALSE(one >= two);
EXPECT_EQ(6, tracker.comparisons());
+ EXPECT_TRUE(one.compare(two) < 0); // NOLINT
+ EXPECT_EQ(7, tracker.comparisons());
tracker.ResetCopiesMovesSwaps();
EXPECT_EQ(0, tracker.comparisons());
diff --git a/absl/container/internal/tracked.h b/absl/container/internal/tracked.h
index f72c46ea..e9e6b95d 100644
--- a/absl/container/internal/tracked.h
+++ b/absl/container/internal/tracked.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,7 +20,7 @@
#include <utility>
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
// A class that tracks its copies and moves so that it can be queried in tests.
@@ -74,7 +74,7 @@ class Tracked {
};
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_TRACKED_H_
diff --git a/absl/container/internal/unordered_map_constructor_test.h b/absl/container/internal/unordered_map_constructor_test.h
index 14ceeecb..b64b5520 100644
--- a/absl/container/internal/unordered_map_constructor_test.h
+++ b/absl/container/internal/unordered_map_constructor_test.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -24,13 +24,13 @@
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class UnordMap>
class ConstructorTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(ConstructorTest);
+TYPED_TEST_SUITE_P(ConstructorTest);
TYPED_TEST_P(ConstructorTest, NoArgs) {
TypeParam m;
@@ -84,8 +84,28 @@ TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
EXPECT_GE(m.bucket_count(), 123);
}
-TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+template <typename T>
+struct is_std_unordered_map : std::false_type {};
+
+template <typename... T>
+struct is_std_unordered_map<std::unordered_map<T...>> : std::true_type {};
+
#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+using has_cxx14_std_apis = std::true_type;
+#else
+using has_cxx14_std_apis = std::false_type;
+#endif
+
+template <typename T>
+using expect_cxx14_apis =
+ absl::disjunction<absl::negation<is_std_unordered_map<T>>,
+ has_cxx14_std_apis>;
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(123, alloc);
@@ -93,11 +113,17 @@ TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
-#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+ BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::true_type) {
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
@@ -108,18 +134,38 @@ TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, BucketAlloc) {
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+ BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+using has_alloc_std_constructors = std::true_type;
+#else
+using has_alloc_std_constructors = std::false_type;
+#endif
+
+template <typename T>
+using expect_alloc_constructors =
+ absl::disjunction<absl::negation<is_std_unordered_map<T>>,
+ has_alloc_std_constructors>;
+
+template <typename TypeParam>
+void AllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void AllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, Alloc) {
+ AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}
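
The recurring rewrite in this header (and in its unordered_set twin further down) swaps per-test `#if` blocks for tag dispatch: the test body moves into a function template whose std::true_type overload holds the real checks, and a type-level predicate decides at instantiation time whether a given container/toolchain pair should exercise them. A condensed standalone sketch; all names here are illustrative, and the has_cxx14_std_apis flag is an assumption:

    #include <cstdio>
    #include <type_traits>
    #include <unordered_map>

    template <typename T>
    struct is_std_unordered_map_sketch : std::false_type {};
    template <typename... T>
    struct is_std_unordered_map_sketch<std::unordered_map<T...>>
        : std::true_type {};

    template <typename Map>
    void BucketCountAllocCheck(std::false_type) {
      std::puts("skipped: container lacks the (bucket_count, alloc) overload");
    }
    template <typename Map>
    void BucketCountAllocCheck(std::true_type) {
      typename Map::allocator_type alloc;
      Map m(123, alloc);  // the C++14 constructor under test
      std::printf("bucket_count >= 123: %d\n", int(m.bucket_count() >= 123));
    }

    int main() {
      using M = std::unordered_map<int, int>;
      constexpr bool has_cxx14_std_apis = true;  // assumed for this sketch
      using expect = std::integral_constant<
          bool, !is_std_unordered_map_sketch<M>::value || has_cxx14_std_apis>;
      BucketCountAllocCheck<M>(expect{});
    }
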
TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
@@ -141,8 +187,11 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
EXPECT_GE(m.bucket_count(), 123);
}
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
-#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
A alloc(0);
@@ -153,11 +202,17 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
-#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+ InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
@@ -171,7 +226,10 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+ InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}
TYPED_TEST_P(ConstructorTest, CopyConstructor) {
@@ -191,8 +249,11 @@ TYPED_TEST_P(ConstructorTest, CopyConstructor) {
EXPECT_EQ(m, n);
}
-TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
-#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -207,7 +268,10 @@ TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+ CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}
// TODO(alkis): Test non-propagating allocators on copy constructors.
@@ -230,8 +294,11 @@ TYPED_TEST_P(ConstructorTest, MoveConstructor) {
EXPECT_EQ(m, n);
}
-TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
-#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -247,7 +314,10 @@ TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+ MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}
// TODO(alkis): Test non-propagating allocators on move constructors.
@@ -270,8 +340,11 @@ TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
EXPECT_GE(m.bucket_count(), 123);
}
-TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
-#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
hash_internal::Generator<T> gen;
@@ -281,11 +354,17 @@ TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
-#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+ InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
@@ -298,7 +377,10 @@ TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+ InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}
TYPED_TEST_P(ConstructorTest, Assignment) {
@@ -391,17 +473,17 @@ TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
REGISTER_TYPED_TEST_CASE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
- BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc,
- BucketAlloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+ BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
+ InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
- MoveAssignment, AssignmentFromInitializerList,
- AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting,
+ MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
+ MoveAssignmentOverwritesExisting,
AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
diff --git a/absl/container/internal/unordered_map_lookup_test.h b/absl/container/internal/unordered_map_lookup_test.h
index d767aa8d..9ad78a79 100644
--- a/absl/container/internal/unordered_map_lookup_test.h
+++ b/absl/container/internal/unordered_map_lookup_test.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,13 +21,13 @@
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class UnordMap>
class LookupTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(LookupTest);
+TYPED_TEST_SUITE_P(LookupTest);
TYPED_TEST_P(LookupTest, At) {
using T = hash_internal::GeneratedType<TypeParam>;
@@ -111,7 +111,7 @@ REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
EqualRange);
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
diff --git a/absl/container/internal/unordered_map_members_test.h b/absl/container/internal/unordered_map_members_test.h
new file mode 100644
index 00000000..c4600405
--- /dev/null
+++ b/absl/container/internal/unordered_map_members_test.h
@@ -0,0 +1,87 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+
+#include <type_traits>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+
+template <class UnordMap>
+class MembersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(MembersTest);
+
+template <typename T>
+void UseType() {}
+
+TYPED_TEST_P(MembersTest, Typedefs) {
+ EXPECT_TRUE((std::is_same<std::pair<const typename TypeParam::key_type,
+ typename TypeParam::mapped_type>,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((absl::conjunction<
+ absl::negation<std::is_signed<typename TypeParam::size_type>>,
+ std::is_integral<typename TypeParam::size_type>>()));
+ EXPECT_TRUE((absl::conjunction<
+ std::is_signed<typename TypeParam::difference_type>,
+ std::is_integral<typename TypeParam::difference_type>>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::hasher&>()(
+ std::declval<const typename TypeParam::key_type&>())),
+ size_t>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::key_equal&>()(
+ std::declval<const typename TypeParam::key_type&>(),
+ std::declval<const typename TypeParam::key_type&>())),
+ bool>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
+ typename TypeParam::reference>()));
+ EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
+ typename TypeParam::const_reference>()));
+ EXPECT_TRUE((std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::pointer,
+ typename TypeParam::pointer>()));
+ EXPECT_TRUE(
+ (std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::const_pointer,
+ typename TypeParam::const_pointer>()));
+}
+
+TYPED_TEST_P(MembersTest, SimpleFunctions) {
+ EXPECT_GT(TypeParam().max_size(), 0);
+}
+
+TYPED_TEST_P(MembersTest, BeginEnd) {
+ TypeParam t = {typename TypeParam::value_type{}};
+ EXPECT_EQ(t.begin(), t.cbegin());
+ EXPECT_EQ(t.end(), t.cend());
+ EXPECT_NE(t.begin(), t.end());
+ EXPECT_NE(t.cbegin(), t.cend());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
diff --git a/absl/container/internal/unordered_map_modifiers_test.h b/absl/container/internal/unordered_map_modifiers_test.h
index 5d7f1fe3..89dd7894 100644
--- a/absl/container/internal/unordered_map_modifiers_test.h
+++ b/absl/container/internal/unordered_map_modifiers_test.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,13 +21,13 @@
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class UnordMap>
class ModifiersTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(ModifiersTest);
+TYPED_TEST_SUITE_P(ModifiersTest);
TYPED_TEST_P(ModifiersTest, Clear) {
using T = hash_internal::GeneratedType<TypeParam>;
@@ -269,7 +269,7 @@ REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
Erase, EraseRange, EraseKey, Swap);
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
diff --git a/absl/container/internal/unordered_map_test.cc b/absl/container/internal/unordered_map_test.cc
index 548f69f7..51a90af8 100644
--- a/absl/container/internal/unordered_map_test.cc
+++ b/absl/container/internal/unordered_map_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,10 +16,11 @@
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
+#include "absl/container/internal/unordered_map_members_test.h"
#include "absl/container/internal/unordered_map_modifiers_test.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -30,11 +31,12 @@ using MapTypes = ::testing::Types<
StatefulTestingEqual,
Alloc<std::pair<const std::string, std::string>>>>;
-INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, ConstructorTest, MapTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, LookupTest, MapTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(UnorderedMap, ModifiersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, ConstructorTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, LookupTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, MembersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, ModifiersTest, MapTypes);
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/internal/unordered_set_constructor_test.h b/absl/container/internal/unordered_set_constructor_test.h
index f370b249..ac73a896 100644
--- a/absl/container/internal/unordered_set_constructor_test.h
+++ b/absl/container/internal/unordered_set_constructor_test.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,21 +16,23 @@
#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
#include <algorithm>
+#include <unordered_set>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/meta/type_traits.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class UnordMap>
class ConstructorTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(ConstructorTest);
+TYPED_TEST_SUITE_P(ConstructorTest);
TYPED_TEST_P(ConstructorTest, NoArgs) {
TypeParam m;
@@ -92,8 +94,28 @@ TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
EXPECT_GE(cm.bucket_count(), 123);
}
-TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+template <typename T>
+struct is_std_unordered_set : std::false_type {};
+
+template <typename... T>
+struct is_std_unordered_set<std::unordered_set<T...>> : std::true_type {};
+
#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+using has_cxx14_std_apis = std::true_type;
+#else
+using has_cxx14_std_apis = std::false_type;
+#endif
+
+template <typename T>
+using expect_cxx14_apis =
+ absl::disjunction<absl::negation<is_std_unordered_set<T>>,
+ has_cxx14_std_apis>;
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(123, alloc);
@@ -101,11 +123,17 @@ TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
-#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+ BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::true_type) {
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
@@ -116,18 +144,38 @@ TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, BucketAlloc) {
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+ BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+using has_alloc_std_constructors = std::true_type;
+#else
+using has_alloc_std_constructors = std::false_type;
+#endif
+
+template <typename T>
+using expect_alloc_constructors =
+ absl::disjunction<absl::negation<is_std_unordered_set<T>>,
+ has_alloc_std_constructors>;
+
+template <typename TypeParam>
+void AllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void AllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, Alloc) {
+ AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}
TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
@@ -149,8 +197,11 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
EXPECT_GE(m.bucket_count(), 123);
}
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
-#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
A alloc(0);
@@ -161,11 +212,17 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
-#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+ InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
@@ -179,7 +236,10 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+ InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}
TYPED_TEST_P(ConstructorTest, CopyConstructor) {
@@ -197,10 +257,14 @@ TYPED_TEST_P(ConstructorTest, CopyConstructor) {
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
+ EXPECT_NE(TypeParam(0, hasher, equal, alloc), n);
}
-TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
-#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -215,7 +279,10 @@ TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+ CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}
// TODO(alkis): Test non-propagating allocators on copy constructors.
@@ -238,8 +305,11 @@ TYPED_TEST_P(ConstructorTest, MoveConstructor) {
EXPECT_EQ(m, n);
}
-TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
-#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -255,7 +325,10 @@ TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
-#endif
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+ MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}
// TODO(alkis): Test non-propagating allocators on move constructors.
@@ -278,8 +351,11 @@ TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
EXPECT_GE(m.bucket_count(), 123);
}
-TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
-#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
hash_internal::Generator<T> gen;
@@ -289,11 +365,17 @@ TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
-#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+ InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
@@ -306,10 +388,13 @@ TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
-#endif
}
-TYPED_TEST_P(ConstructorTest, Assignment) {
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+ InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyAssignment) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -395,17 +480,17 @@ TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
REGISTER_TYPED_TEST_CASE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
- BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc,
- BucketAlloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+ BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
+ InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
- InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
- MoveAssignment, AssignmentFromInitializerList,
- AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting,
+ InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment,
+ MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
+ MoveAssignmentOverwritesExisting,
AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
diff --git a/absl/container/internal/unordered_set_lookup_test.h b/absl/container/internal/unordered_set_lookup_test.h
index 9174279a..722fb1c2 100644
--- a/absl/container/internal/unordered_set_lookup_test.h
+++ b/absl/container/internal/unordered_set_lookup_test.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,13 +21,13 @@
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class UnordSet>
class LookupTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(LookupTest);
+TYPED_TEST_SUITE_P(LookupTest);
TYPED_TEST_P(LookupTest, Count) {
using T = hash_internal::GeneratedType<TypeParam>;
@@ -85,7 +85,7 @@ TYPED_TEST_P(LookupTest, EqualRange) {
REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
diff --git a/absl/container/internal/unordered_set_members_test.h b/absl/container/internal/unordered_set_members_test.h
new file mode 100644
index 00000000..756a95cb
--- /dev/null
+++ b/absl/container/internal/unordered_set_members_test.h
@@ -0,0 +1,86 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+
+#include <type_traits>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+inline namespace lts_2019_08_08 {
+namespace container_internal {
+
+template <class UnordSet>
+class MembersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(MembersTest);
+
+template <typename T>
+void UseType() {}
+
+TYPED_TEST_P(MembersTest, Typedefs) {
+ EXPECT_TRUE((std::is_same<typename TypeParam::key_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((absl::conjunction<
+ absl::negation<std::is_signed<typename TypeParam::size_type>>,
+ std::is_integral<typename TypeParam::size_type>>()));
+ EXPECT_TRUE((absl::conjunction<
+ std::is_signed<typename TypeParam::difference_type>,
+ std::is_integral<typename TypeParam::difference_type>>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::hasher&>()(
+ std::declval<const typename TypeParam::key_type&>())),
+ size_t>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::key_equal&>()(
+ std::declval<const typename TypeParam::key_type&>(),
+ std::declval<const typename TypeParam::key_type&>())),
+ bool>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
+ typename TypeParam::reference>()));
+ EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
+ typename TypeParam::const_reference>()));
+ EXPECT_TRUE((std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::pointer,
+ typename TypeParam::pointer>()));
+ EXPECT_TRUE(
+ (std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::const_pointer,
+ typename TypeParam::const_pointer>()));
+}
+
+TYPED_TEST_P(MembersTest, SimpleFunctions) {
+ EXPECT_GT(TypeParam().max_size(), 0);
+}
+
+TYPED_TEST_P(MembersTest, BeginEnd) {
+ TypeParam t = {typename TypeParam::value_type{}};
+ EXPECT_EQ(t.begin(), t.cbegin());
+ EXPECT_EQ(t.end(), t.cend());
+ EXPECT_NE(t.begin(), t.end());
+ EXPECT_NE(t.cbegin(), t.cend());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
+
+} // namespace container_internal
+} // inline namespace lts_2019_08_08
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
diff --git a/absl/container/internal/unordered_set_modifiers_test.h b/absl/container/internal/unordered_set_modifiers_test.h
index 0a1e9b1b..d3e534d3 100644
--- a/absl/container/internal/unordered_set_modifiers_test.h
+++ b/absl/container/internal/unordered_set_modifiers_test.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,13 +21,13 @@
#include "absl/container/internal/hash_policy_testing.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class UnordSet>
class ModifiersTest : public ::testing::Test {};
-TYPED_TEST_CASE_P(ModifiersTest);
+TYPED_TEST_SUITE_P(ModifiersTest);
TYPED_TEST_P(ModifiersTest, Clear) {
using T = hash_internal::GeneratedType<TypeParam>;
@@ -184,7 +184,7 @@ REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
EraseKey, Swap);
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
diff --git a/absl/container/internal/unordered_set_test.cc b/absl/container/internal/unordered_set_test.cc
index 263059eb..2356e187 100644
--- a/absl/container/internal/unordered_set_test.cc
+++ b/absl/container/internal/unordered_set_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,24 +16,26 @@
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
+#include "absl/container/internal/unordered_set_members_test.h"
#include "absl/container/internal/unordered_set_modifiers_test.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
-using SetTypes =
- ::testing::Types<std::unordered_set<int, StatefulTestingHash,
- StatefulTestingEqual, Alloc<int>>,
- std::unordered_set<std::string, StatefulTestingHash,
- StatefulTestingEqual, Alloc<std::string>>>;
+using SetTypes = ::testing::Types<
+ std::unordered_set<int, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<int>>,
+ std::unordered_set<std::string, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<std::string>>>;
-INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, ConstructorTest, SetTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, LookupTest, SetTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(UnorderedSet, ModifiersTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, ConstructorTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, LookupTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, MembersTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, ModifiersTest, SetTypes);
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h
index 48c7752e..addf120f 100644
--- a/absl/container/node_hash_map.h
+++ b/absl/container/node_hash_map.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -48,7 +48,7 @@
#include "absl/memory/memory.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <class Key, class Value>
class NodeHashMapPolicy;
@@ -578,7 +578,7 @@ struct IsUnorderedContainer<
} // namespace container_algorithm_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_NODE_HASH_MAP_H_
diff --git a/absl/container/node_hash_map_test.cc b/absl/container/node_hash_map_test.cc
index 76a387b8..7ce7ca02 100644
--- a/absl/container/node_hash_map_test.cc
+++ b/absl/container/node_hash_map_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,10 +17,11 @@
#include "absl/container/internal/tracked.h"
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
+#include "absl/container/internal/unordered_map_members_test.h"
#include "absl/container/internal/unordered_map_modifiers_test.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
@@ -35,9 +36,10 @@ using MapTypes = ::testing::Types<
StatefulTestingEqual,
Alloc<std::pair<const std::string, std::string>>>>;
-INSTANTIATE_TYPED_TEST_CASE_P(NodeHashMap, ConstructorTest, MapTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(NodeHashMap, LookupTest, MapTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(NodeHashMap, ModifiersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashMap, ConstructorTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashMap, LookupTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashMap, MembersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashMap, ModifiersTest, MapTypes);
using M = absl::node_hash_map<std::string, Tracked<int>>;
@@ -216,5 +218,5 @@ TEST(NodeHashMap, MergeExtractInsert) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
index c4179195..103d32d2 100644
--- a/absl/container/node_hash_set.h
+++ b/absl/container/node_hash_set.h
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -44,7 +44,7 @@
#include "absl/memory/memory.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
template <typename T>
struct NodeHashSetPolicy;
@@ -484,7 +484,7 @@ struct IsUnorderedContainer<absl::node_hash_set<Key, Hash, KeyEqual, Allocator>>
: std::true_type {};
} // namespace container_algorithm_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_NODE_HASH_SET_H_
diff --git a/absl/container/node_hash_set_test.cc b/absl/container/node_hash_set_test.cc
index 59f25285..65d125ed 100644
--- a/absl/container/node_hash_set_test.cc
+++ b/absl/container/node_hash_set_test.cc
@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,10 +16,11 @@
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
+#include "absl/container/internal/unordered_set_members_test.h"
#include "absl/container/internal/unordered_set_modifiers_test.h"
namespace absl {
-inline namespace lts_2018_12_18 {
+inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
using ::absl::container_internal::hash_internal::Enum;
@@ -30,14 +31,15 @@ using ::testing::UnorderedElementsAre;
using SetTypes = ::testing::Types<
node_hash_set<int, StatefulTestingHash, StatefulTestingEqual, Alloc<int>>,
node_hash_set<std::string, StatefulTestingHash, StatefulTestingEqual,
- Alloc<int>>,
+ Alloc<std::string>>,
node_hash_set<Enum, StatefulTestingHash, StatefulTestingEqual, Alloc<Enum>>,
node_hash_set<EnumClass, StatefulTestingHash, StatefulTestingEqual,
Alloc<EnumClass>>>;
-INSTANTIATE_TYPED_TEST_CASE_P(NodeHashSet, ConstructorTest, SetTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(NodeHashSet, LookupTest, SetTypes);
-INSTANTIATE_TYPED_TEST_CASE_P(NodeHashSet, ModifiersTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashSet, ConstructorTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashSet, LookupTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashSet, MembersTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashSet, ModifiersTest, SetTypes);
TEST(NodeHashSet, MoveableNotCopyableCompiles) {
node_hash_set<std::unique_ptr<void*>> t;
@@ -101,5 +103,5 @@ TEST(NodeHashSet, MergeExtractInsert) {
} // namespace
} // namespace container_internal
-} // inline namespace lts_2018_12_18
+} // inline namespace lts_2019_08_08
} // namespace absl