| author | Abseil Team <absl-team@google.com> | 2021-12-03 10:01:02 -0800 |
|---|---|---|
| committer | Andy Getz <durandal@google.com> | 2021-12-03 13:15:13 -0500 |
| commit | 9336be04a242237cd41a525bedfcf3be1bb55377 (patch) | |
| tree | 781262c8d79eb78d942bdc2c945dda8b225ca51e /absl/container | |
| parent | e11e039e02ef30a98a7928ce6c59cebe096dd753 (diff) | |
Export of internal Abseil changes
--
e7f53dfbf809812e84770217777f81b6308a3084 by Abseil Team <absl-team@google.com>:
Add a parameter pack to the absl profile API so that profiles can separate
dynamic data from static data that is available at constructor time.
Background: `inline_element_size` is effectively constant, but there was a
data race between its initialization and its access. We had fixed that race
by making `inline_element_size` atomic. This CL changes `inline_element_size`
back to a non-atomic integer, and instead gives all profiles a way to supply
such static values at Register() time.
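For illustration, a minimal sketch of the pattern (the `Sampler`/`SampleInfo` names and the freelist comment are assumptions, not the actual Abseil sampler API):

```cpp
#include <cstddef>
#include <mutex>
#include <utility>

// Hypothetical sample record. The "static" datum is a plain member: it is
// written once in PrepareForSampling() while init_mu is held, and readers
// that also lock init_mu can never observe it uninitialized, so no atomic
// is needed.
struct SampleInfo {
  std::mutex init_mu;
  size_t inline_element_size = 0;  // logically const after registration

  void PrepareForSampling(size_t inline_element_size_value) {
    // ... reset the dynamic (atomic) counters here ...
    inline_element_size = inline_element_size_value;
  }
};

// Hypothetical sampler. The parameter pack lets each profile type declare
// whatever constructor-time data it needs at Register() time.
template <typename T>
class Sampler {
 public:
  template <typename... Args>
  T* Register(Args&&... args) {
    T* sample = new T();  // the real sampler recycles samples from a freelist
    std::lock_guard<std::mutex> lock(sample->init_mu);
    sample->PrepareForSampling(std::forward<Args>(args)...);
    return sample;
  }
};
```

Because the static field is written before `init_mu` is released, any reader that locks `init_mu` sees it fully initialized, which is what removes the need for the atomic.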
PiperOrigin-RevId: 413960559
--
70234c5943f8e37e17c1d9c54d8ed61d39880abf by Chris Kennelly <ckennelly@google.com>:
Document that absl::FunctionRef does not allocate.
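For reference, a small usage sketch (not part of this CL) of the guarantee being documented: `absl::FunctionRef` is a non-owning view of a callable, so passing a capturing lambda never heap-allocates the way a `std::function` parameter might:

```cpp
#include <cstdio>

#include "absl/functional/function_ref.h"

// Takes the callable by non-owning reference; no allocation, no copy of the
// lambda's captures.
void ForEachDigit(int n, absl::FunctionRef<void(int)> fn) {
  for (; n > 0; n /= 10) fn(n % 10);
}

int main() {
  int sum = 0;
  ForEachDigit(1234, [&sum](int d) { sum += d; });
  std::printf("sum of digits: %d\n", sum);  // prints 10
}
```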
PiperOrigin-RevId: 413946831
--
3308ae571412c4be3cc32d088c6edac98ff2d1ed by Samuel Benzaquen <sbenza@google.com>:
Internal change
PiperOrigin-RevId: 413933619
--
1617093a730d055edcf7bc04fdd6509783f5f75d by Martijn Vels <mvels@google.com>:
Internal Change
PiperOrigin-RevId: 413778735
--
03ad683f059c806a6c8b04f5b79b2662c3df8c73 by Evan Brown <ezb@google.com>:
Unify btree erase_if definitions and optimize them so that we only do rebalancing once per leaf node.
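The caller-visible behavior is unchanged; a minimal usage sketch of `absl::erase_if` on a b-tree container (illustrative values only):

```cpp
#include <cstdio>

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, int> m = {{1, 10}, {2, 20}, {3, 30}, {4, 40}};
  // One pass over the tree; with this change, erasures within a single leaf
  // are batched so the leaf is rebalanced at most once.
  const auto erased =
      absl::erase_if(m, [](const std::pair<const int, int>& kv) {
        return kv.first % 2 == 0;  // drop even keys
      });
  std::printf("erased=%d size=%d\n", static_cast<int>(erased),
              static_cast<int>(m.size()));  // erased=2 size=2
}
```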
PiperOrigin-RevId: 413757280
--
5ba402f70801938178e486617063f01c7862525d by Martijn Vels <mvels@google.com>:
Clean up cord sampling internals
PiperOrigin-RevId: 413755011
--
522da8f9d3e0f11630d89fb41952004742bc335a by Evan Brown <ezb@google.com>:
Add b-tree benchmark for erase_if.
Since this benchmark doesn't work for std:: containers before C++20 (std::erase_if was only added in C++20), disable it for them.
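For reference, a sketch of the portability constraint using the standard feature-test macro (the benchmark's actual guard may differ):

```cpp
#include <map>  // defines __cpp_lib_erase_if when std::erase_if is available

#if defined(__cpp_lib_erase_if)  // std::erase_if for containers is C++20
#include <cassert>

int main() {
  std::map<int, int> m{{1, 1}, {2, 2}, {3, 3}};
  const auto n =
      std::erase_if(m, [](const auto& kv) { return kv.first % 2 == 0; });
  assert(n == 1 && m.size() == 2);
  return 0;
}
#else
int main() { return 0; }  // pre-C++20: std::erase_if is unavailable
#endif
```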
PiperOrigin-RevId: 413740844
--
a690ea42de8ed4a761d00235d8b2fb7548ba9732 by Andy Getzendanner <durandal@google.com>:
Import of CCTZ from GitHub.
PiperOrigin-RevId: 413735737
GitOrigin-RevId: e7f53dfbf809812e84770217777f81b6308a3084
Change-Id: I4f9f9039ba92831bc48971964aa063244c9fed72
Diffstat (limited to 'absl/container')
| -rw-r--r-- | absl/container/BUILD.bazel | 3 |
| -rw-r--r-- | absl/container/CMakeLists.txt | 3 |
| -rw-r--r-- | absl/container/btree_benchmark.cc | 64 |
| -rw-r--r-- | absl/container/btree_map.h | 20 |
| -rw-r--r-- | absl/container/btree_set.h | 20 |
| -rw-r--r-- | absl/container/btree_test.cc | 14 |
| -rw-r--r-- | absl/container/flat_hash_map.h | 1 |
| -rw-r--r-- | absl/container/internal/btree.h | 46 |
| -rw-r--r-- | absl/container/internal/btree_container.h | 1 |
| -rw-r--r-- | absl/container/internal/hashtablez_sampler.cc | 18 |
| -rw-r--r-- | absl/container/internal/hashtablez_sampler.h | 16 |
| -rw-r--r-- | absl/container/internal/hashtablez_sampler_test.cc | 41 |
| -rw-r--r-- | absl/container/internal/raw_hash_set_test.cc | 3 |
| -rw-r--r-- | absl/container/node_hash_map.h | 1 |
| -rw-r--r-- | absl/container/node_hash_set.h | 1 |
| -rw-r--r-- | absl/container/sample_element_size_test.cc | 3 |
16 files changed, 158 insertions, 97 deletions
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index ea2c98b8..728c4be1 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -241,6 +241,7 @@ cc_library(
         ":hash_function_defaults",
         ":raw_hash_map",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
@@ -310,6 +311,7 @@ cc_library(
         ":node_slot_policy",
         ":raw_hash_map",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
@@ -342,6 +344,7 @@ cc_library(
         ":node_slot_policy",
         ":raw_hash_set",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index bb718cf8..b819deeb 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -274,6 +274,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::container_memory
+    absl::core_headers
     absl::hash_function_defaults
     absl::raw_hash_map
     absl::algorithm_container
@@ -347,6 +348,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::container_memory
+    absl::core_headers
    absl::hash_function_defaults
     absl::node_slot_policy
     absl::raw_hash_map
@@ -381,6 +383,7 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::core_headers
     absl::hash_function_defaults
     absl::node_slot_policy
     absl::raw_hash_set
diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc
index 65b6790b..0ca497c8 100644
--- a/absl/container/btree_benchmark.cc
+++ b/absl/container/btree_benchmark.cc
@@ -153,9 +153,9 @@ void BM_FullLookup(benchmark::State& state) {
   BM_LookupImpl<T>(state, true);
 }
 
-// Benchmark deletion of values from a container.
+// Benchmark erasing values from a container.
 template <typename T>
-void BM_Delete(benchmark::State& state) {
+void BM_Erase(benchmark::State& state) {
   using V = typename remove_pair_const<typename T::value_type>::type;
   typename KeyOfValue<typename T::key_type, V>::type key_of_value;
   std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -180,9 +180,9 @@
   }
 }
 
-// Benchmark deletion of multiple values from a container.
+// Benchmark erasing multiple values from a container.
 template <typename T>
-void BM_DeleteRange(benchmark::State& state) {
+void BM_EraseRange(benchmark::State& state) {
   using V = typename remove_pair_const<typename T::value_type>::type;
   typename KeyOfValue<typename T::key_type, V>::type key_of_value;
   std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -222,6 +222,40 @@
   }
 }
 
+// Predicate that erases every other element. We can't use a lambda because
+// C++11 doesn't support generic lambdas.
+// TODO(b/207389011): consider adding benchmarks that remove different
+// fractions of keys (e.g. 10%, 90%).
+struct EraseIfPred {
+  uint64_t i = 0;
+  template <typename T>
+  bool operator()(const T&) {
+    return ++i % 2;
+  }
+};
+
+// Benchmark erasing multiple values from a container with a predicate.
+template <typename T>
+void BM_EraseIf(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+
+  // Removes half of the keys per batch.
+  const int batch_size = (kBenchmarkValues + 1) / 2;
+  EraseIfPred pred;
+  while (state.KeepRunningBatch(batch_size)) {
+    state.PauseTiming();
+    {
+      T container(values.begin(), values.end());
+      state.ResumeTiming();
+      erase_if(container, pred);
+      benchmark::DoNotOptimize(container);
+      state.PauseTiming();
+    }
+    state.ResumeTiming();
+  }
+}
+
 // Benchmark steady-state insert (into first half of range) and remove (from
 // second half of range), treating the container approximately like a queue with
 // log-time access for all elements. This benchmark does not test the case where
@@ -477,14 +511,14 @@ BTREE_TYPES(Time);
   void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
   BENCHMARK(BM_##type##_##func)
 
-#define MY_BENCHMARK3(type) \
+#define MY_BENCHMARK3_STL(type) \
   MY_BENCHMARK4(type, Insert); \
   MY_BENCHMARK4(type, InsertSorted); \
   MY_BENCHMARK4(type, InsertSmall); \
   MY_BENCHMARK4(type, Lookup); \
   MY_BENCHMARK4(type, FullLookup); \
-  MY_BENCHMARK4(type, Delete); \
-  MY_BENCHMARK4(type, DeleteRange); \
+  MY_BENCHMARK4(type, Erase); \
+  MY_BENCHMARK4(type, EraseRange); \
   MY_BENCHMARK4(type, QueueAddRem); \
   MY_BENCHMARK4(type, MixedAddRem); \
   MY_BENCHMARK4(type, Fifo); \
@@ -492,9 +526,13 @@ BTREE_TYPES(Time);
   MY_BENCHMARK4(type, InsertRangeRandom); \
   MY_BENCHMARK4(type, InsertRangeSorted)
 
+#define MY_BENCHMARK3(type) \
+  MY_BENCHMARK4(type, EraseIf); \
+  MY_BENCHMARK3_STL(type)
+
 #define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
-  MY_BENCHMARK3(stl_##type); \
-  MY_BENCHMARK3(stl_unordered_##type); \
+  MY_BENCHMARK3_STL(stl_##type); \
+  MY_BENCHMARK3_STL(stl_unordered_##type); \
   MY_BENCHMARK3(btree_256_##type)
 
 #define MY_BENCHMARK2(type) \
@@ -684,12 +722,12 @@ double ContainerInfo(const btree_map<int, BigTypePtr<Size>>& b) {
       btree_set<BigTypePtr<SIZE>>; \
   using btree_256_map_size##SIZE##copies##SIZE##ptr = \
       btree_map<int, BigTypePtr<SIZE>>; \
-  MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr); \
-  MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \
+  MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr); \
+  MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr); \
-  MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr); \
-  MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \
+  MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr); \
+  MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index 797b949f..ad484ce0 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -482,15 +482,7 @@ void swap(btree_map<K, V, C, A> &x, btree_map<K, V, C, A> &y) {
 template <typename K, typename V, typename C, typename A, typename Pred>
 typename btree_map<K, V, C, A>::size_type erase_if(
     btree_map<K, V, C, A> &map, Pred pred) {
-  const auto initial_size = map.size();
-  for (auto it = map.begin(); it != map.end();) {
-    if (pred(*it)) {
-      it = map.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - map.size();
+  return container_internal::btree_access::erase_if(map, std::move(pred));
 }
 
 // absl::btree_multimap
@@ -817,15 +809,7 @@ void swap(btree_multimap<K, V, C, A> &x, btree_multimap<K, V, C, A> &y) {
 template <typename K, typename V, typename C, typename A, typename Pred>
 typename btree_multimap<K, V, C, A>::size_type erase_if(
     btree_multimap<K, V, C, A> &map, Pred pred) {
-  const auto initial_size = map.size();
-  for (auto it = map.begin(); it != map.end();) {
-    if (pred(*it)) {
-      it = map.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - map.size();
+  return container_internal::btree_access::erase_if(map, std::move(pred));
 }
 
 namespace container_internal {
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
index 2c217d8f..78826830 100644
--- a/absl/container/btree_set.h
+++ b/absl/container/btree_set.h
@@ -402,15 +402,7 @@ void swap(btree_set<K, C, A> &x, btree_set<K, C, A> &y) {
 template <typename K, typename C, typename A, typename Pred>
 typename btree_set<K, C, A>::size_type erase_if(btree_set<K, C, A> &set,
                                                 Pred pred) {
-  const auto initial_size = set.size();
-  for (auto it = set.begin(); it != set.end();) {
-    if (pred(*it)) {
-      it = set.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - set.size();
+  return container_internal::btree_access::erase_if(set, std::move(pred));
 }
 
 // absl::btree_multiset<>
@@ -732,15 +724,7 @@ void swap(btree_multiset<K, C, A> &x, btree_multiset<K, C, A> &y) {
 template <typename K, typename C, typename A, typename Pred>
 typename btree_multiset<K, C, A>::size_type erase_if(
     btree_multiset<K, C, A> &set, Pred pred) {
-  const auto initial_size = set.size();
-  for (auto it = set.begin(); it != set.end();) {
-    if (pred(*it)) {
-      it = set.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - set.size();
+  return container_internal::btree_access::erase_if(set, std::move(pred));
 }
 
 namespace container_internal {
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index 650c51db..13cb8186 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -2490,6 +2490,20 @@ TEST(Btree, EraseIf) {
     EXPECT_EQ(erase_if(s, &IsEven), 2);
     EXPECT_THAT(s, ElementsAre(1, 3, 5));
   }
+  // Test that erase_if invokes the predicate once per element.
+  {
+    absl::btree_set<int> s;
+    for (int i = 0; i < 1000; ++i) s.insert(i);
+    int pred_calls = 0;
+    EXPECT_EQ(erase_if(s,
+                       [&pred_calls](int k) {
+                         ++pred_calls;
+                         return k % 2;
+                       }),
+              500);
+    EXPECT_THAT(s, SizeIs(500));
+    EXPECT_EQ(pred_calls, 1000);
+  }
 }
 
 TEST(Btree, InsertOrAssign) {
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index 69fbf239..83c71029 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -36,6 +36,7 @@
 #include <utility>
 
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/raw_hash_map.h"  // IWYU pragma: export
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index bf65a03d..29603379 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -830,6 +830,7 @@ class btree_node {
   template <typename N, typename R, typename P>
   friend struct btree_iterator;
   friend class BtreeNodePeer;
+  friend struct btree_access;
 };
 
 template <typename Node, typename Reference, typename Pointer>
@@ -964,6 +965,7 @@ struct btree_iterator {
   friend class btree_multiset_container;
   template <typename TreeType, typename CheckerType>
   friend class base_checker;
+  friend struct btree_access;
 
   const key_type &key() const { return node->key(position); }
   slot_type *slot() { return node->slot(position); }
@@ -1336,6 +1338,8 @@ class btree {
   allocator_type get_allocator() const { return allocator(); }
 
  private:
+  friend struct btree_access;
+
   // Internal accessor routines.
   node_type *root() { return root_.template get<2>(); }
   const node_type *root() const { return root_.template get<2>(); }
@@ -2530,6 +2534,48 @@ int btree<P>::internal_verify(const node_type *node, const key_type *lo,
   return count;
 }
 
+struct btree_access {
+  template <typename BtreeContainer, typename Pred>
+  static auto erase_if(BtreeContainer &container, Pred pred)
+      -> typename BtreeContainer::size_type {
+    const auto initial_size = container.size();
+    auto &tree = container.tree_;
+    auto *alloc = tree.mutable_allocator();
+    for (auto it = container.begin(); it != container.end();) {
+      if (!pred(*it)) {
+        ++it;
+        continue;
+      }
+      auto *node = it.node;
+      if (!node->leaf()) {
+        // Handle internal nodes normally.
+        it = container.erase(it);
+        continue;
+      }
+      // If this is a leaf node, then we do all the erases from this node
+      // at once before doing rebalancing.
+
+      // The current position to transfer slots to.
+      int to_pos = it.position;
+      node->value_destroy(it.position, alloc);
+      while (++it.position < node->finish()) {
+        if (pred(*it)) {
+          node->value_destroy(it.position, alloc);
+        } else {
+          node->transfer(node->slot(to_pos++), node->slot(it.position),
+                         alloc);
+        }
+      }
+      const int num_deleted = node->finish() - to_pos;
+      tree.size_ -= num_deleted;
+      node->set_finish(to_pos);
+      it.position = to_pos;
+      it = tree.rebalance_after_delete(it);
+    }
+    return initial_size - container.size();
+  }
+};
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index a99668c7..d28b2446 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -228,6 +228,7 @@ class btree_container {
   }
 
  protected:
+  friend struct btree_access;
   Tree tree_;
 };
 
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index a3e15a70..1d24db5f 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -61,13 +61,10 @@ HashtablezSampler& GlobalHashtablezSampler() {
   return *sampler;
 }
 
-// TODO(bradleybear): The comments at this constructors declaration say that the
-// fields are not initialized, but this definition does initialize the fields.
-// Something needs to be cleaned up.
-HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::HashtablezInfo() = default;
 HashtablezInfo::~HashtablezInfo() = default;
 
-void HashtablezInfo::PrepareForSampling() {
+void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
   capacity.store(0, std::memory_order_relaxed);
   size.store(0, std::memory_order_relaxed);
   num_erases.store(0, std::memory_order_relaxed);
@@ -85,6 +82,7 @@ void HashtablezInfo::PrepareForSampling() {
   // instead.
   depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
                               /* skip_count= */ 0);
+  inline_element_size = inline_element_size_value;
 }
 
 static bool ShouldForceSampling() {
@@ -110,9 +108,8 @@ static bool ShouldForceSampling() {
 HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
   if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
     *next_sample = 1;
-    HashtablezInfo* result = GlobalHashtablezSampler().Register();
-    result->inline_element_size.store(inline_element_size,
-                                      std::memory_order_relaxed);
+    HashtablezInfo* result =
+        GlobalHashtablezSampler().Register(inline_element_size);
     return result;
   }
 
@@ -138,10 +135,7 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
     return SampleSlow(next_sample, inline_element_size);
   }
 
-  HashtablezInfo* result = GlobalHashtablezSampler().Register();
-  result->inline_element_size.store(inline_element_size,
-                                    std::memory_order_relaxed);
-  return result;
+  return GlobalHashtablezSampler().Register(inline_element_size);
 #endif
 }
 
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 0fd9349f..6738786c 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -67,7 +67,8 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   // Puts the object into a clean state, fills in the logically `const` members,
   // blocking for any readers that are currently sampling the object.
-  void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+  void PrepareForSampling(size_t inline_element_size_value)
+      ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
 
   // These fields are mutated by the various Record* APIs and need to be
   // thread-safe.
@@ -82,18 +83,6 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   std::atomic<size_t> hashes_bitwise_xor;
   std::atomic<size_t> max_reserve;
 
-  // One could imagine that inline_element_size could be non-atomic, since it
-  // *almost* follows the rules for the fields that are set by
-  // `PrepareForSampling`. However, TSAN reports a race (see b/207323922) in
-  // which
-  //  A: Thread 1: Register() returns, unlocking init_mu.
-  //  B: Thread 2: Iterate() is called, locking init_mu.
-  //  C: Thread 1: inlined_element_size is stored.
-  //  D: Thread 2: inlined_element_size is accessed (a race).
-  // A simple solution is to make inline_element_size atomic so that we treat
-  // it at as we do the other atomic fields.
-  std::atomic<size_t> inline_element_size;
-
   // All of the fields below are set by `PrepareForSampling`, they must not be
   // mutated in `Record*` functions. They are logically `const` in that sense.
   // These are guarded by init_mu, but that is not externalized to clients,
@@ -103,6 +92,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   absl::Time create_time;
   int32_t depth;
   void* stack[kMaxStackDepth];
+  size_t inline_element_size;  // How big is the slot?
 };
 
 inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index a7307a20..ac6ed9b1 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -70,7 +70,8 @@ std::vector<size_t> GetSizes(HashtablezSampler* s) {
 }
 
 HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
-  auto* info = s->Register();
+  const size_t test_element_size = 17;
+  auto* info = s->Register(test_element_size);
   assert(info != nullptr);
   info->size.store(size);
   return info;
@@ -81,9 +82,8 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   const size_t test_element_size = 17;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  info.PrepareForSampling(test_element_size);
 
-  info.inline_element_size.store(test_element_size, std::memory_order_relaxed);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -95,7 +95,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
   EXPECT_GE(info.create_time, test_start);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 
   info.capacity.store(1, std::memory_order_relaxed);
   info.size.store(1, std::memory_order_relaxed);
@@ -108,7 +108,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   info.max_reserve.store(1, std::memory_order_relaxed);
   info.create_time = test_start - absl::Hours(20);
 
-  info.PrepareForSampling();
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -119,14 +119,15 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
   EXPECT_GE(info.create_time, test_start);
 }
 
 TEST(HashtablezInfoTest, RecordStorageChanged) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const size_t test_element_size = 19;
+  info.PrepareForSampling(test_element_size);
   RecordStorageChangedSlow(&info, 17, 47);
   EXPECT_EQ(info.size.load(), 17);
   EXPECT_EQ(info.capacity.load(), 47);
@@ -138,7 +139,8 @@ TEST(HashtablezInfoTest, RecordStorageChanged) {
 TEST(HashtablezInfoTest, RecordInsert) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const size_t test_element_size = 23;
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.max_probe_length.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
   EXPECT_EQ(info.max_probe_length.load(), 6);
@@ -161,8 +163,7 @@ TEST(HashtablezInfoTest, RecordErase) {
   const size_t test_element_size = 29;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
-  info.inline_element_size.store(test_element_size);
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -170,15 +171,14 @@ TEST(HashtablezInfoTest, RecordErase) {
   RecordEraseSlow(&info);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 1);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 }
 
 TEST(HashtablezInfoTest, RecordRehash) {
   const size_t test_element_size = 31;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
-  info.inline_element_size.store(test_element_size);
+  info.PrepareForSampling(test_element_size);
   RecordInsertSlow(&info, 0x1, 0);
   RecordInsertSlow(&info, 0x2, kProbeLength);
   RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -197,13 +197,14 @@ TEST(HashtablezInfoTest, RecordRehash) {
   EXPECT_EQ(info.total_probe_length.load(), 3);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.num_rehashes.load(), 1);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 }
 
 TEST(HashtablezInfoTest, RecordReservation) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const size_t test_element_size = 33;
+  info.PrepareForSampling(test_element_size);
   RecordReservationSlow(&info, 3);
   EXPECT_EQ(info.max_reserve.load(), 3);
 
@@ -266,7 +267,8 @@ TEST(HashtablezSamplerTest, Sample) {
 
 TEST(HashtablezSamplerTest, Handle) {
   auto& sampler = GlobalHashtablezSampler();
-  HashtablezInfoHandle h(sampler.Register());
+  const size_t test_element_size = 39;
+  HashtablezInfoHandle h(sampler.Register(test_element_size));
   auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
   info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
 
@@ -338,18 +340,19 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
   ThreadPool pool(10);
 
   for (int i = 0; i < 10; ++i) {
-    pool.Schedule([&sampler, &stop]() {
+    const size_t elt_size = 10 + i % 2;
+    pool.Schedule([&sampler, &stop, elt_size]() {
       std::random_device rd;
       std::mt19937 gen(rd());
 
       std::vector<HashtablezInfo*> infoz;
       while (!stop.HasBeenNotified()) {
         if (infoz.empty()) {
-          infoz.push_back(sampler.Register());
+          infoz.push_back(sampler.Register(elt_size));
         }
         switch (std::uniform_int_distribution<>(0, 2)(gen)) {
           case 0: {
-            infoz.push_back(sampler.Register());
+            infoz.push_back(sampler.Register(elt_size));
             break;
           }
           case 1: {
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 4b9f6cc0..362b3cae 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2075,8 +2075,7 @@ TEST(RawHashSamplerTest, Sample) {
                              std::memory_order_relaxed)]++;
       reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
     }
-    EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
-              sizeof(int64_t));
+    EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
     ++end_size;
   });
 
diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h
index 0d4ebeda..ca1ed408 100644
--- a/absl/container/node_hash_map.h
+++ b/absl/container/node_hash_map.h
@@ -41,6 +41,7 @@
 #include <utility>
 
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/node_slot_policy.h"
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
index 3bc5fe6b..9421e11e 100644
--- a/absl/container/node_hash_set.h
+++ b/absl/container/node_hash_set.h
@@ -38,6 +38,7 @@
 #include <type_traits>
 
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/node_slot_policy.h"
 #include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
diff --git a/absl/container/sample_element_size_test.cc b/absl/container/sample_element_size_test.cc
index 4bbef210..b23626b4 100644
--- a/absl/container/sample_element_size_test.cc
+++ b/absl/container/sample_element_size_test.cc
@@ -51,8 +51,7 @@ void TestInlineElementSize(
   size_t new_count = 0;
   sampler.Iterate([&](const HashtablezInfo& info) {
     if (preexisting_info.insert(&info).second) {
-      EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
-                expected_element_size);
+      EXPECT_EQ(info.inline_element_size, expected_element_size);
       ++new_count;
     }
   });