23 files changed, 192 insertions, 155 deletions
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index ea2c98b8..728c4be1 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -241,6 +241,7 @@ cc_library(
         ":hash_function_defaults",
         ":raw_hash_map",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
@@ -310,6 +311,7 @@ cc_library(
         ":node_slot_policy",
         ":raw_hash_map",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
        "//absl/memory",
     ],
 )
@@ -342,6 +344,7 @@ cc_library(
         ":node_slot_policy",
         ":raw_hash_set",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
        "//absl/memory",
     ],
 )
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index bb718cf8..b819deeb 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -274,6 +274,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::container_memory
+    absl::core_headers
     absl::hash_function_defaults
     absl::raw_hash_map
     absl::algorithm_container
@@ -347,6 +348,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::container_memory
+    absl::core_headers
     absl::hash_function_defaults
     absl::node_slot_policy
     absl::raw_hash_map
@@ -381,6 +383,7 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::core_headers
     absl::hash_function_defaults
     absl::node_slot_policy
     absl::raw_hash_set
diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc
index 65b6790b..0ca497c8 100644
--- a/absl/container/btree_benchmark.cc
+++ b/absl/container/btree_benchmark.cc
@@ -153,9 +153,9 @@ void BM_FullLookup(benchmark::State& state) {
   BM_LookupImpl<T>(state, true);
 }
 
-// Benchmark deletion of values from a container.
+// Benchmark erasing values from a container.
 template <typename T>
-void BM_Delete(benchmark::State& state) {
+void BM_Erase(benchmark::State& state) {
   using V = typename remove_pair_const<typename T::value_type>::type;
   typename KeyOfValue<typename T::key_type, V>::type key_of_value;
   std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -180,9 +180,9 @@ void BM_Delete(benchmark::State& state) {
   }
 }
 
-// Benchmark deletion of multiple values from a container.
+// Benchmark erasing multiple values from a container.
 template <typename T>
-void BM_DeleteRange(benchmark::State& state) {
+void BM_EraseRange(benchmark::State& state) {
   using V = typename remove_pair_const<typename T::value_type>::type;
   typename KeyOfValue<typename T::key_type, V>::type key_of_value;
   std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -222,6 +222,40 @@ void BM_DeleteRange(benchmark::State& state) {
   }
 }
 
+// Predicate that erases every other element. We can't use a lambda because
+// C++11 doesn't support generic lambdas.
+// TODO(b/207389011): consider adding benchmarks that remove different fractions
+// of keys (e.g. 10%, 90%).
+struct EraseIfPred {
+  uint64_t i = 0;
+  template <typename T>
+  bool operator()(const T&) {
+    return ++i % 2;
+  }
+};
+
+// Benchmark erasing multiple values from a container with a predicate.
+template <typename T>
+void BM_EraseIf(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+
+  // Removes half of the keys per batch.
+  const int batch_size = (kBenchmarkValues + 1) / 2;
+  EraseIfPred pred;
+  while (state.KeepRunningBatch(batch_size)) {
+    state.PauseTiming();
+    {
+      T container(values.begin(), values.end());
+      state.ResumeTiming();
+      erase_if(container, pred);
+      benchmark::DoNotOptimize(container);
+      state.PauseTiming();
+    }
+    state.ResumeTiming();
+  }
+}
+
 // Benchmark steady-state insert (into first half of range) and remove (from
 // second half of range), treating the container approximately like a queue with
 // log-time access for all elements. This benchmark does not test the case where
@@ -477,14 +511,14 @@ BTREE_TYPES(Time);
   void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
   BENCHMARK(BM_##type##_##func)
 
-#define MY_BENCHMARK3(type)               \
+#define MY_BENCHMARK3_STL(type)           \
   MY_BENCHMARK4(type, Insert);            \
   MY_BENCHMARK4(type, InsertSorted);      \
   MY_BENCHMARK4(type, InsertSmall);       \
   MY_BENCHMARK4(type, Lookup);            \
   MY_BENCHMARK4(type, FullLookup);        \
-  MY_BENCHMARK4(type, Delete);            \
-  MY_BENCHMARK4(type, DeleteRange);       \
+  MY_BENCHMARK4(type, Erase);             \
+  MY_BENCHMARK4(type, EraseRange);        \
   MY_BENCHMARK4(type, QueueAddRem);       \
   MY_BENCHMARK4(type, MixedAddRem);       \
   MY_BENCHMARK4(type, Fifo);              \
@@ -492,9 +526,13 @@ BTREE_TYPES(Time);
   MY_BENCHMARK4(type, InsertRangeRandom); \
   MY_BENCHMARK4(type, InsertRangeSorted)
 
+#define MY_BENCHMARK3(type)     \
+  MY_BENCHMARK4(type, EraseIf); \
+  MY_BENCHMARK3_STL(type)
+
 #define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
-  MY_BENCHMARK3(stl_##type);                    \
-  MY_BENCHMARK3(stl_unordered_##type);          \
+  MY_BENCHMARK3_STL(stl_##type);                \
+  MY_BENCHMARK3_STL(stl_unordered_##type);      \
   MY_BENCHMARK3(btree_256_##type)
 
 #define MY_BENCHMARK2(type)  \
@@ -684,12 +722,12 @@ double ContainerInfo(const btree_map<int, BigTypePtr<Size>>& b) {
       btree_set<BigTypePtr<SIZE>>;                                 \
   using btree_256_map_size##SIZE##copies##SIZE##ptr =              \
       btree_map<int, BigTypePtr<SIZE>>;                            \
-  MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr);            \
-  MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr);  \
+  MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr);        \
+  MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr);      \
   MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr);      \
-  MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr);            \
-  MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr);  \
+  MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr);        \
+  MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr);      \
   MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)
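
The added BM_EraseIf benchmark measures only the erase_if call: the container is rebuilt and destroyed while timing is paused, and KeepRunningBatch counts each pass as batch_size iterations, roughly one per erased key. A minimal standalone sketch of the same pattern, assuming Google Benchmark and C++20 std::erase_if on a plain std::set (the names below are illustrative, not part of this patch):

#include <cstdint>
#include <set>
#include <vector>

#include "benchmark/benchmark.h"

namespace {

// Stateful predicate that erases every other element it is shown, mirroring
// the EraseIfPred above.
struct EveryOther {
  std::uint64_t i = 0;
  bool operator()(int) { return ++i % 2 != 0; }
};

void BM_SetEraseIf(benchmark::State& state) {
  std::vector<int> values;
  for (int i = 0; i < 1000; ++i) values.push_back(i);
  // Roughly half of the keys are erased per rebuilt container.
  const int batch_size = (1000 + 1) / 2;
  while (state.KeepRunningBatch(batch_size)) {
    state.PauseTiming();
    {
      std::set<int> container(values.begin(), values.end());
      state.ResumeTiming();
      std::erase_if(container, EveryOther{});  // only this call is timed
      benchmark::DoNotOptimize(container);
      state.PauseTiming();  // keep destruction of `container` untimed
    }
    state.ResumeTiming();
  }
}
BENCHMARK(BM_SetEraseIf);

}  // namespace

BENCHMARK_MAIN();
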
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index 797b949f..ad484ce0 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -482,15 +482,7 @@ void swap(btree_map<K, V, C, A> &x, btree_map<K, V, C, A> &y) {
 template <typename K, typename V, typename C, typename A, typename Pred>
 typename btree_map<K, V, C, A>::size_type erase_if(
     btree_map<K, V, C, A> &map, Pred pred) {
-  const auto initial_size = map.size();
-  for (auto it = map.begin(); it != map.end();) {
-    if (pred(*it)) {
-      it = map.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - map.size();
+  return container_internal::btree_access::erase_if(map, std::move(pred));
 }
 
 // absl::btree_multimap
@@ -817,15 +809,7 @@ void swap(btree_multimap<K, V, C, A> &x, btree_multimap<K, V, C, A> &y) {
 template <typename K, typename V, typename C, typename A, typename Pred>
 typename btree_multimap<K, V, C, A>::size_type erase_if(
     btree_multimap<K, V, C, A> &map, Pred pred) {
-  const auto initial_size = map.size();
-  for (auto it = map.begin(); it != map.end();) {
-    if (pred(*it)) {
-      it = map.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - map.size();
+  return container_internal::btree_access::erase_if(map, std::move(pred));
 }
 
 namespace container_internal {
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
index 2c217d8f..78826830 100644
--- a/absl/container/btree_set.h
+++ b/absl/container/btree_set.h
@@ -402,15 +402,7 @@ void swap(btree_set<K, C, A> &x, btree_set<K, C, A> &y) {
 template <typename K, typename C, typename A, typename Pred>
 typename btree_set<K, C, A>::size_type erase_if(btree_set<K, C, A> &set,
                                                 Pred pred) {
-  const auto initial_size = set.size();
-  for (auto it = set.begin(); it != set.end();) {
-    if (pred(*it)) {
-      it = set.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - set.size();
+  return container_internal::btree_access::erase_if(set, std::move(pred));
 }
 
 // absl::btree_multiset<>
@@ -732,15 +724,7 @@ void swap(btree_multiset<K, C, A> &x, btree_multiset<K, C, A> &y) {
 template <typename K, typename C, typename A, typename Pred>
 typename btree_multiset<K, C, A>::size_type erase_if(
     btree_multiset<K, C, A> & set, Pred pred) {
-  const auto initial_size = set.size();
-  for (auto it = set.begin(); it != set.end();) {
-    if (pred(*it)) {
-      it = set.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - set.size();
+  return container_internal::btree_access::erase_if(set, std::move(pred));
 }
 
 namespace container_internal {
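
The public erase_if signatures above are unchanged by this refactor: they still take the container and a predicate and return the number of erased elements; only the implementation is routed through container_internal::btree_access. A small usage sketch (the values are made up for illustration):

#include <iostream>
#include <string>
#include <utility>

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, std::string> m = {
      {1, "one"}, {2, "two"}, {3, "three"}, {4, "four"}};

  // Erase every entry with an even key; the return value is the erased count.
  auto erased = absl::erase_if(
      m, [](const std::pair<const int, std::string>& kv) {
        return kv.first % 2 == 0;
      });

  std::cout << "erased " << erased << ", remaining " << m.size() << "\n";
  // Prints: erased 2, remaining 2
}
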
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index 650c51db..13cb8186 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -2490,6 +2490,20 @@ TEST(Btree, EraseIf) {
     EXPECT_EQ(erase_if(s, &IsEven), 2);
     EXPECT_THAT(s, ElementsAre(1, 3, 5));
   }
+  // Test that erase_if invokes the predicate once per element.
+  {
+    absl::btree_set<int> s;
+    for (int i = 0; i < 1000; ++i) s.insert(i);
+    int pred_calls = 0;
+    EXPECT_EQ(erase_if(s,
+                       [&pred_calls](int k) {
+                         ++pred_calls;
+                         return k % 2;
+                       }),
+              500);
+    EXPECT_THAT(s, SizeIs(500));
+    EXPECT_EQ(pred_calls, 1000);
+  }
 }
 
 TEST(Btree, InsertOrAssign) {
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index 69fbf239..83c71029 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -36,6 +36,7 @@
 #include <utility>
 
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/raw_hash_map.h"  // IWYU pragma: export
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index bf65a03d..29603379 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -830,6 +830,7 @@ class btree_node {
   template <typename N, typename R, typename P>
   friend struct btree_iterator;
   friend class BtreeNodePeer;
+  friend struct btree_access;
 };
 
 template <typename Node, typename Reference, typename Pointer>
@@ -964,6 +965,7 @@ struct btree_iterator {
   friend class btree_multiset_container;
   template <typename TreeType, typename CheckerType>
   friend class base_checker;
+  friend struct btree_access;
 
   const key_type &key() const { return node->key(position); }
   slot_type *slot() { return node->slot(position); }
@@ -1336,6 +1338,8 @@ class btree {
   allocator_type get_allocator() const { return allocator(); }
 
  private:
+  friend struct btree_access;
+
   // Internal accessor routines.
   node_type *root() { return root_.template get<2>(); }
   const node_type *root() const { return root_.template get<2>(); }
@@ -2530,6 +2534,48 @@ int btree<P>::internal_verify(const node_type *node, const key_type *lo,
   return count;
 }
 
+struct btree_access {
+  template <typename BtreeContainer, typename Pred>
+  static auto erase_if(BtreeContainer &container, Pred pred)
+      -> typename BtreeContainer::size_type {
+    const auto initial_size = container.size();
+    auto &tree = container.tree_;
+    auto *alloc = tree.mutable_allocator();
+    for (auto it = container.begin(); it != container.end();) {
+      if (!pred(*it)) {
+        ++it;
+        continue;
+      }
+      auto *node = it.node;
+      if (!node->leaf()) {
+        // Handle internal nodes normally.
+        it = container.erase(it);
+        continue;
+      }
+      // If this is a leaf node, then we do all the erases from this node
+      // at once before doing rebalancing.
+
+      // The current position to transfer slots to.
+      int to_pos = it.position;
+      node->value_destroy(it.position, alloc);
+      while (++it.position < node->finish()) {
+        if (pred(*it)) {
+          node->value_destroy(it.position, alloc);
+        } else {
+          node->transfer(node->slot(to_pos++), node->slot(it.position),
+                         alloc);
+        }
+      }
+      const int num_deleted = node->finish() - to_pos;
+      tree.size_ -= num_deleted;
+      node->set_finish(to_pos);
+      it.position = to_pos;
+      it = tree.rebalance_after_delete(it);
+    }
+    return initial_size - container.size();
+  }
+};
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
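
The interesting part of btree_access::erase_if is the leaf fast path: instead of calling erase() once per matching element (each of which may rebalance the tree), it destroys matching slots and compacts the survivors within the leaf in one pass, then rebalances once. The same two-index compaction idea, shown on a flat array as a simplified sketch (a std::vector stands in for a leaf node's slots; this is not the btree code itself):

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

// Compacts `v` in place, dropping elements that match `pred`, and returns how
// many were removed. `to_pos` plays the role of "the current position to
// transfer slots to" in btree_access::erase_if; the predicate is invoked
// exactly once per element.
template <typename T, typename Pred>
std::size_t CompactIf(std::vector<T>& v, Pred pred) {
  std::size_t to_pos = 0;
  for (std::size_t i = 0; i < v.size(); ++i) {
    if (!pred(v[i])) {
      v[to_pos++] = std::move(v[i]);  // transfer the surviving slot
    }
  }
  const std::size_t num_deleted = v.size() - to_pos;
  v.resize(to_pos);  // analogous to node->set_finish(to_pos)
  return num_deleted;
}

int main() {
  std::vector<int> slots = {1, 2, 3, 4, 5, 6, 7};
  std::size_t removed = CompactIf(slots, [](int x) { return x % 2 == 0; });
  std::cout << "removed " << removed << ", kept " << slots.size() << "\n";
}
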
diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index a99668c7..d28b2446 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -228,6 +228,7 @@ class btree_container {
   }
 
  protected:
+  friend struct btree_access;
   Tree tree_;
 };
 
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index a3e15a70..1d24db5f 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -61,13 +61,10 @@ HashtablezSampler& GlobalHashtablezSampler() {
   return *sampler;
 }
 
-// TODO(bradleybear): The comments at this constructors declaration say that the
-// fields are not initialized, but this definition does initialize the fields.
-// Something needs to be cleaned up.
-HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::HashtablezInfo() = default;
 HashtablezInfo::~HashtablezInfo() = default;
 
-void HashtablezInfo::PrepareForSampling() {
+void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
   capacity.store(0, std::memory_order_relaxed);
   size.store(0, std::memory_order_relaxed);
   num_erases.store(0, std::memory_order_relaxed);
@@ -85,6 +82,7 @@ void HashtablezInfo::PrepareForSampling() {
   // instead.
   depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
                               /* skip_count= */ 0);
+  inline_element_size = inline_element_size_value;
 }
 
 static bool ShouldForceSampling() {
@@ -110,9 +108,8 @@ static bool ShouldForceSampling() {
 HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
   if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
     *next_sample = 1;
-    HashtablezInfo* result = GlobalHashtablezSampler().Register();
-    result->inline_element_size.store(inline_element_size,
-                                      std::memory_order_relaxed);
+    HashtablezInfo* result =
+        GlobalHashtablezSampler().Register(inline_element_size);
     return result;
   }
 
@@ -138,10 +135,7 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
     return SampleSlow(next_sample, inline_element_size);
   }
 
-  HashtablezInfo* result = GlobalHashtablezSampler().Register();
-  result->inline_element_size.store(inline_element_size,
-                                    std::memory_order_relaxed);
-  return result;
+  return GlobalHashtablezSampler().Register(inline_element_size);
 #endif
 }
 
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 0fd9349f..6738786c 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -67,7 +67,8 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
 
   // Puts the object into a clean state, fills in the logically `const` members,
   // blocking for any readers that are currently sampling the object.
-  void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+  void PrepareForSampling(size_t inline_element_size_value)
+      ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
 
   // These fields are mutated by the various Record* APIs and need to be
   // thread-safe.
@@ -82,18 +83,6 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   std::atomic<size_t> hashes_bitwise_xor;
   std::atomic<size_t> max_reserve;
 
-  // One could imagine that inline_element_size could be non-atomic, since it
-  // *almost* follows the rules for the fields that are set by
-  // `PrepareForSampling`. However, TSAN reports a race (see b/207323922) in
-  // which
-  // A: Thread 1: Register() returns, unlocking init_mu.
-  // B: Thread 2: Iterate() is called, locking init_mu.
-  // C: Thread 1: inlined_element_size is stored.
-  // D: Thread 2: inlined_element_size is accessed (a race).
-  // A simple solution is to make inline_element_size atomic so that we treat it
-  // at as we do the other atomic fields.
-  std::atomic<size_t> inline_element_size;
-
   // All of the fields below are set by `PrepareForSampling`, they must not be
   // mutated in `Record*` functions. They are logically `const` in that sense.
   // These are guarded by init_mu, but that is not externalized to clients,
@@ -103,6 +92,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   absl::Time create_time;
   int32_t depth;
   void* stack[kMaxStackDepth];
+  size_t inline_element_size;  // How big is the slot?
 };
 
 inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
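
The reason inline_element_size can drop back to a plain size_t is ordering: it is now filled in by PrepareForSampling() while init_mu is held, before Register() publishes the sample, rather than being stored by the caller after Register() returns (the TSAN race described in the deleted comment). A stripped-down sketch of that ordering, using toy types rather than the real Abseil classes:

#include <cstddef>
#include <mutex>
#include <vector>

// Toy stand-ins for HashtablezInfo/SampleRecorder, kept single-threaded here;
// only the lock-then-publish order is the point.
struct Sample {
  std::mutex init_mu;
  std::size_t inline_element_size = 0;  // logically const after registration

  // Fills in the logically-const members; callers hold init_mu.
  void PrepareForSampling(std::size_t inline_element_size_value) {
    inline_element_size = inline_element_size_value;
  }
};

class Recorder {
 public:
  Sample* Register(std::size_t inline_element_size) {
    auto* s = new Sample;
    {
      std::lock_guard<std::mutex> lock(s->init_mu);
      s->PrepareForSampling(inline_element_size);  // set before publication
    }
    samples_.push_back(s);  // only now is `s` reachable by an iterator
    return s;
  }

 private:
  std::vector<Sample*> samples_;
};

int main() {
  Recorder recorder;
  Sample* s = recorder.Register(sizeof(long long));
  return s->inline_element_size == sizeof(long long) ? 0 : 1;
}
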
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index a7307a20..ac6ed9b1 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -70,7 +70,8 @@ std::vector<size_t> GetSizes(HashtablezSampler* s) {
 }
 
 HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
-  auto* info = s->Register();
+  const size_t test_element_size = 17;
+  auto* info = s->Register(test_element_size);
   assert(info != nullptr);
   info->size.store(size);
   return info;
@@ -81,9 +82,8 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   const size_t test_element_size = 17;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  info.PrepareForSampling(test_element_size);
 
-  info.inline_element_size.store(test_element_size, std::memory_order_relaxed);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -95,7 +95,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
   EXPECT_GE(info.create_time, test_start);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 
   info.capacity.store(1, std::memory_order_relaxed);
   info.size.store(1, std::memory_order_relaxed);
@@ -108,7 +108,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   info.max_reserve.store(1, std::memory_order_relaxed);
   info.create_time = test_start - absl::Hours(20);
 
-  info.PrepareForSampling();
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -119,14 +119,15 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
   EXPECT_GE(info.create_time, test_start);
 }
 
 TEST(HashtablezInfoTest, RecordStorageChanged) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const size_t test_element_size = 19;
+  info.PrepareForSampling(test_element_size);
   RecordStorageChangedSlow(&info, 17, 47);
   EXPECT_EQ(info.size.load(), 17);
   EXPECT_EQ(info.capacity.load(), 47);
@@ -138,7 +139,8 @@ TEST(HashtablezInfoTest, RecordStorageChanged) {
 TEST(HashtablezInfoTest, RecordInsert) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const size_t test_element_size = 23;
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.max_probe_length.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
   EXPECT_EQ(info.max_probe_length.load(), 6);
@@ -161,8 +163,7 @@ TEST(HashtablezInfoTest, RecordErase) {
   const size_t test_element_size = 29;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
-  info.inline_element_size.store(test_element_size);
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -170,15 +171,14 @@ TEST(HashtablezInfoTest, RecordErase) {
   RecordEraseSlow(&info);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 1);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 }
 
 TEST(HashtablezInfoTest, RecordRehash) {
   const size_t test_element_size = 31;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
-  info.inline_element_size.store(test_element_size);
+  info.PrepareForSampling(test_element_size);
   RecordInsertSlow(&info, 0x1, 0);
   RecordInsertSlow(&info, 0x2, kProbeLength);
   RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -197,13 +197,14 @@ TEST(HashtablezInfoTest, RecordRehash) {
   EXPECT_EQ(info.total_probe_length.load(), 3);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.num_rehashes.load(), 1);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 }
 
 TEST(HashtablezInfoTest, RecordReservation) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const size_t test_element_size = 33;
+  info.PrepareForSampling(test_element_size);
   RecordReservationSlow(&info, 3);
   EXPECT_EQ(info.max_reserve.load(), 3);
 
@@ -266,7 +267,8 @@ TEST(HashtablezSamplerTest, Sample) {
 
 TEST(HashtablezSamplerTest, Handle) {
   auto& sampler = GlobalHashtablezSampler();
-  HashtablezInfoHandle h(sampler.Register());
+  const size_t test_element_size = 39;
+  HashtablezInfoHandle h(sampler.Register(test_element_size));
   auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
   info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
 
@@ -338,18 +340,19 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
   ThreadPool pool(10);
 
   for (int i = 0; i < 10; ++i) {
-    pool.Schedule([&sampler, &stop]() {
+    const size_t elt_size = 10 + i % 2;
+    pool.Schedule([&sampler, &stop, elt_size]() {
       std::random_device rd;
       std::mt19937 gen(rd());
 
       std::vector<HashtablezInfo*> infoz;
       while (!stop.HasBeenNotified()) {
        if (infoz.empty()) {
-          infoz.push_back(sampler.Register());
+          infoz.push_back(sampler.Register(elt_size));
         }
         switch (std::uniform_int_distribution<>(0, 2)(gen)) {
           case 0: {
-            infoz.push_back(sampler.Register());
+            infoz.push_back(sampler.Register(elt_size));
             break;
           }
           case 1: {
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 4b9f6cc0..362b3cae 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2075,8 +2075,7 @@ TEST(RawHashSamplerTest, Sample) {
                             std::memory_order_relaxed)]++;
       reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
     }
-    EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
-              sizeof(int64_t));
+    EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
     ++end_size;
   });
 
diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h
index 0d4ebeda..ca1ed408 100644
--- a/absl/container/node_hash_map.h
+++ b/absl/container/node_hash_map.h
@@ -41,6 +41,7 @@
 #include <utility>
 
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/node_slot_policy.h"
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
index 3bc5fe6b..9421e11e 100644
--- a/absl/container/node_hash_set.h
+++ b/absl/container/node_hash_set.h
@@ -38,6 +38,7 @@
 #include <type_traits>
 
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/node_slot_policy.h"
 #include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
diff --git a/absl/container/sample_element_size_test.cc b/absl/container/sample_element_size_test.cc
index 4bbef210..b23626b4 100644
--- a/absl/container/sample_element_size_test.cc
+++ b/absl/container/sample_element_size_test.cc
@@ -51,8 +51,7 @@ void TestInlineElementSize(
   size_t new_count = 0;
   sampler.Iterate([&](const HashtablezInfo& info) {
     if (preexisting_info.insert(&info).second) {
-      EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
-                expected_element_size);
+      EXPECT_EQ(info.inline_element_size, expected_element_size);
       ++new_count;
     }
   });
diff --git a/absl/functional/function_ref.h b/absl/functional/function_ref.h
index 824e3cea..f9779607 100644
--- a/absl/functional/function_ref.h
+++ b/absl/functional/function_ref.h
@@ -69,7 +69,8 @@ class FunctionRef;
 // An `absl::FunctionRef` is a lightweight wrapper to any invokable object with
 // a compatible signature. Generally, an `absl::FunctionRef` should only be used
 // as an argument type and should be preferred as an argument over a const
-// reference to a `std::function`.
+// reference to a `std::function`. `absl::FunctionRef` itself does not allocate,
+// although the wrapped invokable may.
 //
 // Example:
 //
diff --git a/absl/functional/function_ref_test.cc b/absl/functional/function_ref_test.cc
index 3aa59745..412027cd 100644
--- a/absl/functional/function_ref_test.cc
+++ b/absl/functional/function_ref_test.cc
@@ -14,6 +14,7 @@
 
 #include "absl/functional/function_ref.h"
 
+#include <functional>
 #include <memory>
 
 #include "gmock/gmock.h"
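
The documentation tweak above is worth spelling out: absl::FunctionRef is a non-owning view of a callable and never allocates itself, which is why it is preferred over a `const std::function<...>&` parameter. A small usage sketch (the function names here are made up):

#include <cstdio>
#include <vector>

#include "absl/functional/function_ref.h"

// Accepts any callable with a compatible signature without copying it or
// allocating, unlike taking a const std::function<int(int)>&.
int SumTransformed(const std::vector<int>& v, absl::FunctionRef<int(int)> f) {
  int sum = 0;
  for (int x : v) sum += f(x);
  return sum;
}

int main() {
  const std::vector<int> v = {1, 2, 3};
  const int factor = 10;
  // A capturing lambda binds to FunctionRef with no heap allocation; it only
  // has to outlive the call.
  const int result = SumTransformed(v, [factor](int x) { return factor * x; });
  std::printf("%d\n", result);  // 60
}
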
diff --git a/absl/profiling/internal/sample_recorder.h b/absl/profiling/internal/sample_recorder.h
index 6c146210..fc269bdd 100644
--- a/absl/profiling/internal/sample_recorder.h
+++ b/absl/profiling/internal/sample_recorder.h
@@ -59,7 +59,8 @@ class SampleRecorder {
   ~SampleRecorder();
 
   // Registers for sampling. Returns an opaque registration info.
-  T* Register();
+  template <typename... Targs>
+  T* Register(Targs&&... args);
 
   // Unregisters the sample.
   void Unregister(T* sample);
@@ -81,7 +82,8 @@ class SampleRecorder {
 private:
   void PushNew(T* sample);
   void PushDead(T* sample);
-  T* PopDead();
+  template <typename... Targs>
+  T* PopDead(Targs... args);
 
   std::atomic<size_t> dropped_samples_;
   std::atomic<size_t> size_estimate_;
@@ -163,7 +165,8 @@ void SampleRecorder<T>::PushDead(T* sample) {
 }
 
 template <typename T>
-T* SampleRecorder<T>::PopDead() {
+template <typename... Targs>
+T* SampleRecorder<T>::PopDead(Targs... args) {
   absl::MutexLock graveyard_lock(&graveyard_.init_mu);
 
   // The list is circular, so eventually it collapses down to
@@ -175,12 +178,13 @@ T* SampleRecorder<T>::PopDead() {
   absl::MutexLock sample_lock(&sample->init_mu);
   graveyard_.dead = sample->dead;
   sample->dead = nullptr;
-  sample->PrepareForSampling();
+  sample->PrepareForSampling(std::forward<Targs>(args)...);
   return sample;
 }
 
 template <typename T>
-T* SampleRecorder<T>::Register() {
+template <typename... Targs>
+T* SampleRecorder<T>::Register(Targs&&... args) {
   int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
   if (size > max_samples_.load(std::memory_order_relaxed)) {
     size_estimate_.fetch_sub(1, std::memory_order_relaxed);
@@ -188,10 +192,14 @@ T* SampleRecorder<T>::Register() {
     return nullptr;
   }
 
-  T* sample = PopDead();
+  T* sample = PopDead(args...);
   if (sample == nullptr) {
     // Resurrection failed. Hire a new warlock.
     sample = new T();
+    {
+      absl::MutexLock sample_lock(&sample->init_mu);
+      sample->PrepareForSampling(std::forward<Targs>(args)...);
+    }
     PushNew(sample);
   }
 
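
Register() is now a forwarding variadic template, so whatever arguments the caller passes end up in T::PrepareForSampling() on both the resurrection path and the new-sample path. Note that PopDead takes its pack by value rather than by forwarding reference, which leaves the arguments usable again for the `new T()` branch when resurrection fails. A reduced sketch of the shape of this API, with toy types instead of the real SampleRecorder:

#include <cstddef>
#include <iostream>
#include <utility>

struct Info {
  std::size_t element_size = 0;
  void PrepareForSampling(std::size_t element_size_value) {
    element_size = element_size_value;
  }
};

template <typename T>
class MiniRecorder {
 public:
  template <typename... Targs>
  T* Register(Targs&&... args) {
    T* sample = PopDead(args...);  // pass copies: the pack may be reused below
    if (sample == nullptr) {
      sample = new T();
      sample->PrepareForSampling(std::forward<Targs>(args)...);
    }
    return sample;
  }

 private:
  template <typename... Targs>
  T* PopDead(Targs... args) {
    if (graveyard_ == nullptr) return nullptr;  // nothing to resurrect
    T* sample = graveyard_;
    graveyard_ = nullptr;
    sample->PrepareForSampling(std::forward<Targs>(args)...);
    return sample;
  }

  T* graveyard_ = nullptr;
};

int main() {
  MiniRecorder<Info> recorder;
  Info* info = recorder.Register(sizeof(double));
  std::cout << info->element_size << "\n";  // typically prints 8
  delete info;
}
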
diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc
index 0015bb9f..5905bac0 100644
--- a/absl/strings/cord.cc
+++ b/absl/strings/cord.cc
@@ -468,46 +468,6 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
   return true;
 }
 
-template <bool has_length>
-void Cord::InlineRep::GetAppendRegion(char** region, size_t* size,
-                                      size_t length) {
-  auto constexpr method = CordzUpdateTracker::kGetAppendRegion;
-
-  CordRep* root = tree();
-  size_t sz = root ? root->length : inline_size();
-  if (root == nullptr) {
-    size_t available = kMaxInline - sz;
-    if (available >= (has_length ? length : 1)) {
-      *region = data_.as_chars() + sz;
-      *size = has_length ? length : available;
-      set_inline_size(has_length ? sz + length : kMaxInline);
-      return;
-    }
-  }
-
-  size_t extra = has_length ? length : (std::max)(sz, kMinFlatLength);
-  CordzUpdateScope scope(root ? data_.cordz_info() : nullptr, method);
-  CordRep* rep = root ? cord_internal::RemoveCrcNode(root)
-                      : MakeFlatWithExtraCapacity(extra);
-  if (PrepareAppendRegion(rep, region, size, length)) {
-    CommitTree(root, rep, scope, method);
-    return;
-  }
-
-  // Allocate new node.
-  CordRepFlat* new_node = CordRepFlat::New(extra);
-  new_node->length = std::min(new_node->Capacity(), length);
-  *region = new_node->Data();
-  *size = new_node->length;
-
-  if (btree_enabled()) {
-    rep = CordRepBtree::Append(ForceBtree(rep), new_node);
-  } else {
-    rep = Concat(rep, new_node);
-  }
-  CommitTree(root, rep, scope, method);
-}
-
 // Computes the memory side of the provided edge which must be a valid data edge
 // for a btree, i.e., a FLAT, EXTERNAL or SUBSTRING of a FLAT or EXTERNAL node.
 static bool RepMemoryUsageDataEdge(const CordRep* rep,
diff --git a/absl/strings/internal/cordz_update_tracker.h b/absl/strings/internal/cordz_update_tracker.h
index 7a41c180..c5170662 100644
--- a/absl/strings/internal/cordz_update_tracker.h
+++ b/absl/strings/internal/cordz_update_tracker.h
@@ -39,8 +39,8 @@ class CordzUpdateTracker {
   // Tracked update methods.
   enum MethodIdentifier {
     kUnknown,
-    kAppendBuffer,
     kAppendCord,
+    kAppendCordBuffer,
     kAppendExternalMemory,
     kAppendString,
     kAssignCord,
@@ -50,13 +50,14 @@ class CordzUpdateTracker {
     kConstructorString,
     kCordReader,
     kFlatten,
+    kGetAppendBuffer,
     kGetAppendRegion,
     kMakeCordFromExternal,
     kMoveAppendCord,
     kMoveAssignCord,
     kMovePrependCord,
-    kPrependBuffer,
     kPrependCord,
+    kPrependCordBuffer,
     kPrependString,
     kRemovePrefix,
     kRemoveSuffix,
diff --git a/absl/strings/internal/cordz_update_tracker_test.cc b/absl/strings/internal/cordz_update_tracker_test.cc
index 8eda529e..9b1f7986 100644
--- a/absl/strings/internal/cordz_update_tracker_test.cc
+++ b/absl/strings/internal/cordz_update_tracker_test.cc
@@ -37,8 +37,8 @@ using Methods = std::array<Method, Method::kNumMethods>;
 // Returns an array of all methods defined in `MethodIdentifier`
 Methods AllMethods() {
   return Methods{Method::kUnknown,
-                 Method::kAppendBuffer,
                  Method::kAppendCord,
+                 Method::kAppendCordBuffer,
                  Method::kAppendExternalMemory,
                  Method::kAppendString,
                  Method::kAssignCord,
@@ -48,13 +48,14 @@ Methods AllMethods() {
                  Method::kConstructorString,
                  Method::kCordReader,
                  Method::kFlatten,
+                 Method::kGetAppendBuffer,
                  Method::kGetAppendRegion,
                  Method::kMakeCordFromExternal,
                  Method::kMoveAppendCord,
                  Method::kMoveAssignCord,
                  Method::kMovePrependCord,
-                 Method::kPrependBuffer,
                  Method::kPrependCord,
+                 Method::kPrependCordBuffer,
                  Method::kPrependString,
                  Method::kRemovePrefix,
                  Method::kRemoveSuffix,
diff --git a/absl/time/internal/cctz/include/cctz/civil_time_detail.h b/absl/time/internal/cctz/include/cctz/civil_time_detail.h
index 8aadde57..a5b084e6 100644
--- a/absl/time/internal/cctz/include/cctz/civil_time_detail.h
+++ b/absl/time/internal/cctz/include/cctz/civil_time_detail.h
@@ -84,14 +84,13 @@ CONSTEXPR_F bool is_leap_year(year_t y) noexcept {
   return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0);
 }
 CONSTEXPR_F int year_index(year_t y, month_t m) noexcept {
-  return (static_cast<int>((y + (m > 2)) % 400) + 400) % 400;
+  const int yi = static_cast<int>((y + (m > 2)) % 400);
+  return yi < 0 ? yi + 400 : yi;
 }
-CONSTEXPR_F int days_per_century(year_t y, month_t m) noexcept {
-  const int yi = year_index(y, m);
+CONSTEXPR_F int days_per_century(int yi) noexcept {
   return 36524 + (yi == 0 || yi > 300);
 }
-CONSTEXPR_F int days_per_4years(year_t y, month_t m) noexcept {
-  const int yi = year_index(y, m);
+CONSTEXPR_F int days_per_4years(int yi) noexcept {
   return 1460 + (yi == 0 || yi > 300 || (yi - 1) % 100 < 96);
 }
 CONSTEXPR_F int days_per_year(year_t y, month_t m) noexcept {
@@ -133,17 +132,22 @@ CONSTEXPR_F fields n_day(year_t y, month_t m, diff_t d, diff_t cd, hour_t hh,
     }
   }
   if (d > 365) {
+    int yi = year_index(ey, m);  // Index into Gregorian 400 year cycle.
     for (;;) {
-      int n = days_per_century(ey, m);
+      int n = days_per_century(yi);
       if (d <= n) break;
       d -= n;
       ey += 100;
+      yi += 100;
+      if (yi >= 400) yi -= 400;
     }
     for (;;) {
-      int n = days_per_4years(ey, m);
+      int n = days_per_4years(yi);
       if (d <= n) break;
       d -= n;
       ey += 4;
+      yi += 4;
+      if (yi >= 400) yi -= 400;
     }
     for (;;) {
       int n = days_per_year(ey, m);
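
Both forms of year_index() map a (possibly negative) year into [0, 400). Since C++11, `%` keeps the sign of the dividend, so for y = -1, m = 1 the inner `(y + (m > 2)) % 400` evaluates to -1; the old code fixed that up with `(... + 400) % 400`, while the new code uses one comparison and avoids the second modulo. A worked check, using standalone reimplementations of the two expressions (not the cctz functions themselves):

#include <cassert>
#include <cstdint>

// The previous form: always add 400, then reduce mod 400 a second time.
int YearIndexOld(std::int64_t y, int m) {
  return (static_cast<int>((y + (m > 2)) % 400) + 400) % 400;
}

// The new form: one conditional add instead of a second modulo.
int YearIndexNew(std::int64_t y, int m) {
  const int yi = static_cast<int>((y + (m > 2)) % 400);
  return yi < 0 ? yi + 400 : yi;
}

int main() {
  assert(YearIndexNew(2021, 12) == YearIndexOld(2021, 12));
  assert(YearIndexNew(-1, 1) == 399);  // -1 % 400 == -1, then +400
  assert(YearIndexNew(-1, 1) == YearIndexOld(-1, 1));
  assert(YearIndexNew(0, 3) == 1);     // months after February add one
  assert(YearIndexNew(-401, 3) == YearIndexOld(-401, 3));
  return 0;
}
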