Diffstat (limited to 'absl/container/internal')
-rw-r--r--   absl/container/internal/hashtablez_sampler.cc      | 104
-rw-r--r--   absl/container/internal/hashtablez_sampler.h       |  81
-rw-r--r--   absl/container/internal/hashtablez_sampler_test.cc |   3
-rw-r--r--   absl/container/internal/raw_hash_set_test.cc       |   4
4 files changed, 16 insertions, 176 deletions
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 5a29bed7..ca03d9b6 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -25,6 +25,7 @@
 #include "absl/container/internal/have_sse.h"
 #include "absl/debugging/stacktrace.h"
 #include "absl/memory/memory.h"
+#include "absl/profiling/internal/sample_recorder.h"
 #include "absl/synchronization/mutex.h"
 
 namespace absl {
@@ -37,7 +38,6 @@
 ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
     false
 };
 ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
-ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20};
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased
@@ -50,16 +50,11 @@
 ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
-HashtablezSampler& HashtablezSampler::Global() {
+HashtablezSampler& GlobalHashtablezSampler() {
   static auto* sampler = new HashtablezSampler();
   return *sampler;
 }
 
-HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback(
-    DisposeCallback f) {
-  return dispose_.exchange(f, std::memory_order_relaxed);
-}
-
 HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
 HashtablezInfo::~HashtablezInfo() = default;
 
@@ -80,93 +75,6 @@ void HashtablezInfo::PrepareForSampling() {
   // instead.
   depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
                               /* skip_count= */ 0);
-  dead = nullptr;
-}
-
-HashtablezSampler::HashtablezSampler()
-    : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
-  absl::MutexLock l(&graveyard_.init_mu);
-  graveyard_.dead = &graveyard_;
-}
-
-HashtablezSampler::~HashtablezSampler() {
-  HashtablezInfo* s = all_.load(std::memory_order_acquire);
-  while (s != nullptr) {
-    HashtablezInfo* next = s->next;
-    delete s;
-    s = next;
-  }
-}
-
-void HashtablezSampler::PushNew(HashtablezInfo* sample) {
-  sample->next = all_.load(std::memory_order_relaxed);
-  while (!all_.compare_exchange_weak(sample->next, sample,
-                                     std::memory_order_release,
-                                     std::memory_order_relaxed)) {
-  }
-}
-
-void HashtablezSampler::PushDead(HashtablezInfo* sample) {
-  if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
-    dispose(*sample);
-  }
-
-  absl::MutexLock graveyard_lock(&graveyard_.init_mu);
-  absl::MutexLock sample_lock(&sample->init_mu);
-  sample->dead = graveyard_.dead;
-  graveyard_.dead = sample;
-}
-
-HashtablezInfo* HashtablezSampler::PopDead() {
-  absl::MutexLock graveyard_lock(&graveyard_.init_mu);
-
-  // The list is circular, so eventually it collapses down to
-  //   graveyard_.dead == &graveyard_
-  // when it is empty.
-  HashtablezInfo* sample = graveyard_.dead;
-  if (sample == &graveyard_) return nullptr;
-
-  absl::MutexLock sample_lock(&sample->init_mu);
-  graveyard_.dead = sample->dead;
-  sample->PrepareForSampling();
-  return sample;
-}
-
-HashtablezInfo* HashtablezSampler::Register() {
-  int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
-  if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) {
-    size_estimate_.fetch_sub(1, std::memory_order_relaxed);
-    dropped_samples_.fetch_add(1, std::memory_order_relaxed);
-    return nullptr;
-  }
-
-  HashtablezInfo* sample = PopDead();
-  if (sample == nullptr) {
-    // Resurrection failed.  Hire a new warlock.
-    sample = new HashtablezInfo();
-    PushNew(sample);
-  }
-
-  return sample;
-}
-
-void HashtablezSampler::Unregister(HashtablezInfo* sample) {
-  PushDead(sample);
-  size_estimate_.fetch_sub(1, std::memory_order_relaxed);
-}
-
-int64_t HashtablezSampler::Iterate(
-    const std::function<void(const HashtablezInfo& stack)>& f) {
-  HashtablezInfo* s = all_.load(std::memory_order_acquire);
-  while (s != nullptr) {
-    absl::MutexLock l(&s->init_mu);
-    if (s->dead == nullptr) {
-      f(*s);
-    }
-    s = s->next;
-  }
-
-  return dropped_samples_.load(std::memory_order_relaxed);
 }
 
 static bool ShouldForceSampling() {
@@ -192,7 +100,7 @@
 HashtablezInfo* SampleSlow(int64_t* next_sample) {
   if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
     *next_sample = 1;
-    return HashtablezSampler::Global().Register();
+    return GlobalHashtablezSampler().Register();
   }
 
 #if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -217,12 +125,12 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) {
     return SampleSlow(next_sample);
   }
 
-  return HashtablezSampler::Global().Register();
+  return GlobalHashtablezSampler().Register();
 #endif
 }
 
 void UnsampleSlow(HashtablezInfo* info) {
-  HashtablezSampler::Global().Unregister(info);
+  GlobalHashtablezSampler().Unregister(info);
 }
 
 void RecordInsertSlow(HashtablezInfo* info, size_t hash,
@@ -262,7 +170,7 @@ void SetHashtablezSampleParameter(int32_t rate) {
 
 void SetHashtablezMaxSamples(int32_t max) {
   if (max > 0) {
-    g_hashtablez_max_samples.store(max, std::memory_order_release);
+    GlobalHashtablezSampler().SetMaxSamples(max);
   } else {
     ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
                  static_cast<long long>(max));  // NOLINT(runtime/int)
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 85685f72..d86207f5 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -47,6 +47,7 @@
 #include "absl/base/internal/per_thread_tls.h"
 #include "absl/base/optimization.h"
 #include "absl/container/internal/have_sse.h"
+#include "absl/profiling/internal/sample_recorder.h"
 #include "absl/synchronization/mutex.h"
 #include "absl/utility/utility.h"
 
@@ -57,7 +58,7 @@ namespace container_internal {
 // Stores information about a sampled hashtable.  All mutations to this *must*
 // be made through `Record*` functions below.  All reads from this *must* only
 // occur in the callback to `HashtablezSampler::Iterate`.
-struct HashtablezInfo {
+struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   // Constructs the object but does not fill in any fields.
   HashtablezInfo();
   ~HashtablezInfo();
@@ -80,14 +81,6 @@
   std::atomic<size_t> hashes_bitwise_and;
   std::atomic<size_t> hashes_bitwise_xor;
 
-  // `HashtablezSampler` maintains intrusive linked lists for all samples. See
-  // comments on `HashtablezSampler::all_` for details on these. `init_mu`
-  // guards the ability to restore the sample to a pristine state. This
-  // prevents races with sampling and resurrecting an object.
-  absl::Mutex init_mu;
-  HashtablezInfo* next;
-  HashtablezInfo* dead ABSL_GUARDED_BY(init_mu);
-
   // All of the fields below are set by `PrepareForSampling`, they must not be
   // mutated in `Record*` functions.  They are logically `const` in that sense.
   // These are guarded by init_mu, but that is not externalized to clients, who
@@ -231,73 +224,11 @@ inline HashtablezInfoHandle Sample() {
 #endif  // !ABSL_PER_THREAD_TLS
 }
 
-// Holds samples and their associated stack traces with a soft limit of
-// `SetHashtablezMaxSamples()`.
-//
-// Thread safe.
-class HashtablezSampler {
- public:
-  // Returns a global Sampler.
-  static HashtablezSampler& Global();
-
-  HashtablezSampler();
-  ~HashtablezSampler();
-
-  // Registers for sampling.  Returns an opaque registration info.
-  HashtablezInfo* Register();
+using HashtablezSampler =
+    ::absl::profiling_internal::SampleRecorder<HashtablezInfo>;
 
-  // Unregisters the sample.
-  void Unregister(HashtablezInfo* sample);
-
-  // The dispose callback will be called on all samples the moment they are
-  // being unregistered. Only affects samples that are unregistered after the
-  // callback has been set.
-  // Returns the previous callback.
-  using DisposeCallback = void (*)(const HashtablezInfo&);
-  DisposeCallback SetDisposeCallback(DisposeCallback f);
-
-  // Iterates over all the registered `StackInfo`s.  Returning the number of
-  // samples that have been dropped.
-  int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
-
- private:
-  void PushNew(HashtablezInfo* sample);
-  void PushDead(HashtablezInfo* sample);
-  HashtablezInfo* PopDead();
-
-  std::atomic<size_t> dropped_samples_;
-  std::atomic<size_t> size_estimate_;
-
-  // Intrusive lock free linked lists for tracking samples.
-  //
-  // `all_` records all samples (they are never removed from this list) and is
-  // terminated with a `nullptr`.
-  //
-  // `graveyard_.dead` is a circular linked list.  When it is empty,
-  // `graveyard_.dead == &graveyard`.  The list is circular so that
-  // every item on it (even the last) has a non-null dead pointer.  This allows
-  // `Iterate` to determine if a given sample is live or dead using only
-  // information on the sample itself.
-  //
-  // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
-  // looks like this (G is the Graveyard):
-  //
-  //           +---+    +---+    +---+    +---+    +---+
-  //    all -->| A |--->| B |--->| C |--->| D |--->| E |
-  //           |   |    |   |    |   |    |   |    |   |
-  //   +---+   |   | +->|   |-+  |   | +->|   |-+  |   |
-  //   | G |   +---+ |  +---+ |  +---+ |  +---+ |  +---+
-  //   |   |         |        |        |        |
-  //   |   | --------+        +--------+        |
-  //   +---+                                    |
-  //     ^                                      |
-  //     +--------------------------------------+
-  //
-  std::atomic<HashtablezInfo*> all_;
-  HashtablezInfo graveyard_;
-
-  std::atomic<DisposeCallback> dispose_;
-};
+// Returns a global Sampler.
+HashtablezSampler& GlobalHashtablezSampler();
 
 // Enables or disables sampling for Swiss tables.
 void SetHashtablezEnabled(bool enabled);
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index 5f4c83b7..53fcfe6f 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -22,6 +22,7 @@
 #include "gtest/gtest.h"
 #include "absl/base/attributes.h"
 #include "absl/container/internal/have_sse.h"
+#include "absl/profiling/internal/sample_recorder.h"
 #include "absl/synchronization/blocking_counter.h"
 #include "absl/synchronization/internal/thread_pool.h"
 #include "absl/synchronization/mutex.h"
@@ -232,7 +233,7 @@ TEST(HashtablezSamplerTest, Sample) {
 }
 
 TEST(HashtablezSamplerTest, Handle) {
-  auto& sampler = HashtablezSampler::Global();
+  auto& sampler = GlobalHashtablezSampler();
   HashtablezInfoHandle h(sampler.Register());
   auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
   info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 4fb31fad..4012a3aa 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2038,7 +2038,7 @@ TEST(RawHashSamplerTest, Sample) {
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(100);
 
-  auto& sampler = HashtablezSampler::Global();
+  auto& sampler = GlobalHashtablezSampler();
   size_t start_size = 0;
   std::unordered_set<const HashtablezInfo*> preexisting_info;
   start_size += sampler.Iterate([&](const HashtablezInfo& info) {
@@ -2076,7 +2076,7 @@ TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
   SetHashtablezEnabled(true);
   SetHashtablezSampleParameter(100);
 
-  auto& sampler = HashtablezSampler::Global();
+  auto& sampler = GlobalHashtablezSampler();
   size_t start_size = 0;
   start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });