author     Abseil Team <absl-team@google.com>        2022-01-18 13:02:48 -0800
committer  dinord <dino.radakovich@gmail.com>        2022-01-19 00:50:38 -0500
commit     9bb42857112ad13b23de4333fbb75eb47db9de95
tree       d892ca8ef4eb337cdd16a55332991c5c21d6feb7 /absl/container/internal/hashtablez_sampler.cc
parent     c59e7e59f5d29619ddc07fcb59be3dcba9585814
Export of internal Abseil changes
--
7f5caec21a1a88db6486b8bc2e5f6d5baba076ed by Derek Mauro <dmauro@google.com>:
Tag absl::StatusOr with [[nodiscard]] when it is available
[[nodiscard]] provides better diagnostics on classes than the current
ABSL_MUST_USE_RESULT, which expands to
__attribute__((warn_unused_result)).
Ideally we would make ABSL_MUST_USE_RESULT expand to [[nodiscard]], but
we would need to fix all code to build with [[nodiscard]] first.
PiperOrigin-RevId: 422628161
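
As a rough illustration of why this matters (a hypothetical sketch, not Abseil source; MyStatus stands in for absl::StatusOr): GCC ignores __attribute__((warn_unused_result)) when it is applied to a class type, whereas [[nodiscard]] on the class makes every function that returns it by value subject to the diagnostic.

// Hypothetical example, not Abseil code: tagging the class itself.
class [[nodiscard]] MyStatus {
 public:
  bool ok() const { return ok_; }

 private:
  bool ok_ = true;
};

MyStatus DoWork() { return MyStatus{}; }

void Caller() {
  DoWork();               // warning: ignoring return value of type 'MyStatus'
  MyStatus s = DoWork();  // no warning: the result is consumed
  if (!s.ok()) {
    // handle the error
  }
}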
--
236be69f0f34ccaa3adc831075613847797e6557 by Jorg Brown <jorg@google.com>:
Make sure all of absl/strings compiles cleanly even with the -Wsign-compare and -Wconversion warnings enabled.
PiperOrigin-RevId: 422573904
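
A typical fix under those flags looks like this (a hypothetical sketch, not taken from the change itself): comparing a signed int against an unsigned size_t trips -Wsign-compare, so the signed side is range-checked and then cast explicitly.

#include <cstddef>
#include <string>

// Hypothetical example of the kind of cleanup -Wsign-compare requires.
bool FitsWithin(const std::string& s, int limit) {
  // `s.size() <= limit` would warn: size_t vs. int comparison.
  return limit >= 0 && s.size() <= static_cast<std::size_t>(limit);
}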
--
005669883a8794e6d19dc4bbb4f0e18032e2fbc9 by Chris Kennelly <ckennelly@google.com>:
Include sampling stride in hashtable sampling data.
This allows us to weight each sample to control for our sampling rate.
PiperOrigin-RevId: 421886935
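
The statistical idea, sketched below (hypothetical code, not the Abseil API): a table sampled with stride S stands in for roughly S tables that were not sampled, so weighting each sample by its stride yields unbiased population estimates even when the sampling rate changes over time.

#include <cstdint>
#include <vector>

// Hypothetical sketch of stride-weighted estimation.
struct Sample {
  int64_t weight;  // sampling stride in effect when this sample was taken
  uint64_t size;   // a per-table statistic, e.g. element count
};

double EstimateTotalSize(const std::vector<Sample>& samples) {
  double total = 0;
  for (const Sample& s : samples) {
    // Each sample represents roughly `weight` unsampled tables.
    total += static_cast<double>(s.weight) * static_cast<double>(s.size);
  }
  return total;
}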
--
78046ce6b429b9f5b6c97b05e8448a791db93bbe by Abseil Team <absl-team@google.com>:
Use __builtin_bit_cast for absl::bit_cast when possible
This makes absl::bit_cast match the requirements of the standard when compiler support exists.
PiperOrigin-RevId: 421883999
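
A minimal sketch of the dispatch (not the actual Abseil implementation; my_bit_cast and MY_HAVE_BUILTIN_BIT_CAST are hypothetical names): use the builtin where the compiler advertises it, and fall back to the classic memcpy approach otherwise. The builtin matters because, unlike memcpy, it is usable in constant expressions, which is what the standard's std::bit_cast requires.

#include <cstring>
#include <type_traits>

#if defined(__has_builtin)
#if __has_builtin(__builtin_bit_cast)
#define MY_HAVE_BUILTIN_BIT_CAST 1
#endif
#endif

template <typename Dest, typename Source>
Dest my_bit_cast(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source),
                "bit_cast requires same-sized types");
  static_assert(std::is_trivially_copyable<Source>::value, "");
  static_assert(std::is_trivially_copyable<Dest>::value, "");
#if MY_HAVE_BUILTIN_BIT_CAST
  return __builtin_bit_cast(Dest, source);
#else
  // Portable fallback: byte copy through memcpy (not constexpr-friendly).
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));
  return dest;
#endif
}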
--
f397461f4bbeabd32437df0f2275663aeb51adb2 by Derek Mauro <dmauro@google.com>:
Tag absl::Status with [[nodiscard]] when it is available
[[nodiscard]] provides better diagnostics on classes than the current
ABSL_MUST_USE_RESULT, which expands to
__attribute__((warn_unused_result)).
Ideally we would make ABSL_MUST_USE_RESULT expand to [[nodiscard]], but
we would need to fix all code to build with [[nodiscard]] first.
PiperOrigin-RevId: 421825565
GitOrigin-RevId: 7f5caec21a1a88db6486b8bc2e5f6d5baba076ed
Change-Id: I760b45b68f6012809c70c2a584e4144a99f98733
Diffstat (limited to 'absl/container/internal/hashtablez_sampler.cc')
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc | 34
1 file changed, 23 insertions(+), 11 deletions(-)
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 1d24db5f..322e0547 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -27,6 +27,7 @@
 #include "absl/profiling/internal/exponential_biased.h"
 #include "absl/profiling/internal/sample_recorder.h"
 #include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -53,7 +54,7 @@ void TriggerHashtablezConfigListener() {
 }  // namespace
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0};
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
 HashtablezSampler& GlobalHashtablezSampler() {
@@ -64,7 +65,8 @@ HashtablezSampler& GlobalHashtablezSampler() {
 HashtablezInfo::HashtablezInfo() = default;
 HashtablezInfo::~HashtablezInfo() = default;
 
-void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
+void HashtablezInfo::PrepareForSampling(int64_t stride,
+                                        size_t inline_element_size_value) {
   capacity.store(0, std::memory_order_relaxed);
   size.store(0, std::memory_order_relaxed);
   num_erases.store(0, std::memory_order_relaxed);
@@ -77,6 +79,7 @@ void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
   max_reserve.store(0, std::memory_order_relaxed);
 
   create_time = absl::Now();
+  weight = stride;
   // The inliner makes hardcoded skip_count difficult (especially when combined
   // with LTO).  We use the ability to exclude stacks by regex when encoding
   // instead.
@@ -105,23 +108,32 @@ static bool ShouldForceSampling() {
   return state == kForce;
 }
 
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+                           size_t inline_element_size) {
   if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
-    *next_sample = 1;
+    next_sample.next_sample = 1;
+    const int64_t old_stride = exchange(next_sample.sample_stride, 1);
     HashtablezInfo* result =
-        GlobalHashtablezSampler().Register(inline_element_size);
+        GlobalHashtablezSampler().Register(old_stride, inline_element_size);
     return result;
   }
 
 #if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-  *next_sample = std::numeric_limits<int64_t>::max();
+  next_sample = {
+      std::numeric_limits<int64_t>::max(),
+      std::numeric_limits<int64_t>::max(),
+  };
   return nullptr;
 #else
-  bool first = *next_sample < 0;
-  *next_sample = g_exponential_biased_generator.GetStride(
+  bool first = next_sample.next_sample < 0;
+
+  const int64_t next_stride = g_exponential_biased_generator.GetStride(
       g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+
+  next_sample.next_sample = next_stride;
+  const int64_t old_stride = exchange(next_sample.sample_stride, next_stride);
   // Small values of interval are equivalent to just sampling next time.
-  ABSL_ASSERT(*next_sample >= 1);
+  ABSL_ASSERT(next_stride >= 1);
 
   // g_hashtablez_enabled can be dynamically flipped, we need to set a threshold
   // low enough that we will start sampling in a reasonable time, so we just use
@@ -131,11 +143,11 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
   // We will only be negative on our first count, so we should just retry in
   // that case.
   if (first) {
-    if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
+    if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
     return SampleSlow(next_sample, inline_element_size);
   }
 
-  return GlobalHashtablezSampler().Register(inline_element_size);
+  return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
 #endif
 }
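
For context on the diff above: SamplingState is defined in the accompanying header, which this page does not show. Judging from the usage here, it pairs the countdown with the stride that produced it, roughly:

// Shape implied by the diff (the authoritative definition lives in
// hashtablez_sampler.h, not included on this page):
struct SamplingState {
  int64_t next_sample;    // allocations remaining until the next sample
  int64_t sample_stride;  // stride that generated the current countdown
};

The new include of absl/utility/utility.h supplies absl::exchange, which the unqualified exchange(...) calls resolve to inside namespace absl: it stores the new stride and returns the old one, so Register() records the stride that actually produced the sample being registered.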