author    Abseil Team <absl-team@google.com>  2022-01-18 13:02:48 -0800
committer dinord <dino.radakovich@gmail.com>  2022-01-19 00:50:38 -0500
commit    9bb42857112ad13b23de4333fbb75eb47db9de95
tree      d892ca8ef4eb337cdd16a55332991c5c21d6feb7
parent    c59e7e59f5d29619ddc07fcb59be3dcba9585814
Export of internal Abseil changes
--
7f5caec21a1a88db6486b8bc2e5f6d5baba076ed by Derek Mauro <dmauro@google.com>:

Tag absl::StatusOr with [[nodiscard]] when it is available.

[[nodiscard]] provides better diagnostics on classes than the current
ABSL_MUST_USE_RESULT, which expands to __attribute__((warn_unused_result)).
Ideally we would make ABSL_MUST_USE_RESULT expand to [[nodiscard]], but we
would need to fix all code to build with [[nodiscard]] first.

PiperOrigin-RevId: 422628161

--
236be69f0f34ccaa3adc831075613847797e6557 by Jorg Brown <jorg@google.com>:

Make sure all of absl/strings compiles even with -Wsign-compare and
-Wconversion warnings.

PiperOrigin-RevId: 422573904

--
005669883a8794e6d19dc4bbb4f0e18032e2fbc9 by Chris Kennelly <ckennelly@google.com>:

Include sampling stride in hashtable sampling data.

This allows us to weight each sample to control for our sampling rate.

PiperOrigin-RevId: 421886935

--
78046ce6b429b9f5b6c97b05e8448a791db93bbe by Abseil Team <absl-team@google.com>:

Use __builtin_bit_cast for absl::bit_cast when possible.

This makes absl::bit_cast match the requirements of the standard when
compiler support exists.

PiperOrigin-RevId: 421883999

--
f397461f4bbeabd32437df0f2275663aeb51adb2 by Derek Mauro <dmauro@google.com>:

Tag absl::Status with [[nodiscard]] when it is available.

[[nodiscard]] provides better diagnostics on classes than the current
ABSL_MUST_USE_RESULT, which expands to __attribute__((warn_unused_result)).
Ideally we would make ABSL_MUST_USE_RESULT expand to [[nodiscard]], but we
would need to fix all code to build with [[nodiscard]] first.

PiperOrigin-RevId: 421825565

GitOrigin-RevId: 7f5caec21a1a88db6486b8bc2e5f6d5baba076ed
Change-Id: I760b45b68f6012809c70c2a584e4144a99f98733
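To make the [[nodiscard]] rationale concrete, here is a minimal sketch of the
diagnostic the attribute buys: once the class itself is tagged, every call
that discards a returned value of that type warns, without annotating each
function. MyStatus and DoWork are hypothetical stand-ins, not absl::Status.

// Minimal sketch: a [[nodiscard]]-tagged value type.
#include <cstdio>

class [[nodiscard]] MyStatus {
 public:
  explicit MyStatus(bool ok) : ok_(ok) {}
  bool ok() const { return ok_; }

 private:
  bool ok_;
};

MyStatus DoWork() { return MyStatus(true); }

int main() {
  DoWork();               // warning: ignoring return value of a
                          // [[nodiscard]]-declared type
  MyStatus s = DoWork();  // fine: the result is consumed
  std::printf("ok=%d\n", s.ok() ? 1 : 0);
  return 0;
}

Similarly, a hedged sketch of the bit_cast direction: with __builtin_bit_cast
the conversion is usable in constant expressions, which a memcpy-based
fallback can never be. The names BitCastFallback, BitCastBuiltin, and
DEMO_HAS_BUILTIN_BIT_CAST are illustrative, not Abseil's implementation.

#include <cstdint>
#include <cstring>

// Fallback: well-defined at runtime, but never usable at compile time.
template <class To, class From>
To BitCastFallback(const From& from) {
  static_assert(sizeof(To) == sizeof(From), "types must be the same size");
  To to;
  std::memcpy(&to, &from, sizeof(To));
  return to;
}

#if defined(__has_builtin)
#if __has_builtin(__builtin_bit_cast)
#define DEMO_HAS_BUILTIN_BIT_CAST 1
// With compiler support the cast is a constant expression, matching the
// requirements the standard places on std::bit_cast.
template <class To, class From>
constexpr To BitCastBuiltin(const From& from) {
  return __builtin_bit_cast(To, from);
}
#endif
#endif

int main() {
  const float f = 1.0f;
  const std::uint32_t runtime_bits = BitCastFallback<std::uint32_t>(f);
  (void)runtime_bits;
#ifdef DEMO_HAS_BUILTIN_BIT_CAST
  constexpr std::uint32_t kBits = BitCastBuiltin<std::uint32_t>(1.0f);
  static_assert(kBits == 0x3f800000u, "IEEE-754 bit pattern of 1.0f");
#endif
  return 0;
}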
Diffstat (limited to 'absl/container/internal')
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc      | 34
-rw-r--r--  absl/container/internal/hashtablez_sampler.h       | 18
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc | 51
3 files changed, 69 insertions(+), 34 deletions(-)
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 1d24db5f..322e0547 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -27,6 +27,7 @@
#include "absl/profiling/internal/exponential_biased.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -53,7 +54,7 @@ void TriggerHashtablezConfigListener() {
} // namespace
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0};
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
HashtablezSampler& GlobalHashtablezSampler() {
@@ -64,7 +65,8 @@ HashtablezSampler& GlobalHashtablezSampler() {
HashtablezInfo::HashtablezInfo() = default;
HashtablezInfo::~HashtablezInfo() = default;
-void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
+void HashtablezInfo::PrepareForSampling(int64_t stride,
+ size_t inline_element_size_value) {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
@@ -77,6 +79,7 @@ void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
max_reserve.store(0, std::memory_order_relaxed);
create_time = absl::Now();
+ weight = stride;
// The inliner makes hardcoded skip_count difficult (especially when combined
// with LTO). We use the ability to exclude stacks by regex when encoding
// instead.
@@ -105,23 +108,32 @@ static bool ShouldForceSampling() {
return state == kForce;
}
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+ size_t inline_element_size) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
- *next_sample = 1;
+ next_sample.next_sample = 1;
+ const int64_t old_stride = exchange(next_sample.sample_stride, 1);
HashtablezInfo* result =
- GlobalHashtablezSampler().Register(inline_element_size);
+ GlobalHashtablezSampler().Register(old_stride, inline_element_size);
return result;
}
#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
- *next_sample = std::numeric_limits<int64_t>::max();
+ next_sample = {
+ std::numeric_limits<int64_t>::max(),
+ std::numeric_limits<int64_t>::max(),
+ };
return nullptr;
#else
- bool first = *next_sample < 0;
- *next_sample = g_exponential_biased_generator.GetStride(
+ bool first = next_sample.next_sample < 0;
+
+ const int64_t next_stride = g_exponential_biased_generator.GetStride(
g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+
+ next_sample.next_sample = next_stride;
+ const int64_t old_stride = exchange(next_sample.sample_stride, next_stride);
// Small values of interval are equivalent to just sampling next time.
- ABSL_ASSERT(*next_sample >= 1);
+ ABSL_ASSERT(next_stride >= 1);
// g_hashtablez_enabled can be dynamically flipped, we need to set a threshold
// low enough that we will start sampling in a reasonable time, so we just use
@@ -131,11 +143,11 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
// We will only be negative on our first count, so we should just retry in
// that case.
if (first) {
- if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
+ if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
return SampleSlow(next_sample, inline_element_size);
}
- return GlobalHashtablezSampler().Register(inline_element_size);
+ return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
#endif
}
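The effect of threading old_stride through Register() above is that each
HashtablezInfo carries, in its weight field, the number of allocations its
sample stands in for. A hedged sketch of how a consumer of the sampled data
might use that weight to de-bias aggregate statistics; SampleRecord and
EstimateTotal are hypothetical names, not Abseil APIs.

#include <cstdint>
#include <vector>

// Hypothetical flattened view of one sampled table.
struct SampleRecord {
  std::int64_t weight;  // sampling stride in effect when the table was sampled
  std::uint64_t size;   // observed number of elements
};

// Each sample was chosen roughly once per `weight` eligible tables, so
// weighting by the stride yields an approximately unbiased estimate of the
// total over all tables, sampled or not.
double EstimateTotal(const std::vector<SampleRecord>& samples) {
  double total = 0.0;
  for (const SampleRecord& s : samples) {
    total += static_cast<double>(s.weight) * static_cast<double>(s.size);
  }
  return total;
}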
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 6738786c..e7c204ee 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -67,7 +67,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// Puts the object into a clean state, fills in the logically `const` members,
// blocking for any readers that are currently sampling the object.
- void PrepareForSampling(size_t inline_element_size_value)
+ void PrepareForSampling(int64_t stride, size_t inline_element_size_value)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
// These fields are mutated by the various Record* APIs and need to be
@@ -145,7 +145,15 @@ inline void RecordEraseSlow(HashtablezInfo* info) {
std::memory_order_relaxed);
}
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size);
+struct SamplingState {
+ int64_t next_sample;
+ // When we make a sampling decision, we record that distance so we can weight
+ // each sample.
+ int64_t sample_stride;
+};
+
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+ size_t inline_element_size);
void UnsampleSlow(HashtablezInfo* info);
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -235,7 +243,7 @@ class HashtablezInfoHandle {
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Returns an RAII sampling handle that manages registration and unregistration
@@ -243,11 +251,11 @@ extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
inline HashtablezInfoHandle Sample(
size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
- if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+ if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) {
return HashtablezInfoHandle(nullptr);
}
return HashtablezInfoHandle(
- SampleSlow(&global_next_sample, inline_element_size));
+ SampleSlow(global_next_sample, inline_element_size));
#else
return HashtablezInfoHandle(nullptr);
#endif // !ABSL_PER_THREAD_TLS
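Putting the header changes together: the per-thread SamplingState keeps the
countdown and the stride that produced it side by side, so the cheap
decrement on the fast path is unchanged while the slow path can hand the old
stride to Register() as the sample's weight. Below is a simplified,
self-contained sketch of that shape, with a fixed stride standing in for the
exponentially biased draw and a bool in place of HashtablezInfo*; it is not
the real implementation.

#include <cstdint>

struct SamplingState {
  std::int64_t next_sample;    // countdown until the next sample
  std::int64_t sample_stride;  // stride behind the countdown, kept for weighting
};

thread_local SamplingState g_next_sample = {0, 0};

// Slow path: reset the countdown and remember which stride produced the
// sample about to be taken. The real code draws next_stride from an
// exponentially biased generator; a constant stands in here.
bool SampleSlowSketch(SamplingState& state) {
  const std::int64_t next_stride = 1024;
  const std::int64_t old_stride = state.sample_stride;
  state.next_sample = next_stride;
  state.sample_stride = next_stride;
  (void)old_stride;  // the real code passes this to Register() as the weight
  return true;
}

// Fast path: one decrement and branch per allocation, as in Sample() above.
inline bool ShouldSample() {
  if (--g_next_sample.next_sample > 0) return false;
  return SampleSlowSketch(g_next_sample);
}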
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index ac6ed9b1..77cdf2fd 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -70,8 +70,9 @@ std::vector<size_t> GetSizes(HashtablezSampler* s) {
}
HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
+ const int64_t test_stride = 123;
const size_t test_element_size = 17;
- auto* info = s->Register(test_element_size);
+ auto* info = s->Register(test_stride, test_element_size);
assert(info != nullptr);
info->size.store(size);
return info;
@@ -79,10 +80,11 @@ HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
TEST(HashtablezInfoTest, PrepareForSampling) {
absl::Time test_start = absl::Now();
+ const int64_t test_stride = 123;
const size_t test_element_size = 17;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling(test_element_size);
+ info.PrepareForSampling(test_stride, test_element_size);
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
@@ -95,6 +97,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
EXPECT_EQ(info.max_reserve.load(), 0);
EXPECT_GE(info.create_time, test_start);
+ EXPECT_EQ(info.weight, test_stride);
EXPECT_EQ(info.inline_element_size, test_element_size);
info.capacity.store(1, std::memory_order_relaxed);
@@ -108,7 +111,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
info.max_reserve.store(1, std::memory_order_relaxed);
info.create_time = test_start - absl::Hours(20);
- info.PrepareForSampling(test_element_size);
+ info.PrepareForSampling(test_stride * 2, test_element_size);
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
@@ -119,6 +122,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
EXPECT_EQ(info.max_reserve.load(), 0);
+ EXPECT_EQ(info.weight, 2 * test_stride);
EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_GE(info.create_time, test_start);
}
@@ -126,8 +130,9 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
TEST(HashtablezInfoTest, RecordStorageChanged) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
+ const int64_t test_stride = 21;
const size_t test_element_size = 19;
- info.PrepareForSampling(test_element_size);
+ info.PrepareForSampling(test_stride, test_element_size);
RecordStorageChangedSlow(&info, 17, 47);
EXPECT_EQ(info.size.load(), 17);
EXPECT_EQ(info.capacity.load(), 47);
@@ -139,8 +144,9 @@ TEST(HashtablezInfoTest, RecordStorageChanged) {
TEST(HashtablezInfoTest, RecordInsert) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
+ const int64_t test_stride = 25;
const size_t test_element_size = 23;
- info.PrepareForSampling(test_element_size);
+ info.PrepareForSampling(test_stride, test_element_size);
EXPECT_EQ(info.max_probe_length.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
EXPECT_EQ(info.max_probe_length.load(), 6);
@@ -160,10 +166,11 @@ TEST(HashtablezInfoTest, RecordInsert) {
}
TEST(HashtablezInfoTest, RecordErase) {
+ const int64_t test_stride = 31;
const size_t test_element_size = 29;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling(test_element_size);
+ info.PrepareForSampling(test_stride, test_element_size);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.size.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -175,10 +182,11 @@ TEST(HashtablezInfoTest, RecordErase) {
}
TEST(HashtablezInfoTest, RecordRehash) {
+ const int64_t test_stride = 33;
const size_t test_element_size = 31;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling(test_element_size);
+ info.PrepareForSampling(test_stride, test_element_size);
RecordInsertSlow(&info, 0x1, 0);
RecordInsertSlow(&info, 0x2, kProbeLength);
RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -203,8 +211,9 @@ TEST(HashtablezInfoTest, RecordRehash) {
TEST(HashtablezInfoTest, RecordReservation) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
+ const int64_t test_stride = 35;
const size_t test_element_size = 33;
- info.PrepareForSampling(test_element_size);
+ info.PrepareForSampling(test_stride, test_element_size);
RecordReservationSlow(&info, 3);
EXPECT_EQ(info.max_reserve.load(), 3);
@@ -224,9 +233,10 @@ TEST(HashtablezSamplerTest, SmallSampleParameter) {
SetHashtablezSampleParameter(100);
for (int i = 0; i < 1000; ++i) {
- int64_t next_sample = 0;
- HashtablezInfo* sample = SampleSlow(&next_sample, test_element_size);
- EXPECT_GT(next_sample, 0);
+ SamplingState next_sample = {0, 0};
+ HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+ EXPECT_GT(next_sample.next_sample, 0);
+ EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
}
@@ -238,9 +248,10 @@ TEST(HashtablezSamplerTest, LargeSampleParameter) {
SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
for (int i = 0; i < 1000; ++i) {
- int64_t next_sample = 0;
- HashtablezInfo* sample = SampleSlow(&next_sample, test_element_size);
- EXPECT_GT(next_sample, 0);
+ SamplingState next_sample = {0, 0};
+ HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+ EXPECT_GT(next_sample.next_sample, 0);
+ EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
}
@@ -267,14 +278,16 @@ TEST(HashtablezSamplerTest, Sample) {
TEST(HashtablezSamplerTest, Handle) {
auto& sampler = GlobalHashtablezSampler();
+ const int64_t test_stride = 41;
const size_t test_element_size = 39;
- HashtablezInfoHandle h(sampler.Register(test_element_size));
+ HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size));
auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
bool found = false;
sampler.Iterate([&](const HashtablezInfo& h) {
if (&h == info) {
+ EXPECT_EQ(h.weight, test_stride);
EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
found = true;
}
@@ -340,19 +353,20 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
ThreadPool pool(10);
for (int i = 0; i < 10; ++i) {
+ const int64_t sampling_stride = 11 + i % 3;
const size_t elt_size = 10 + i % 2;
- pool.Schedule([&sampler, &stop, elt_size]() {
+ pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() {
std::random_device rd;
std::mt19937 gen(rd());
std::vector<HashtablezInfo*> infoz;
while (!stop.HasBeenNotified()) {
if (infoz.empty()) {
- infoz.push_back(sampler.Register(elt_size));
+ infoz.push_back(sampler.Register(sampling_stride, elt_size));
}
switch (std::uniform_int_distribution<>(0, 2)(gen)) {
case 0: {
- infoz.push_back(sampler.Register(elt_size));
+ infoz.push_back(sampler.Register(sampling_stride, elt_size));
break;
}
case 1: {
@@ -361,6 +375,7 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
HashtablezInfo* info = infoz[p];
infoz[p] = infoz.back();
infoz.pop_back();
+ EXPECT_EQ(info->weight, sampling_stride);
sampler.Unregister(info);
break;
}