author    Abseil Team <absl-team@google.com>  2021-10-27 13:22:18 -0700
committer Andy Getz <durandal@google.com>     2021-10-28 00:09:21 -0400
commit    cc413f8b674d61e3aa948386432e526e051afca0 (patch)
tree      87292c6510661a0443c346c7b8be360c824647a4 /absl/container/internal
parent    f70eadadd7767c3a97774b63c4c23981fa89af9f (diff)
Export of internal Abseil changes
--
05a099a580753f8e96cee38572e94dcdc079361b by Abseil Team <absl-team@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 405966217

--
c6b81e9ebc183d8389f14ecd091c8bad08cfe0aa by Abseil Team <absl-team@google.com>:

Add `inline_element_size` to hashtablez (so that we can compute the weighted
load factors properly e.g., in b/187896534).

PiperOrigin-RevId: 405917711

--
3e3673de4e54e4142c54b09e1644dfa3de4bb296 by Abseil Team <absl-team@google.com>:

align indent of code comment in mutex.h

PiperOrigin-RevId: 405871997

--
2248301a5b14f8d2be5b2e9088f3528a353ea491 by Derek Mauro <dmauro@google.com>:

Internal change

PiperOrigin-RevId: 405639236

--
bc7d3c56fdad3dde4b89324af142529f2afe5f1b by Abseil Team <absl-team@google.com>:

Import of CCTZ from GitHub.

PiperOrigin-RevId: 405508045

--
66472387276ef02505d99195747be862768bb35b by Laramie Leavitt <lar@google.com>:

Also use uint8_t golden values in randen_test.cc

This makes randen_test, randen_slow_test, and randen_hwaes_test essentially
identical, as is the intent.

PiperOrigin-RevId: 405484423

GitOrigin-RevId: 05a099a580753f8e96cee38572e94dcdc079361b
Change-Id: I3dd5b0cfdb98d6e1ab02266194ba67d15428c2f8
Diffstat (limited to 'absl/container/internal')
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc       | 15
-rw-r--r--  absl/container/internal/hashtablez_sampler.h        |  9
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc  | 19
-rw-r--r--  absl/container/internal/raw_hash_set.h              |  2
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc        |  1
5 files changed, 35 insertions, 11 deletions
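
The notable change in this export is the hashtablez one: each sampled table now
records `inline_element_size` (the size of one inline slot), which is what makes
a size-weighted load factor computable from the samples. The sketch below shows
one way such a weighted aggregate could be computed; it assumes the weighting is
by inline bytes, and the SampledTable struct and WeightedLoadFactor helper are
illustrative stand-ins, not part of this commit.

#include <cstddef>
#include <vector>

// Illustrative mirror of the sampled fields used here; the real data lives in
// absl::container_internal::HashtablezInfo.
struct SampledTable {
  std::size_t size;                 // number of elements in the table
  std::size_t capacity;             // number of slots
  std::size_t inline_element_size;  // sizeof(slot_type); the field this change adds
};

// Load factor averaged over samples, weighting each table by the bytes its
// inline slots occupy so that large-slot tables count proportionally more.
// The weighting scheme itself is an assumption about the intended analysis.
double WeightedLoadFactor(const std::vector<SampledTable>& samples) {
  double weighted_sum = 0.0;
  double total_weight = 0.0;
  for (const SampledTable& s : samples) {
    if (s.capacity == 0) continue;
    const double weight =
        static_cast<double>(s.capacity) * static_cast<double>(s.inline_element_size);
    weighted_sum += weight * static_cast<double>(s.size) / static_cast<double>(s.capacity);
    total_weight += weight;
  }
  return total_weight == 0.0 ? 0.0 : weighted_sum / total_weight;
}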
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 7070912e..40cce047 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -55,6 +55,9 @@ HashtablezSampler& GlobalHashtablezSampler() {
return *sampler;
}
+// TODO(bradleybear): The comments at this constructor's declaration say that the
+// fields are not initialized, but this definition does initialize the fields.
+// Something needs to be cleaned up.
HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
HashtablezInfo::~HashtablezInfo() = default;
@@ -98,10 +101,12 @@ static bool ShouldForceSampling() {
return state == kForce;
}
-HashtablezInfo* SampleSlow(int64_t* next_sample) {
+HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
*next_sample = 1;
- return GlobalHashtablezSampler().Register();
+ HashtablezInfo* result = GlobalHashtablezSampler().Register();
+ result->inline_element_size = inline_element_size;
+ return result;
}
#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -123,10 +128,12 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) {
// that case.
if (first) {
if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
- return SampleSlow(next_sample);
+ return SampleSlow(next_sample, inline_element_size);
}
- return GlobalHashtablezSampler().Register();
+ HashtablezInfo* result = GlobalHashtablezSampler().Register();
+ result->inline_element_size = inline_element_size;
+ return result;
#endif
}
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 812118e3..91fcdb34 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -91,6 +91,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
absl::Time create_time;
int32_t depth;
void* stack[kMaxStackDepth];
+ size_t inline_element_size;
};
inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
@@ -143,7 +144,7 @@ inline void RecordEraseSlow(HashtablezInfo* info) {
std::memory_order_relaxed);
}
-HashtablezInfo* SampleSlow(int64_t* next_sample);
+HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size);
void UnsampleSlow(HashtablezInfo* info);
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -238,12 +239,14 @@ extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.
-inline HashtablezInfoHandle Sample() {
+inline HashtablezInfoHandle Sample(
+ size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
return HashtablezInfoHandle(nullptr);
}
- return HashtablezInfoHandle(SampleSlow(&global_next_sample));
+ return HashtablezInfoHandle(
+ SampleSlow(&global_next_sample, inline_element_size));
#else
return HashtablezInfoHandle(nullptr);
#endif // !ABSL_PER_THREAD_TLS
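
To make the new `Sample()` signature concrete: a container passes the size of its
inline slot when it first allocates, and the returned RAII handle owns
registration and unregistration with the global sampler, with the sampled record
now carrying `inline_element_size`. Below is a minimal caller sketch; MyTable is
a placeholder type, not an Abseil name, and the real call site is the
raw_hash_set.h hunk further down this diff.

#include <cstddef>

#include "absl/container/internal/hashtablez_sampler.h"

// Placeholder container; only the sampling call is the point here.
template <class Slot>
class MyTable {
 public:
  void AllocateSlots() {
    // If this table is chosen for sampling, the handle registers a
    // HashtablezInfo record (which now carries inline_element_size) and
    // unregisters it again when the handle is destroyed.
    infoz_ = absl::container_internal::Sample(sizeof(Slot));
  }

 private:
  absl::container_internal::HashtablezInfoHandle infoz_;
};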
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index f053c19b..449619a3 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -78,10 +78,12 @@ HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
TEST(HashtablezInfoTest, PrepareForSampling) {
absl::Time test_start = absl::Now();
+ const size_t test_element_size = 17;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
info.PrepareForSampling();
+ info.inline_element_size = test_element_size;
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
@@ -93,6 +95,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
EXPECT_EQ(info.max_reserve.load(), 0);
EXPECT_GE(info.create_time, test_start);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
info.capacity.store(1, std::memory_order_relaxed);
info.size.store(1, std::memory_order_relaxed);
@@ -116,6 +119,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
EXPECT_EQ(info.max_reserve.load(), 0);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_GE(info.create_time, test_start);
}
@@ -154,9 +158,11 @@ TEST(HashtablezInfoTest, RecordInsert) {
}
TEST(HashtablezInfoTest, RecordErase) {
+ const size_t test_element_size = 29;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
info.PrepareForSampling();
+ info.inline_element_size = test_element_size;
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.size.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -164,12 +170,15 @@ TEST(HashtablezInfoTest, RecordErase) {
RecordEraseSlow(&info);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 1);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
}
TEST(HashtablezInfoTest, RecordRehash) {
+ const size_t test_element_size = 31;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
info.PrepareForSampling();
+ info.inline_element_size = test_element_size;
RecordInsertSlow(&info, 0x1, 0);
RecordInsertSlow(&info, 0x2, kProbeLength);
RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -188,6 +197,7 @@ TEST(HashtablezInfoTest, RecordRehash) {
EXPECT_EQ(info.total_probe_length.load(), 3);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 1);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
}
TEST(HashtablezInfoTest, RecordReservation) {
@@ -208,12 +218,13 @@ TEST(HashtablezInfoTest, RecordReservation) {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(HashtablezSamplerTest, SmallSampleParameter) {
+ const size_t test_element_size = 31;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
for (int i = 0; i < 1000; ++i) {
int64_t next_sample = 0;
- HashtablezInfo* sample = SampleSlow(&next_sample);
+ HashtablezInfo* sample = SampleSlow(&next_sample, test_element_size);
EXPECT_GT(next_sample, 0);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
@@ -221,12 +232,13 @@ TEST(HashtablezSamplerTest, SmallSampleParameter) {
}
TEST(HashtablezSamplerTest, LargeSampleParameter) {
+ const size_t test_element_size = 31;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
for (int i = 0; i < 1000; ++i) {
int64_t next_sample = 0;
- HashtablezInfo* sample = SampleSlow(&next_sample);
+ HashtablezInfo* sample = SampleSlow(&next_sample, test_element_size);
EXPECT_GT(next_sample, 0);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
@@ -234,13 +246,14 @@ TEST(HashtablezSamplerTest, LargeSampleParameter) {
}
TEST(HashtablezSamplerTest, Sample) {
+ const size_t test_element_size = 31;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
int64_t num_sampled = 0;
int64_t total = 0;
double sample_rate = 0.0;
for (int i = 0; i < 1000000; ++i) {
- HashtablezInfoHandle h = Sample();
+ HashtablezInfoHandle h = Sample(test_element_size);
++total;
if (HashtablezInfoHandlePeer::IsSampled(h)) {
++num_sampled;
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 47e5a228..12682b35 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -1643,7 +1643,7 @@ class raw_hash_set {
// bound more carefully.
if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
slots_ == nullptr) {
- infoz() = Sample();
+ infoz() = Sample(sizeof(slot_type));
}
char* mem = static_cast<char*>(Allocate<alignof(slot_type)>(
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index b46c4920..362b3cae 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2075,6 +2075,7 @@ TEST(RawHashSamplerTest, Sample) {
std::memory_order_relaxed)]++;
reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
}
+ EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
++end_size;
});
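
The test above walks the global sampler with an Iterate callback and now also
checks the recorded `inline_element_size`. Below is a small read-back sketch in
the same style, assuming the Iterate entry point used by the test is accessible
to the caller; the printf formatting is illustrative.

#include <atomic>
#include <cstdio>

#include "absl/container/internal/hashtablez_sampler.h"

// Walk every live sampled table and print the fields this change touches.
void DumpHashtablezSamples() {
  absl::container_internal::HashtablezSampler& sampler =
      absl::container_internal::GlobalHashtablezSampler();
  sampler.Iterate([](const absl::container_internal::HashtablezInfo& info) {
    std::printf("size=%zu capacity=%zu inline_element_size=%zu\n",
                info.size.load(std::memory_order_relaxed),
                info.capacity.load(std::memory_order_relaxed),
                info.inline_element_size);
  });
}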