summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGravatar Abseil Team <absl-team@google.com>2021-12-01 15:08:07 -0800
committerGravatar Andy Getz <durandal@google.com>2021-12-02 14:11:24 -0500
commite11e039e02ef30a98a7928ce6c59cebe096dd753 (patch)
tree40e34c4abd819883b86b423ebe5ef77b960e82af
parentcad715dbf375a336fcb0b2b0ef57fc2b7a1b0b4b (diff)
Export of internal Abseil changes
-- 08d99ee216b7bfac1c5182db952d4e053e5ebc31 by Abseil Team <absl-team@google.com>: Fix race condition reported by tsan on `inline_element_size` in hashtablez. PiperOrigin-RevId: 413520771 GitOrigin-RevId: 08d99ee216b7bfac1c5182db952d4e053e5ebc31 Change-Id: Ibd396803f04a659cfbdb8dc7ec37511643657694
-rw-r--r--absl/container/internal/hashtablez_sampler.cc6
-rw-r--r--absl/container/internal/hashtablez_sampler.h19
-rw-r--r--absl/container/internal/hashtablez_sampler_test.cc14
-rw-r--r--absl/container/internal/raw_hash_set_test.cc3
-rw-r--r--absl/container/sample_element_size_test.cc3
5 files changed, 30 insertions, 15 deletions
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 1a3ca7cc..a3e15a70 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -111,7 +111,8 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
*next_sample = 1;
HashtablezInfo* result = GlobalHashtablezSampler().Register();
- result->inline_element_size = inline_element_size;
+ result->inline_element_size.store(inline_element_size,
+ std::memory_order_relaxed);
return result;
}
@@ -138,7 +139,8 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
}
HashtablezInfo* result = GlobalHashtablezSampler().Register();
- result->inline_element_size = inline_element_size;
+ result->inline_element_size.store(inline_element_size,
+ std::memory_order_relaxed);
return result;
#endif
}
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index ee4d293e..0fd9349f 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -82,16 +82,27 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
std::atomic<size_t> hashes_bitwise_xor;
std::atomic<size_t> max_reserve;
+ // One could imagine that inline_element_size could be non-atomic, since it
+ // *almost* follows the rules for the fields that are set by
+ // `PrepareForSampling`. However, TSAN reports a race (see b/207323922) in
+ // which
+ // A: Thread 1: Register() returns, unlocking init_mu.
+ // B: Thread 2: Iterate() is called, locking init_mu.
// C: Thread 1: inline_element_size is stored.
// D: Thread 2: inline_element_size is accessed (a race).
// A simple solution is to make inline_element_size atomic so that we treat
// it as we do the other atomic fields.
+ std::atomic<size_t> inline_element_size;
+
// All of the fields below are set by `PrepareForSampling`, they must not be
// mutated in `Record*` functions. They are logically `const` in that sense.
- // These are guarded by init_mu, but that is not externalized to clients, who
- // can only read them during `HashtablezSampler::Iterate` which will hold the
- // lock.
+ // These are guarded by init_mu, but that is not externalized to clients,
+ // which can read them only during `SampleRecorder::Iterate` which will hold
+ // the lock.
static constexpr int kMaxStackDepth = 64;
absl::Time create_time;
int32_t depth;
void* stack[kMaxStackDepth];
- size_t inline_element_size;
};
inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index 449619a3..a7307a20 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -83,7 +83,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
absl::MutexLock l(&info.init_mu);
info.PrepareForSampling();
- info.inline_element_size = test_element_size;
+ info.inline_element_size.store(test_element_size, std::memory_order_relaxed);
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
@@ -95,7 +95,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
EXPECT_EQ(info.max_reserve.load(), 0);
EXPECT_GE(info.create_time, test_start);
- EXPECT_EQ(info.inline_element_size, test_element_size);
+ EXPECT_EQ(info.inline_element_size.load(), test_element_size);
info.capacity.store(1, std::memory_order_relaxed);
info.size.store(1, std::memory_order_relaxed);
@@ -119,7 +119,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
EXPECT_EQ(info.max_reserve.load(), 0);
- EXPECT_EQ(info.inline_element_size, test_element_size);
+ EXPECT_EQ(info.inline_element_size.load(), test_element_size);
EXPECT_GE(info.create_time, test_start);
}
@@ -162,7 +162,7 @@ TEST(HashtablezInfoTest, RecordErase) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
info.PrepareForSampling();
- info.inline_element_size = test_element_size;
+ info.inline_element_size.store(test_element_size);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.size.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -170,7 +170,7 @@ TEST(HashtablezInfoTest, RecordErase) {
RecordEraseSlow(&info);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 1);
- EXPECT_EQ(info.inline_element_size, test_element_size);
+ EXPECT_EQ(info.inline_element_size.load(), test_element_size);
}
TEST(HashtablezInfoTest, RecordRehash) {
@@ -178,7 +178,7 @@ TEST(HashtablezInfoTest, RecordRehash) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
info.PrepareForSampling();
- info.inline_element_size = test_element_size;
+ info.inline_element_size.store(test_element_size);
RecordInsertSlow(&info, 0x1, 0);
RecordInsertSlow(&info, 0x2, kProbeLength);
RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -197,7 +197,7 @@ TEST(HashtablezInfoTest, RecordRehash) {
EXPECT_EQ(info.total_probe_length.load(), 3);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 1);
- EXPECT_EQ(info.inline_element_size, test_element_size);
+ EXPECT_EQ(info.inline_element_size.load(), test_element_size);
}
TEST(HashtablezInfoTest, RecordReservation) {
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 362b3cae..4b9f6cc0 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2075,7 +2075,8 @@ TEST(RawHashSamplerTest, Sample) {
std::memory_order_relaxed)]++;
reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
}
- EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
+ EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
+ sizeof(int64_t));
++end_size;
});
diff --git a/absl/container/sample_element_size_test.cc b/absl/container/sample_element_size_test.cc
index b23626b4..4bbef210 100644
--- a/absl/container/sample_element_size_test.cc
+++ b/absl/container/sample_element_size_test.cc
@@ -51,7 +51,8 @@ void TestInlineElementSize(
size_t new_count = 0;
sampler.Iterate([&](const HashtablezInfo& info) {
if (preexisting_info.insert(&info).second) {
- EXPECT_EQ(info.inline_element_size, expected_element_size);
+ EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
+ expected_element_size);
++new_count;
}
});