author    Dmitry Vyukov <dvyukov@google.com>  2023-12-19 07:10:16 -0800
committer Copybara-Service <copybara-worker@google.com>  2023-12-19 07:11:23 -0800
commit    e54fb4e1440c3c1d6933ceec2b36012228507719 (patch)
tree      a133a587cc0b9443c4e143d776710a93030d7f39 /absl
parent    bae260199fffe782d5a5414bb80cfe49abb1436f (diff)
Mutex: Prevent false race in EnableInvariantDebugging.
The added test exposes a false TSan race report in
EnableInvariantDebugging/EnableDebugLog related to SynchEvent reuse.

We ignore most of what happens inside the Mutex code, but not the code inside
EnableInvariantDebugging/EnableDebugLog, so these can cause occasional false
reports on SynchEvent bankruptcy. Also ignore accesses in
EnableInvariantDebugging/EnableDebugLog.

PiperOrigin-RevId: 592226791
Change-Id: I066edb1ef5661ba6cf86a195f91a9d5328b93d10
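For readers unfamiliar with the annotation used in the fix: ABSL_ANNOTATE_IGNORE_WRITES_BEGIN/END, declared in absl/base/dynamic_annotations.h and used in the hunks below, bracket a region whose writes TSan should not report as races. The snippet that follows is only a minimal standalone sketch of that bracketing pattern, assuming the macros are no-ops in non-sanitizer builds; DebugEvent, g_event, and RegisterDebugLogging are illustrative names, not Abseil code.

#include "absl/base/dynamic_annotations.h"

// Illustrative stand-in for a reusable event record such as SynchEvent.
struct DebugEvent {
  bool log = false;
  void (*invariant)(void*) = nullptr;
  void* arg = nullptr;
};

DebugEvent g_event;  // a slot that may be handed out again after reuse

void RegisterDebugLogging(void (*invariant)(void*), void* arg) {
  // Writes inside this bracket are excluded from TSan's race detection.
  // In the patch, the bracketed writes are to SynchEvent fields that are in
  // fact serialized by synch_event_mu, whose synchronization TSan ignores
  // because of the Mutex annotations (see the comment added in mutex.cc).
  ABSL_ANNOTATE_IGNORE_WRITES_BEGIN();
  g_event.log = true;
  g_event.invariant = invariant;
  g_event.arg = arg;
  ABSL_ANNOTATE_IGNORE_WRITES_END();
}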
Diffstat (limited to 'absl')
-rw-r--r--  absl/synchronization/mutex.cc       10
-rw-r--r--  absl/synchronization/mutex_test.cc  36
2 files changed, 44 insertions, 2 deletions
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index cb751927..98574c0b 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -748,6 +748,13 @@ void Mutex::Dtor() {
#endif
void Mutex::EnableDebugLog(const char* name) {
+ // Need to disable writes here and in EnableInvariantDebugging to prevent
+ // false race reports on SynchEvent objects. TSan ignores synchronization
+ // on synch_event_mu in Lock/Unlock/etc methods due to mutex annotations,
+ // but it sees a few accesses to SynchEvent in EvalConditionAnnotated.
+ // If we don't ignore accesses here, it can result in false races
+ // between EvalConditionAnnotated and SynchEvent reuse in EnsureSynchEvent.
+ ABSL_ANNOTATE_IGNORE_WRITES_BEGIN();
SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
e->log = true;
UnrefSynchEvent(e);
@@ -760,6 +767,7 @@ void Mutex::EnableDebugLog(const char* name) {
// actual destructor code into the separate Dtor function and force the
// compiler to emit this function even if it's inline by taking its address.
ABSL_ATTRIBUTE_UNUSED volatile auto dtor = &Mutex::Dtor;
+ ABSL_ANNOTATE_IGNORE_WRITES_END();
}
void EnableMutexInvariantDebugging(bool enabled) {
@@ -767,6 +775,7 @@ void EnableMutexInvariantDebugging(bool enabled) {
}
void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) {
+ ABSL_ANNOTATE_IGNORE_WRITES_BEGIN();
if (synch_check_invariants.load(std::memory_order_acquire) &&
invariant != nullptr) {
SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
@@ -774,6 +783,7 @@ void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) {
e->arg = arg;
UnrefSynchEvent(e);
}
+ ABSL_ANNOTATE_IGNORE_WRITES_END();
}
void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
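For context, the two methods annotated above are opt-in debugging hooks on absl::Mutex. The sketch below shows how they are typically enabled, using only the API visible in this diff; Counter, CheckCounterInvariant, and SetUpMutexDebugging are hypothetical names, not part of the patch.

#include <cassert>

#include "absl/synchronization/mutex.h"

// Hypothetical guarded structure used only for illustration.
struct Counter {
  absl::Mutex mu;
  int value = 0;  // invariant: value is never negative
};

// Invoked by Mutex at points where the invariant should hold, once
// invariant debugging is active for mu.
static void CheckCounterInvariant(void* arg) {
  auto* c = static_cast<Counter*>(arg);
  assert(c->value >= 0);
}

void SetUpMutexDebugging(Counter* c) {
  // The global switch must be flipped first; per the check on
  // synch_check_invariants above, the per-mutex call is otherwise a no-op.
  absl::EnableMutexInvariantDebugging(true);
  c->mu.EnableDebugLog("Counter.mu");  // log subsequent operations on mu
  c->mu.EnableInvariantDebugging(CheckCounterInvariant, c);
}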
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index a8d75827..bb93abc9 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -72,6 +72,11 @@ static void ScheduleAfter(absl::synchronization_internal::ThreadPool *tp,
});
}
+struct ScopedInvariantDebugging {
+ ScopedInvariantDebugging() { absl::EnableMutexInvariantDebugging(true); }
+ ~ScopedInvariantDebugging() { absl::EnableMutexInvariantDebugging(false); }
+};
+
struct TestContext {
int iterations;
int threads;
@@ -395,13 +400,12 @@ static int RunTestWithInvariantDebugging(void (*test)(TestContext *cxt, int),
int threads, int iterations,
int operations,
void (*invariant)(void *)) {
- absl::EnableMutexInvariantDebugging(true);
+ ScopedInvariantDebugging scoped_debugging;
SetInvariantChecked(false);
TestContext cxt;
cxt.mu.EnableInvariantDebugging(invariant, &cxt);
int ret = RunTestCommon(&cxt, test, threads, iterations, operations);
CHECK(GetInvariantChecked()) << "Invariant not checked";
- absl::EnableMutexInvariantDebugging(false); // Restore.
return ret;
}
#endif
@@ -1720,6 +1724,7 @@ TEST(Mutex, Logging) {
TEST(Mutex, LoggingAddressReuse) {
// Repeatedly re-create a Mutex with debug logging at the same address.
+ ScopedInvariantDebugging scoped_debugging;
alignas(absl::Mutex) char storage[sizeof(absl::Mutex)];
auto invariant =
+[](void *alive) { EXPECT_TRUE(*static_cast<bool *>(alive)); };
@@ -1739,12 +1744,39 @@ TEST(Mutex, LoggingAddressReuse) {
TEST(Mutex, LoggingBankrupcy) {
// Test the case with too many live Mutexes with debug logging.
+ ScopedInvariantDebugging scoped_debugging;
std::vector<absl::Mutex> mus(1 << 20);
for (auto &mu : mus) {
mu.EnableDebugLog("Mutex");
}
}
+TEST(Mutex, SynchEventRace) {
+ // Regression test for a false TSan race report in
+ // EnableInvariantDebugging/EnableDebugLog related to SynchEvent reuse.
+ ScopedInvariantDebugging scoped_debugging;
+ std::vector<std::thread> threads;
+ for (size_t i = 0; i < 5; i++) {
+ threads.emplace_back([&] {
+ for (size_t j = 0; j < (1 << 17); j++) {
+ {
+ absl::Mutex mu;
+ mu.EnableInvariantDebugging([](void *) {}, nullptr);
+ mu.Lock();
+ mu.Unlock();
+ }
+ {
+ absl::Mutex mu;
+ mu.EnableDebugLog("Mutex");
+ }
+ }
+ });
+ }
+ for (auto &thread : threads) {
+ thread.join();
+ }
+}
+
// --------------------------------------------------------
// Generate the vector of thread counts for tests parameterized on thread count.