author Dmitry Vyukov <dvyukov@google.com> 2023-10-27 00:37:27 -0700
committer Copybara-Service <copybara-worker@google.com> 2023-10-27 00:38:09 -0700
commit cdb1e7f4a60a6d0ee038f733776356a50121265a (patch)
tree c677456c22d00eff7fbf5876a4313f165002de4b
parent c8087ae8bd1e8b25f4e2f38e903b1360005dc4e1 (diff)
Mutex: Remove destructor in release build
The Mutex destructor is needed only to clean up debug logging and invariant
checking synch events. These are not supposed to be used in production, but
the non-empty destructor has costs for production builds.

Instead of removing synch events in the destructor, drop all of them if we
have accumulated too many. For tests it should not matter (we may only
consume a bit more memory). Production builds should be either unaffected
(if they don't use debug logging), or use the periodic reset of all synch
events.

PiperOrigin-RevId: 577106805
Change-Id: Icaaf7166b99afcd5dce92b4acd1be661fb72f10b
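To see the cost being removed, consider a scoped Mutex in hot code. The
following is a minimal sketch, not part of the commit: before this change,
~Mutex() inspected the mutex word for a registered synch event; after it,
release builds (NDEBUG, no TSan) get an empty inline Dtor() that the
optimizer can fold away entirely.

    #include "absl/synchronization/mutex.h"

    // A fresh Mutex created and destroyed per call, as in hot production code.
    void DoWork() {
      absl::Mutex mu;
      absl::MutexLock lock(&mu);
      // ... critical section ...
    }  // ~MutexLock() unlocks; ~Mutex() is now trivial in release builds.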
-rw-r--r--absl/synchronization/mutex.cc150
-rw-r--r--absl/synchronization/mutex.h11
-rw-r--r--absl/synchronization/mutex_test.cc27
3 files changed, 113 insertions(+), 75 deletions(-)
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 47032677..c733ff4e 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -202,31 +202,23 @@ int MutexDelay(int32_t c, int mode) {
// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
// "*pv | bits" if necessary. Wait until (*pv & wait_until_clear)==0
// before making any change.
+// Returns true if bits were previously unset and set by the call.
// This is used to set flags in mutex and condition variable words.
-static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
+static bool AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
intptr_t wait_until_clear) {
- intptr_t v;
- do {
- v = pv->load(std::memory_order_relaxed);
- } while ((v & bits) != bits &&
- ((v & wait_until_clear) != 0 ||
- !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
- std::memory_order_relaxed)));
-}
-
-// Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
-// "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
-// before making any change.
-// This is used to unset flags in mutex and condition variable words.
-static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
- intptr_t wait_until_clear) {
- intptr_t v;
- do {
- v = pv->load(std::memory_order_relaxed);
- } while ((v & bits) != 0 &&
- ((v & wait_until_clear) != 0 ||
- !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release,
- std::memory_order_relaxed)));
+ for (;;) {
+ intptr_t v = pv->load(std::memory_order_relaxed);
+ if ((v & bits) == bits) {
+ return false;
+ }
+ if ((v & wait_until_clear) != 0) {
+ continue;
+ }
+ if (pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
+ std::memory_order_relaxed)) {
+ return true;
+ }
+ }
}
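The new boolean gives callers a cheap "first setter" signal. Below is a
standalone sketch of the same pattern over a plain std::atomic; the names
are illustrative, not Abseil's:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Spin while a lock bit is held; set `bits` and report whether this call
    // performed the unset -> set transition.
    static bool SetBitsOnce(std::atomic<std::intptr_t>* pv, std::intptr_t bits,
                            std::intptr_t lockbit) {
      for (;;) {
        std::intptr_t v = pv->load(std::memory_order_relaxed);
        if ((v & bits) == bits) return false;  // already set by someone else
        if ((v & lockbit) != 0) continue;      // wait for the lock bit to clear
        if (pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
                                      std::memory_order_relaxed)) {
          return true;  // this call set the bits
        }
      }
    }

    int main() {
      std::atomic<std::intptr_t> word{0};
      assert(SetBitsOnce(&word, 1, 2));   // first call sets the bit
      assert(!SetBitsOnce(&word, 1, 2));  // second call sees it already set
    }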
//------------------------------------------------------------------
@@ -337,12 +329,49 @@ static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
const char* name, intptr_t bits,
intptr_t lockbit) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent* e;
- // first look for existing SynchEvent struct..
synch_event_mu.Lock();
- for (e = synch_event[h];
- e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
- e = e->next) {
+ // When a Mutex/CondVar is destroyed, we don't remove the associated
+ // SynchEvent to keep destructors empty in release builds for performance
+ // reasons. If the current call is the first to set bits (kMuEvent/kCVEvent),
+ // we don't look up the existing event, because if one exists it must belong
+ // to a previous Mutex/CondVar that existed at the same address.
+ // The leaked events should not be a problem for tests, which should create
+ // only a bounded number of events. And debug logging is not supposed to be
+ // enabled in production. However, if it's accidentally enabled, or briefly
+ // enabled for some debugging, we don't want to crash the program. Instead
+ // we drop all events if we have accumulated too many of them. A single
+ // event is ~48 bytes, so 100K events are ~5 MB.
+ // Additionally we could delete the old event for the same address, but that
+ // would require a better hashmap (if we accumulate too many events, the
+ // linked lists grow and traversing them becomes very slow).
+ constexpr size_t kMaxSynchEventCount = 100 << 10;
+ // Total number of live synch events.
+ static size_t synch_event_count ABSL_GUARDED_BY(synch_event_mu);
+ if (++synch_event_count > kMaxSynchEventCount) {
+ synch_event_count = 0;
+ ABSL_RAW_LOG(ERROR,
+ "Accumulated %zu Mutex debug objects. If you see this"
+ " in production, it may mean that the production code"
+ " accidentally calls "
+ "Mutex/CondVar::EnableDebugLog/EnableInvariantDebugging.",
+ kMaxSynchEventCount);
+ for (auto*& head : synch_event) {
+ for (auto* e = head; e != nullptr;) {
+ SynchEvent* next = e->next;
+ if (--(e->refcount) == 0) {
+ base_internal::LowLevelAlloc::Free(e);
+ }
+ e = next;
+ }
+ head = nullptr;
+ }
+ }
+ SynchEvent* e = nullptr;
+ if (!AtomicSetBits(addr, bits, lockbit)) {
+ for (e = synch_event[h];
+ e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+ e = e->next) {
+ }
}
if (e == nullptr) { // no SynchEvent struct found; make one.
if (name == nullptr) {
@@ -358,7 +387,6 @@ static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
e->log = false;
strcpy(e->name, name); // NOLINT(runtime/printf)
e->next = synch_event[h];
- AtomicSetBits(addr, bits, lockbit);
synch_event[h] = e;
} else {
e->refcount++; // for return value
@@ -367,11 +395,6 @@ static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
return e;
}
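As a back-of-the-envelope check of the "~5 MB" claim in the comment above
(a sketch, not from the commit):

    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr std::size_t kMaxSynchEventCount = 100 << 10;  // 102400 events
      constexpr std::size_t kApproxEventSize = 48;            // bytes each
      constexpr std::size_t total = kMaxSynchEventCount * kApproxEventSize;
      // Prints: 102400 events * 48 B = 4915200 B (~4.7 MiB)
      std::printf("%zu events * %zu B = %zu B (~%.1f MiB)\n",
                  kMaxSynchEventCount, kApproxEventSize, total,
                  total / (1024.0 * 1024.0));
    }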
-// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
-static void DeleteSynchEvent(SynchEvent* e) {
- base_internal::LowLevelAlloc::Free(e);
-}
-
// Decrement the reference count of *e, or do nothing if e==null.
static void UnrefSynchEvent(SynchEvent* e) {
if (e != nullptr) {
@@ -379,36 +402,11 @@ static void UnrefSynchEvent(SynchEvent* e) {
bool del = (--(e->refcount) == 0);
synch_event_mu.Unlock();
if (del) {
- DeleteSynchEvent(e);
+ base_internal::LowLevelAlloc::Free(e);
}
}
}
-// Forget the mapping from the object (Mutex or CondVar) at address addr
-// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
-// is clear before doing so).
-static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits,
- intptr_t lockbit) {
- uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent** pe;
- SynchEvent* e;
- synch_event_mu.Lock();
- for (pe = &synch_event[h];
- (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
- pe = &e->next) {
- }
- bool del = false;
- if (e != nullptr) {
- *pe = e->next;
- del = (--(e->refcount) == 0);
- }
- AtomicClearBits(addr, bits, lockbit);
- synch_event_mu.Unlock();
- if (del) {
- DeleteSynchEvent(e);
- }
-}
-
// Return a refcounted reference to the SynchEvent of the object at address
// "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
// called.
@@ -732,25 +730,35 @@ static unsigned TsanFlags(Mutex::MuHow how) {
}
#endif
-static bool DebugOnlyIsExiting() {
- return false;
-}
+#if defined(__APPLE__) || defined(ABSL_BUILD_DLL)
+// When building a DLL, symbol export lists may reference the destructor
+// and want it to be an exported symbol rather than an inline function.
+// Some Apple builds also build a dynamic library but don't say so explicitly.
+Mutex::~Mutex() { Dtor(); }
+#endif
-Mutex::~Mutex() {
- intptr_t v = mu_.load(std::memory_order_relaxed);
- if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
- ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
- }
+#if !defined(NDEBUG) || defined(ABSL_HAVE_THREAD_SANITIZER)
+void Mutex::Dtor() {
if (kDebugMode) {
this->ForgetDeadlockInfo();
}
ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
}
+#endif
void Mutex::EnableDebugLog(const char* name) {
SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
e->log = true;
UnrefSynchEvent(e);
+ // This prevents "error: undefined symbol: absl::Mutex::~Mutex()"
+ // in a release build (NDEBUG defined) when a test does "#undef NDEBUG"
+ // to use the assert macro. In such a case, the test does not get the dtor
+ // definition, because it's supposed to be out-of-line when NDEBUG is not
+ // defined, and this source file does not define one either because NDEBUG
+ // is defined. Since it's not possible to take the address of a destructor,
+ // we move the actual destructor code into a separate Dtor function and
+ // force the compiler to emit this function even if it's inline by taking
+ // its address.
+ ABSL_ATTRIBUTE_UNUSED volatile auto dtor = &Mutex::Dtor;
}
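The address-taking trick generalizes beyond destructors. A self-contained
sketch with hypothetical names (not Abseil's):

    // Widget::Dtor is implicitly inline; taking its address forces the
    // compiler to emit a standalone, linkable definition in this TU.
    struct Widget {
      ~Widget() { Dtor(); }
      void Dtor() {}  // imagine: empty in release, real cleanup in debug
    };

    void KeepDtorAlive() {
      // volatile keeps the otherwise-dead store from being optimized away;
      // the real code uses ABSL_ATTRIBUTE_UNUSED for the same effect.
      [[maybe_unused]] volatile auto fn = &Widget::Dtor;
    }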
void EnableMutexInvariantDebugging(bool enabled) {
@@ -2470,12 +2478,6 @@ void CondVar::EnableDebugLog(const char* name) {
UnrefSynchEvent(e);
}
-CondVar::~CondVar() {
- if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
- ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
- }
-}
-
// Remove thread s from the list of waiters on this condition variable.
void CondVar::Remove(PerThreadSynch* s) {
SchedulingGuard::ScopedDisable disable_rescheduling;
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index 95726f6b..d8264c03 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -536,6 +536,7 @@ class ABSL_LOCKABLE Mutex {
void Block(base_internal::PerThreadSynch* s);
// Wake a thread; return successor.
base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
+ void Dtor();
friend class CondVar; // for access to Trans()/Fer().
void Trans(MuHow how); // used for CondVar->Mutex transfer
@@ -909,7 +910,6 @@ class CondVar {
// A `CondVar` allocated on the heap or on the stack can use this
// constructor.
CondVar();
- ~CondVar();
// CondVar::Wait()
//
@@ -1061,6 +1061,15 @@ inline Mutex::Mutex() : mu_(0) {
inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}
+#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL)
+inline Mutex::~Mutex() { Dtor(); }
+#endif
+
+#if defined(NDEBUG) && !defined(ABSL_HAVE_THREAD_SANITIZER)
+// Use an empty destructor in release builds for performance reasons.
+inline void Mutex::Dtor() {}
+#endif
+
inline CondVar::CondVar() : cv_(0) {}
// static
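Putting the header-side pieces together: the destructor always forwards to
Dtor(), and the build mode alone decides whether Dtor() is an empty inline
or a real out-of-line function. A sketch on a hypothetical class, not
Abseil's exact layout:

    #include <cstdio>

    class Tracked {
     public:
      ~Tracked() { Dtor(); }  // always forwards; the body depends on build mode
     private:
      void Dtor();
    };

    #if !defined(NDEBUG)
    // Debug builds: real cleanup, defined out-of-line (normally in a .cc file).
    void Tracked::Dtor() { std::puts("debug-mode cleanup"); }
    #else
    // Release builds: empty and inline, so the optimizer removes the call.
    inline void Tracked::Dtor() {}
    #endif

    int main() { Tracked t; }  // teardown is a no-op in release builds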
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index 6c38c07c..35b43334 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -1709,6 +1709,33 @@ TEST(Mutex, Logging) {
logged_cv.SignalAll();
}
+TEST(Mutex, LoggingAddressReuse) {
+ // Repeatedly re-create a Mutex with debug logging at the same address.
+ alignas(absl::Mutex) char storage[sizeof(absl::Mutex)];
+ auto invariant =
+ +[](void *alive) { EXPECT_TRUE(*static_cast<bool *>(alive)); };
+ constexpr size_t kIters = 10;
+ bool alive[kIters] = {};
+ for (size_t i = 0; i < kIters; ++i) {
+ absl::Mutex *mu = new (storage) absl::Mutex;
+ alive[i] = true;
+ mu->EnableDebugLog("Mutex");
+ mu->EnableInvariantDebugging(invariant, &alive[i]);
+ mu->Lock();
+ mu->Unlock();
+ mu->~Mutex();
+ alive[i] = false;
+ }
+}
+
+TEST(Mutex, LoggingBankrupcy) {
+ // Test the case of too many live Mutexes with debug logging enabled.
+ std::vector<absl::Mutex> mus(1 << 20);
+ for (auto &mu : mus) {
+ mu.EnableDebugLog("Mutex");
+ }
+}
+
// --------------------------------------------------------
// Generate the vector of thread counts for tests parameterized on thread count.