author    Dmitry Vyukov <dvyukov@google.com>  2023-10-27 06:34:24 -0700
committer Copybara-Service <copybara-worker@google.com>  2023-10-27 06:35:17 -0700
commit    89d2caa104563ad8c2182282f5d4676f6747bd0e (patch)
tree      6459f3877e4aa1ab75946529cfafcafe4f02f966 /absl
parent    cdb1e7f4a60a6d0ee038f733776356a50121265a (diff)
Rollback "Mutex: Remove destructor in release build"
PiperOrigin-RevId: 577180526
Change-Id: Iec53709456805ca8dc5327669cc0f6c95825d0e9
Diffstat (limited to 'absl')
-rw-r--r--  absl/synchronization/mutex.cc      150
-rw-r--r--  absl/synchronization/mutex.h        11
-rw-r--r--  absl/synchronization/mutex_test.cc   27
3 files changed, 75 insertions, 113 deletions
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index c733ff4e..47032677 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -202,23 +202,31 @@ int MutexDelay(int32_t c, int mode) {
// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
// "*pv | bits" if necessary. Wait until (*pv & wait_until_clear)==0
// before making any change.
-// Returns true if bits were previously unset and set by the call.
// This is used to set flags in mutex and condition variable words.
-static bool AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
+static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
intptr_t wait_until_clear) {
- for (;;) {
- intptr_t v = pv->load(std::memory_order_relaxed);
- if ((v & bits) == bits) {
- return false;
- }
- if ((v & wait_until_clear) != 0) {
- continue;
- }
- if (pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
- std::memory_order_relaxed)) {
- return true;
- }
- }
+ intptr_t v;
+ do {
+ v = pv->load(std::memory_order_relaxed);
+ } while ((v & bits) != bits &&
+ ((v & wait_until_clear) != 0 ||
+ !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
+ std::memory_order_relaxed)));
+}
+
+// Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
+// "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
+// before making any change.
+// This is used to unset flags in mutex and condition variable words.
+static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
+ intptr_t wait_until_clear) {
+ intptr_t v;
+ do {
+ v = pv->load(std::memory_order_relaxed);
+ } while ((v & bits) != 0 &&
+ ((v & wait_until_clear) != 0 ||
+ !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release,
+ std::memory_order_relaxed)));
}
//------------------------------------------------------------------
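
The restored AtomicSetBits loop (and its new counterpart AtomicClearBits) spins while the lock bit is held and publishes the flag with a release CAS; relaxed ordering on the failure path is fine because the loop reloads the word before retrying. A minimal, self-contained sketch of the same pattern; SetBits, kFlag, and kLock are illustrative names, not Abseil's:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kFlag = 0x1;  // flag to set (cf. kMuEvent)
    constexpr intptr_t kLock = 0x2;  // spinlock bit (cf. kMuSpin)

    // Retry while the flag is still missing and either the lock bit is held
    // or the CAS lost a race with a concurrent update of the word.
    void SetBits(std::atomic<intptr_t>* pv, intptr_t bits,
                 intptr_t wait_until_clear) {
      intptr_t v;
      do {
        v = pv->load(std::memory_order_relaxed);
      } while ((v & bits) != bits &&
               ((v & wait_until_clear) != 0 ||
                !pv->compare_exchange_weak(v, v | bits,
                                           std::memory_order_release,
                                           std::memory_order_relaxed)));
    }

    int main() {
      std::atomic<intptr_t> word{0};
      SetBits(&word, kFlag, kLock);  // kLock is clear, so one CAS suffices
      assert(word.load(std::memory_order_relaxed) == kFlag);
      SetBits(&word, kFlag, kLock);  // flag already set: exits without a CAS
    }
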
@@ -329,49 +337,12 @@ static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
const char* name, intptr_t bits,
intptr_t lockbit) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
+ SynchEvent* e;
+ // first look for existing SynchEvent struct..
synch_event_mu.Lock();
- // When a Mutex/CondVar is destroyed, we don't remove the associated
- // SynchEvent to keep destructors empty in release builds for performance
- // reasons. If the current call is the first to set bits (kMuEvent/kCVEvent),
- // we don't look up the existing event because (if it exists, it must be for
- // the previous Mutex/CondVar that existed at the same address).
- // The leaking events must not be a problem for tests, which should create
- // bounded amount of events. And debug logging is not supposed to be enabled
- // in production. However, if it's accidentally enabled, or briefly enabled
- // for some debugging, we don't want to crash the program. Instead we drop
- // all events, if we accumulated too many of them. Size of a single event
- // is ~48 bytes, so 100K events is ~5 MB.
- // Additionally we could delete the old event for the same address,
- // but it would require a better hashmap (if we accumulate too many events,
- // linked lists will grow and traversing them will be very slow).
- constexpr size_t kMaxSynchEventCount = 100 << 10;
- // Total number of live synch events.
- static size_t synch_event_count ABSL_GUARDED_BY(synch_event_mu);
- if (++synch_event_count > kMaxSynchEventCount) {
- synch_event_count = 0;
- ABSL_RAW_LOG(ERROR,
- "Accumulated %zu Mutex debug objects. If you see this"
- " in production, it may mean that the production code"
- " accidentally calls "
- "Mutex/CondVar::EnableDebugLog/EnableInvariantDebugging.",
- kMaxSynchEventCount);
- for (auto*& head : synch_event) {
- for (auto* e = head; e != nullptr;) {
- SynchEvent* next = e->next;
- if (--(e->refcount) == 0) {
- base_internal::LowLevelAlloc::Free(e);
- }
- e = next;
- }
- head = nullptr;
- }
- }
- SynchEvent* e = nullptr;
- if (!AtomicSetBits(addr, bits, lockbit)) {
- for (e = synch_event[h];
- e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
- e = e->next) {
- }
+ for (e = synch_event[h];
+ e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+ e = e->next) {
}
if (e == nullptr) { // no SynchEvent struct found; make one.
if (name == nullptr) {
@@ -387,6 +358,7 @@ static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
e->log = false;
strcpy(e->name, name); // NOLINT(runtime/printf)
e->next = synch_event[h];
+ AtomicSetBits(addr, bits, lockbit);
synch_event[h] = e;
} else {
e->refcount++; // for return value
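
EnsureSynchEvent now always walks the bucket chain first and only calls AtomicSetBits once the new entry is linked in. A standalone sketch of the underlying structure, a fixed-size array of intrusively chained buckets keyed by object address (simplified: refcounts and HidePtr masking omitted; all names here are hypothetical):

    #include <cstdint>

    constexpr uint32_t kNumBuckets = 1031;  // fixed prime bucket count

    struct Event {
      const void* addr;  // address of the object this event describes
      Event* next;       // intrusive link within one bucket's chain
    };

    Event* buckets[kNumBuckets];  // assumed guarded by an external mutex

    Event* Find(const void* addr) {
      uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNumBuckets;
      Event* e = buckets[h];
      while (e != nullptr && e->addr != addr) e = e->next;
      return e;  // nullptr: no event recorded for this address
    }

    void Insert(Event* e) {
      uint32_t h = reinterpret_cast<uintptr_t>(e->addr) % kNumBuckets;
      e->next = buckets[h];  // push onto the head of the bucket's chain
      buckets[h] = e;
    }
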
@@ -395,6 +367,11 @@ static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
return e;
}
+// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
+static void DeleteSynchEvent(SynchEvent* e) {
+ base_internal::LowLevelAlloc::Free(e);
+}
+
// Decrement the reference count of *e, or do nothing if e==null.
static void UnrefSynchEvent(SynchEvent* e) {
if (e != nullptr) {
@@ -402,11 +379,36 @@ static void UnrefSynchEvent(SynchEvent* e) {
bool del = (--(e->refcount) == 0);
synch_event_mu.Unlock();
if (del) {
- base_internal::LowLevelAlloc::Free(e);
+ DeleteSynchEvent(e);
}
}
}
+// Forget the mapping from the object (Mutex or CondVar) at address addr
+// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
+// is clear before doing so).
+static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits,
+ intptr_t lockbit) {
+ uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
+ SynchEvent** pe;
+ SynchEvent* e;
+ synch_event_mu.Lock();
+ for (pe = &synch_event[h];
+ (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+ pe = &e->next) {
+ }
+ bool del = false;
+ if (e != nullptr) {
+ *pe = e->next;
+ del = (--(e->refcount) == 0);
+ }
+ AtomicClearBits(addr, bits, lockbit);
+ synch_event_mu.Unlock();
+ if (del) {
+ DeleteSynchEvent(e);
+ }
+}
+
// Return a refcounted reference to the SynchEvent of the object at address
// "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
// called.
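
Both UnrefSynchEvent and the new ForgetSynchEvent decide whether an entry is dead while holding synch_event_mu, but run DeleteSynchEvent only after unlocking, so the allocator is never entered inside the critical section. The same idiom in miniature, with hypothetical names and std::mutex standing in for the low-level lock:

    #include <mutex>

    struct Node { int refcount = 1; };
    std::mutex table_mu;

    void Unref(Node* n) {
      bool del = false;
      {
        std::lock_guard<std::mutex> lock(table_mu);
        del = (--n->refcount == 0);  // decide under the lock...
      }
      if (del) delete n;  // ...but deallocate after releasing it
    }
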
@@ -730,35 +732,25 @@ static unsigned TsanFlags(Mutex::MuHow how) {
}
#endif
-#if defined(__APPLE__) || defined(ABSL_BUILD_DLL)
-// When building a dll symbol export lists may reference the destructor
-// and want it to be an exported symbol rather than an inline function.
-// Some apple builds also do dynamic library build but don't say it explicitly.
-Mutex::~Mutex() { Dtor(); }
-#endif
+static bool DebugOnlyIsExiting() {
+ return false;
+}
-#if !defined(NDEBUG) || defined(ABSL_HAVE_THREAD_SANITIZER)
-void Mutex::Dtor() {
+Mutex::~Mutex() {
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
+ ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
+ }
if (kDebugMode) {
this->ForgetDeadlockInfo();
}
ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
}
-#endif
void Mutex::EnableDebugLog(const char* name) {
SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
e->log = true;
UnrefSynchEvent(e);
- // This prevents "error: undefined symbol: absl::Mutex::~Mutex()"
- // in a release build (NDEBUG defined) when a test does "#undef NDEBUG"
- // to use assert macro. In such case, the test does not get the dtor
- // definition because it's supposed to be outline when NDEBUG is not defined,
- // and this source file does not define one either because NDEBUG is defined.
- // Since it's not possible to take address of a destructor, we move the
- // actual destructor code into the separate Dtor function and force the
- // compiler to emit this function even if it's inline by taking its address.
- ABSL_ATTRIBUTE_UNUSED volatile auto dtor = &Mutex::Dtor;
}
void EnableMutexInvariantDebugging(bool enabled) {
@@ -2478,6 +2470,12 @@ void CondVar::EnableDebugLog(const char* name) {
UnrefSynchEvent(e);
}
+CondVar::~CondVar() {
+ if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
+ ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
+ }
+}
+
// Remove thread s from the list of waiters on this condition variable.
void CondVar::Remove(PerThreadSynch* s) {
SchedulingGuard::ScopedDisable disable_rescheduling;
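
With the destructors restored, a Mutex or CondVar whose event bit (kMuEvent/kCvEvent) is set removes its SynchEvent mapping before the storage can be reused; this is the address-reuse case the rolled-back change mishandled. An illustrative sketch of that scenario, mirroring the removed LoggingAddressReuse test:

    #include <new>
    #include "absl/synchronization/mutex.h"

    alignas(absl::Mutex) unsigned char storage[sizeof(absl::Mutex)];

    void ReuseAddress() {
      auto* m1 = new (storage) absl::Mutex;
      m1->EnableDebugLog("first");  // attaches a SynchEvent to this address
      m1->~Mutex();                 // restored dtor forgets the mapping

      auto* m2 = new (storage) absl::Mutex;  // fresh object, same address
      m2->Lock();    // must not observe m1's stale debug event
      m2->Unlock();
      m2->~Mutex();
    }
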
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index d8264c03..95726f6b 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -536,7 +536,6 @@ class ABSL_LOCKABLE Mutex {
void Block(base_internal::PerThreadSynch* s);
// Wake a thread; return successor.
base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
- void Dtor();
friend class CondVar; // for access to Trans()/Fer().
void Trans(MuHow how); // used for CondVar->Mutex transfer
@@ -910,6 +909,7 @@ class CondVar {
// A `CondVar` allocated on the heap or on the stack can use this
// constructor.
CondVar();
+ ~CondVar();
// CondVar::Wait()
//
@@ -1061,15 +1061,6 @@ inline Mutex::Mutex() : mu_(0) {
inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}
-#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL)
-inline Mutex::~Mutex() { Dtor(); }
-#endif
-
-#if defined(NDEBUG) && !defined(ABSL_HAVE_THREAD_SANITIZER)
-// Use default (empty) destructor in release build for performance reasons.
-inline void Mutex::Dtor() {}
-#endif
-
inline CondVar::CondVar() : cv_(0) {}
// static
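
The header change leaves the constexpr absl::ConstInitType constructor untouched, so mutexes with static storage duration can still be constant-initialized even though Mutex now has a user-defined destructor in all builds. Typical usage of that constructor (standard Abseil idiom, not part of this diff):

    #include "absl/base/attributes.h"
    #include "absl/base/const_init.h"
    #include "absl/synchronization/mutex.h"

    // Constant-initialized at compile time, so it is usable even before
    // dynamic initializers for other globals have run.
    ABSL_CONST_INIT static absl::Mutex g_table_mu(absl::kConstInit);
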
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index 35b43334..6c38c07c 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -1709,33 +1709,6 @@ TEST(Mutex, Logging) {
logged_cv.SignalAll();
}
-TEST(Mutex, LoggingAddressReuse) {
- // Repeatedly re-create a Mutex with debug logging at the same address.
- alignas(absl::Mutex) char storage[sizeof(absl::Mutex)];
- auto invariant =
- +[](void *alive) { EXPECT_TRUE(*static_cast<bool *>(alive)); };
- constexpr size_t kIters = 10;
- bool alive[kIters] = {};
- for (size_t i = 0; i < kIters; ++i) {
- absl::Mutex *mu = new (storage) absl::Mutex;
- alive[i] = true;
- mu->EnableDebugLog("Mutex");
- mu->EnableInvariantDebugging(invariant, &alive[i]);
- mu->Lock();
- mu->Unlock();
- mu->~Mutex();
- alive[i] = false;
- }
-}
-
-TEST(Mutex, LoggingBankrupcy) {
- // Test the case with too many live Mutexes with debug logging.
- std::vector<absl::Mutex> mus(1 << 20);
- for (auto &mu : mus) {
- mu.EnableDebugLog("Mutex");
- }
-}
-
// --------------------------------------------------------
// Generate the vector of thread counts for tests parameterized on thread count.