author    | Dmitry Vyukov <dvyukov@google.com> | 2023-09-20 09:02:50 -0700
committer | Copybara-Service <copybara-worker@google.com> | 2023-09-20 09:03:36 -0700
commit    | 28549d18f7190c59fa5b9eaf4530e15f3f0d521f (patch)
tree      | 259931b54b8dd5ac4a87a36a321e6ac343760e86 /absl/synchronization
parent    | 556fcb57eb5f87e137c7db39b5a60ab3c4aa525f (diff)
absl: speed up Mutex::ReaderLock/Unlock
Currently ReaderLock/Unlock try the CAS only once.
Even under moderate contention from other readers only,
ReaderLock/Unlock fall onto the slow path, which does a lot of additional
work before retrying the CAS (since there are only readers, the slow-path
logic is not actually needed for anything).
Retry the CAS while there are only readers.
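The core of the change is the retry loop shown in simplified form below (a sketch of the idea, not the actual mutex.cc code; the full change is in the diff further down). The property that makes the retry cheap is that compare_exchange_strong writes the freshly observed lock word back into v on failure, so each iteration re-checks up-to-date state before choosing between the fast CAS and the slow path.

// Simplified sketch of the ReaderLock fast path after this change
// (illustrative only; see the diff below for the real code).
for (;;) {
  // Writer, waiter, or event bits set: the fast path does not apply.
  if (v & (kMuWriter | kMuWait | kMuEvent)) {
    this->LockSlow(kShared, nullptr, 0);
    break;
  }
  // Only readers (or nobody) hold the lock: try to add one reader.
  if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
                                  std::memory_order_acquire,
                                  std::memory_order_relaxed)) {
    break;  // acquired the read lock
  }
  // CAS failed: another reader raced us. v now holds the current word,
  // so loop and retry instead of falling into the slow path.
}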
name                                  old cpu/op    new cpu/op    delta
BM_ReaderLock/real_time/threads:1     17.9ns ± 0%   17.9ns ± 0%       ~    (p=0.071 n=5+5)
BM_ReaderLock/real_time/threads:72    11.4µs ± 3%    8.4µs ± 4%  -26.24%   (p=0.008 n=5+5)
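For reference, a multi-threaded reader-lock benchmark along these lines could be written with Google Benchmark as sketched below; the name BM_ReaderLock matches the table above, but the body is an assumption, not the actual Abseil benchmark source.

#include "absl/synchronization/mutex.h"
#include "benchmark/benchmark.h"

// Hypothetical sketch of a reader-lock benchmark; not the real BM_ReaderLock.
static absl::Mutex mu;  // shared by all benchmark threads

static void BM_ReaderLock(benchmark::State& state) {
  for (auto _ : state) {
    mu.ReaderLock();    // reader-only contention exercises the fast path
    mu.ReaderUnlock();
  }
}
BENCHMARK(BM_ReaderLock)->UseRealTime()->Threads(1)->Threads(72);

BENCHMARK_MAIN();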
PiperOrigin-RevId: 566981511
Change-Id: I432a3c1d85b84943d0ad4776a34fa5bfcf5b3b8e
Diffstat (limited to 'absl/synchronization')
-rw-r--r-- | absl/synchronization/mutex.cc | 35
1 file changed, 23 insertions(+), 12 deletions(-)
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 5d1a516d..eb4b6e54 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -1523,12 +1523,19 @@ void Mutex::ReaderLock() {
   ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
   GraphId id = DebugOnlyDeadlockCheck(this);
   intptr_t v = mu_.load(std::memory_order_relaxed);
-  // try fast acquire, then slow loop
-  if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
-      !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
-                                   std::memory_order_acquire,
-                                   std::memory_order_relaxed)) {
-    this->LockSlow(kShared, nullptr, 0);
+  for (;;) {
+    // If there are non-readers holding the lock, use the slow loop.
+    if (ABSL_PREDICT_FALSE(v & (kMuWriter | kMuWait | kMuEvent)) != 0) {
+      this->LockSlow(kShared, nullptr, 0);
+      break;
+    }
+    // We can avoid the loop and only use the CAS when the lock is free or
+    // only held by readers.
+    if (ABSL_PREDICT_TRUE(mu_.compare_exchange_strong(
+            v, (kMuReader | v) + kMuOne, std::memory_order_acquire,
+            std::memory_order_relaxed))) {
+      break;
+    }
   }
   DebugOnlyLockEnter(this, id);
   ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
@@ -1702,16 +1709,20 @@ void Mutex::ReaderUnlock() {
   ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
   DebugOnlyLockLeave(this);
   intptr_t v = mu_.load(std::memory_order_relaxed);
   assert((v & (kMuWriter | kMuReader)) == kMuReader);
-  if ((v & (kMuReader | kMuWait | kMuEvent)) == kMuReader) {
+  for (;;) {
+    if (ABSL_PREDICT_FALSE((v & (kMuReader | kMuWait | kMuEvent)) !=
+                           kMuReader)) {
+      this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
+      break;
+    }
     // fast reader release (reader with no waiters)
     intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
-    if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
-                                    std::memory_order_relaxed)) {
-      ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
-      return;
+    if (ABSL_PREDICT_TRUE(
+            mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
+                                        std::memory_order_relaxed))) {
+      break;
     }
   }
-  this->UnlockSlow(nullptr /*no waitp*/);  // take slow path
   ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
 }
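For context, the read-side fast path above is what an ordinary shared critical section exercises; a minimal usage sketch follows (standard absl::Mutex API, not part of this change).

#include "absl/synchronization/mutex.h"

// Minimal usage sketch (not part of this change): a read-side critical
// section that reaches ReaderLock/ReaderUnlock via the RAII wrapper.
absl::Mutex counter_mu;
int counter = 0;

int ReadCounter() {
  absl::ReaderMutexLock lock(&counter_mu);  // calls Mutex::ReaderLock()
  return counter;                           // Mutex::ReaderUnlock() at scope exit
}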