author     Abseil Team <absl-team@google.com>    2021-01-15 22:34:04 -0800
committer  vslashg <gfalcon@google.com>          2021-01-19 10:32:30 -0500
commit     b2dcbba18341d75f3fef486b717585cefda0195d (patch)
tree       5b578d44434a1db7622b8dfc876ca77c62f3ad91
parent     ff361eb3aac20f08ec7b1ccfdd3204b0aa6cbe33 (diff)
Export of internal Abseil changes
--
874f906d2b90d4c74b0edeac52811efc10323422 by Todd Lipcon <tlipcon@google.com>:

    Internal change

PiperOrigin-RevId: 352140672
GitOrigin-RevId: 874f906d2b90d4c74b0edeac52811efc10323422
Change-Id: I151299caff75d404309f059dd0e5971f5a71655d
-rw-r--r--  absl/base/internal/spinlock.cc  13
-rw-r--r--  absl/base/internal/spinlock.h   14
2 files changed, 24 insertions, 3 deletions
diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc
index a7d44f3e..35c0696a 100644
--- a/absl/base/internal/spinlock.cc
+++ b/absl/base/internal/spinlock.cc
@@ -125,8 +125,9 @@ void SpinLock::SlowLock() {
// it as having a sleeper.
if ((lock_value & kWaitTimeMask) == 0) {
// Here, just "mark" that the thread is going to sleep. Don't store the
- // lock wait time in the lock as that will cause the current lock
- // owner to think it experienced contention.
+ // lock wait time in the lock -- the lock word stores the amount of time
+ // that the current holder waited before acquiring the lock, not the wait
+ // time of any thread currently waiting to acquire it.
if (lockword_.compare_exchange_strong(
lock_value, lock_value | kSpinLockSleeper,
std::memory_order_relaxed, std::memory_order_relaxed)) {
@@ -140,6 +141,14 @@ void SpinLock::SlowLock() {
// this thread obtains the lock.
lock_value = TryLockInternal(lock_value, wait_cycles);
continue; // Skip the delay at the end of the loop.
+ } else if ((lock_value & kWaitTimeMask) == 0) {
+ // The lock is still held, without a waiter being marked, but something
+ // else about the lock word changed, causing our CAS to fail. For
+ // example, a new lock holder may have acquired the lock with
+ // kSpinLockDisabledScheduling set, whereas the previous holder had not
+ // set that flag. In this case, attempt again to mark ourselves as a
+ // waiter.
+ continue;
}
}
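The hunks above change the waiter-marking path of SpinLock::SlowLock(). As a rough illustration of that path, here is a minimal standalone sketch of a "mark myself as a sleeper" loop; kHeld, kSleeper, and kWaitMask below are hypothetical stand-ins for kSpinLockHeld, kSpinLockSleeper, and kWaitTimeMask, and this is not the actual Abseil implementation:

#include <atomic>
#include <cstdint>

namespace {
// Hypothetical stand-ins mirroring the lock-word layout described in spinlock.h.
constexpr uint32_t kHeld = 1u << 0;          // lock is held
constexpr uint32_t kSleeper = 1u << 3;       // a waiter is (or may be) queued
constexpr uint32_t kWaitMask = 0xFFFFFFF8u;  // bits 3:31, the wait-time field
}  // namespace

// Attempt to mark `word` as having a sleeping waiter and return the lock value
// the caller should act on.
uint32_t MarkSleeper(std::atomic<uint32_t>& word) {
  uint32_t value = word.load(std::memory_order_relaxed);
  while (true) {
    if ((value & kHeld) == 0) {
      return value;  // Lock became free; the caller should retry acquisition.
    }
    if ((value & kWaitMask) != 0) {
      return value;  // A wait time or sleeper bit is already recorded.
    }
    if (word.compare_exchange_strong(value, value | kSleeper,
                                     std::memory_order_relaxed,
                                     std::memory_order_relaxed)) {
      return value | kSleeper;  // Marked successfully; safe to block now.
    }
    // CAS failed and `value` was refreshed. If the lock is still held and the
    // wait-time field is still zero, only an unrelated bit changed (for
    // example the scheduling-disabled flag), so loop and try to mark again --
    // the case handled by the new `else if` branch in the patch.
  }
}

The retry in that last case is the substance of the patch: a failed CAS does not imply that the lock was released or that a wait time was stored, so falling through to the spin delay without re-attempting the mark would be premature.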
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
index dce1c854..c73b5e09 100644
--- a/absl/base/internal/spinlock.h
+++ b/absl/base/internal/spinlock.h
@@ -137,8 +137,20 @@ class ABSL_LOCKABLE SpinLock {
//
// bit[0] encodes whether a lock is being held.
// bit[1] encodes whether a lock uses cooperative scheduling.
- // bit[2] encodes whether a lock disables scheduling.
+ // bit[2] encodes whether the current lock holder disabled scheduling when
+ // acquiring the lock. Only set when kSpinLockHeld is also set.
// bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
+ // This is set by the lock holder to indicate how long it waited on
+ // the lock before eventually acquiring it. The number of cycles is
+ // encoded as a 29-bit unsigned int, or in the case that the current
+ // holder did not wait but another waiter is queued, the LSB
+ // (kSpinLockSleeper) is set. The implementation does not explicitly
+ // track the number of queued waiters beyond this. It must always be
+ // assumed that waiters may exist if the current holder was required to
+ // queue.
+ //
+ // Invariant: if the lock is not held, the value is either 0 or
+ // kSpinLockCooperative.
static constexpr uint32_t kSpinLockHeld = 1;
static constexpr uint32_t kSpinLockCooperative = 2;
static constexpr uint32_t kSpinLockDisabledScheduling = 4;
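To make the expanded layout comment concrete, here is a minimal sketch that decodes a lock word along the lines described above. The values of kSpinLockHeld, kSpinLockCooperative, and kSpinLockDisabledScheduling are the ones shown in the hunk; kSpinLockSleeper (bit 3) and the wait-time mask are inferred from the comment rather than quoted from spinlock.h, and all constants are redeclared locally just for this sketch:

#include <cstdint>
#include <cstdio>

namespace {
constexpr uint32_t kSpinLockHeld = 1;                // bit 0
constexpr uint32_t kSpinLockCooperative = 2;         // bit 1
constexpr uint32_t kSpinLockDisabledScheduling = 4;  // bit 2
constexpr uint32_t kSpinLockSleeper = 8;             // assumed: LSB of bits 3:31
constexpr uint32_t kWaitTimeMask =                   // assumed: bits 3:31
    ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
}  // namespace

void DescribeLockWord(uint32_t v) {
  std::printf("held=%d cooperative=%d sched_disabled=%d wait_field=0x%x\n",
              (v & kSpinLockHeld) != 0, (v & kSpinLockCooperative) != 0,
              (v & kSpinLockDisabledScheduling) != 0,
              static_cast<unsigned>(v & kWaitTimeMask));
}

int main() {
  DescribeLockWord(0);                                  // free, non-cooperative
  DescribeLockWord(kSpinLockCooperative);               // free, cooperative
  DescribeLockWord(kSpinLockHeld | kSpinLockSleeper);   // held; holder did not wait, waiter queued
  DescribeLockWord(kSpinLockHeld | 0x100);              // held; holder recorded some wait time
  return 0;
}

Per the invariant in the comment, an unlocked word only ever holds 0 or kSpinLockCooperative, so any other bit pattern implies kSpinLockHeld is set.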