author | Abseil Team <absl-team@google.com> | 2020-09-18 15:55:15 -0700
committer | Derek Mauro <dmauro@google.com> | 2020-09-24 13:47:15 -0400
commit | b56cbdd23834a65682c0b46f367f8679e83bc894
tree | dacab9a64dd1a9e9668737e511d1a5420ff96001 /absl/base/internal/spinlock.cc
parent | b832dce8489ef7b6231384909fd9b68d5a5ff2b7
Abseil LTS 20200923
What's New:
* `absl::StatusOr<T>` has been released. See our [blog
  post](https://abseil.io/blog/2020-091021-status) for more
  information. A usage sketch follows this list.
* Abseil Flags reflection interfaces have been released.
* Abseil Flags memory usage has been significantly optimized.
* Abseil now supports a "hardened" build mode. This build mode enables
runtime checks that guard against programming errors that may lead
to security vulnerabilities.
Notable Fixes:
* Sanitizer dynamic annotations like `AnnotateRWLockCreate` are no
  longer defined by Abseil when the compiler's sanitizer
  implementation already defines them.
* Sanitizer macros are now prefixed with `ABSL_` to avoid naming collisions (see the sketch after this list).
* Sanitizer usage is now automatically detected and no longer requires
macros like `ADDRESS_SANITIZER` to be defined on the command line.
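A minimal sketch of how these two fixes look in user code, assuming the `ABSL_`-prefixed annotation names from `absl/base/dynamic_annotations.h` and the detection macros from `absl/base/config.h`:

```cpp
#include <cstddef>

#include "absl/base/config.h"               // ABSL_HAVE_*_SANITIZER detection.
#include "absl/base/dynamic_annotations.h"  // ABSL_-prefixed annotations.

void MarkInitialized(void* buf, std::size_t n) {
#ifdef ABSL_HAVE_MEMORY_SANITIZER
  // This macro is now defined automatically when compiling under MSan;
  // passing -DMEMORY_SANITIZER on the command line is no longer required.
#endif
  // Formerly spelled ANNOTATE_MEMORY_IS_INITIALIZED, without the prefix.
  ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buf, n);
}
```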
Breaking Changes:
* Abseil no longer contains a `dynamic_annotations` library. Users
using a supported build system (Bazel or CMake) are unaffected by
this, but users manually specifying link libraries may get an error
about a missing linker input.
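For instance, a hand-maintained link line that still passes something like `-labsl_dynamic_annotations` (the name under which earlier releases installed this library) will now fail with a missing-input error; the fix is to drop that library from the link line, since the annotation functionality now comes from headers in `absl/base`.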
Baseline: 7680a5f8efe32de4753baadbd63e74e59d95bac1
Cherry picks: None
Diffstat (limited to 'absl/base/internal/spinlock.cc')
-rw-r--r-- | absl/base/internal/spinlock.cc | 63
1 file changed, 25 insertions, 38 deletions
diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc
index 830d4729..a7d44f3e 100644
--- a/absl/base/internal/spinlock.cc
+++ b/absl/base/internal/spinlock.cc
@@ -66,35 +66,19 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
   submit_profile_data.Store(fn);
 }
 
+// Static member variable definitions.
+constexpr uint32_t SpinLock::kSpinLockHeld;
+constexpr uint32_t SpinLock::kSpinLockCooperative;
+constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
+constexpr uint32_t SpinLock::kSpinLockSleeper;
+constexpr uint32_t SpinLock::kWaitTimeMask;
+
 // Uncommon constructors.
 SpinLock::SpinLock(base_internal::SchedulingMode mode)
     : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
   ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
 }
 
-SpinLock::SpinLock(base_internal::LinkerInitialized,
-                   base_internal::SchedulingMode mode) {
-  ABSL_TSAN_MUTEX_CREATE(this, 0);
-  if (IsCooperative(mode)) {
-    InitLinkerInitializedAndCooperative();
-  }
-  // Otherwise, lockword_ is already initialized.
-}
-
-// Static (linker initialized) spinlocks always start life as functional
-// non-cooperative locks. When their static constructor does run, it will call
-// this initializer to augment the lockword with the cooperative bit. By
-// actually taking the lock when we do this we avoid the need for an atomic
-// operation in the regular unlock path.
-//
-// SlowLock() must be careful to re-test for this bit so that any outstanding
-// waiters may be upgraded to cooperative status.
-void SpinLock::InitLinkerInitializedAndCooperative() {
-  Lock();
-  lockword_.fetch_or(kSpinLockCooperative, std::memory_order_relaxed);
-  Unlock();
-}
-
 // Monitor the lock to see if its value changes within some time period
 // (adaptive_spin_count loop iterations). The last value read from the lock
 // is returned from the method.
@@ -121,6 +105,14 @@ void SpinLock::SlowLock() {
   if ((lock_value & kSpinLockHeld) == 0) {
     return;
   }
+
+  base_internal::SchedulingMode scheduling_mode;
+  if ((lock_value & kSpinLockCooperative) != 0) {
+    scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+  } else {
+    scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
+  }
+
   // The lock was not obtained initially, so this thread needs to wait for
   // it. Record the current timestamp in the local variable wait_start_time
   // so the total wait time can be stored in the lockword once this thread
@@ -151,12 +143,6 @@ void SpinLock::SlowLock() {
     }
   }
 
-  base_internal::SchedulingMode scheduling_mode;
-  if ((lock_value & kSpinLockCooperative) != 0) {
-    scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
-  } else {
-    scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
-  }
   // SpinLockDelay() calls into fiber scheduler, we need to see
   // synchronization there to avoid false positives.
   ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
@@ -190,30 +176,32 @@ void SpinLock::SlowUnlock(uint32_t lock_value) {
 // We use the upper 29 bits of the lock word to store the time spent waiting to
 // acquire this lock. This is reported by contentionz profiling. Since the
 // lower bits of the cycle counter wrap very quickly on high-frequency
-// processors we divide to reduce the granularity to 2^PROFILE_TIMESTAMP_SHIFT
+// processors we divide to reduce the granularity to 2^kProfileTimestampShift
 // sized units. On a 4Ghz machine this will lose track of wait times greater
 // than (2^29/4 Ghz)*128 =~ 17.2 seconds. Such waits should be extremely rare.
-enum { PROFILE_TIMESTAMP_SHIFT = 7 };
-enum { LOCKWORD_RESERVED_SHIFT = 3 };  // We currently reserve the lower 3 bits.
+static constexpr int kProfileTimestampShift = 7;
+
+// We currently reserve the lower 3 bits.
+static constexpr int kLockwordReservedShift = 3;
 
 uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
                                     int64_t wait_end_time) {
   static const int64_t kMaxWaitTime =
-      std::numeric_limits<uint32_t>::max() >> LOCKWORD_RESERVED_SHIFT;
+      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
   int64_t scaled_wait_time =
-      (wait_end_time - wait_start_time) >> PROFILE_TIMESTAMP_SHIFT;
+      (wait_end_time - wait_start_time) >> kProfileTimestampShift;
 
   // Return a representation of the time spent waiting that can be stored in
   // the lock word's upper bits.
   uint32_t clamped = static_cast<uint32_t>(
-      std::min(scaled_wait_time, kMaxWaitTime) << LOCKWORD_RESERVED_SHIFT);
+      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
 
   if (clamped == 0) {
     return kSpinLockSleeper;  // Just wake waiters, but don't record contention.
   }
   // Bump up value if necessary to avoid returning kSpinLockSleeper.
   const uint32_t kMinWaitTime =
-      kSpinLockSleeper + (1 << LOCKWORD_RESERVED_SHIFT);
+      kSpinLockSleeper + (1 << kLockwordReservedShift);
   if (clamped == kSpinLockSleeper) {
     return kMinWaitTime;
   }
@@ -224,8 +212,7 @@ uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
   // Cast to uint32_t first to ensure bits [63:32] are cleared.
   const uint64_t scaled_wait_time =
       static_cast<uint32_t>(lock_value & kWaitTimeMask);
-  return scaled_wait_time
-         << (PROFILE_TIMESTAMP_SHIFT - LOCKWORD_RESERVED_SHIFT);
+  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
 }
 
 }  // namespace base_internal
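Two of the changes above are worth unpacking. The new out-of-line `constexpr uint32_t SpinLock::k...;` definitions exist because Abseil supports C++11/14, where a static `constexpr` data member that is odr-used still needs a namespace-scope definition; in C++17 such members are implicitly inline, so these definitions become redundant but harmless. The `scheduling_mode` computation in `SlowLock()` simply moves ahead of the wait loop, so the mode is decided once before any waiting begins.

To see what the renamed shift constants do, here is a self-contained sketch of the encode/decode round trip. The shift values are copied from the diff; `kSpinLockSleeper` and `kWaitTimeMask` are assumed values mirroring the lock-word layout in `spinlock.h` (low 3 bits for lock state, upper 29 bits for wait time), and the member functions are rewritten as free functions for the sake of the example:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>

// Shift constants copied from the diff above; the two lock-word constants
// are assumptions mirroring spinlock.h.
constexpr int kProfileTimestampShift = 7;
constexpr int kLockwordReservedShift = 3;
constexpr uint32_t kSpinLockSleeper = 8;          // 1 << kLockwordReservedShift
constexpr uint32_t kWaitTimeMask = ~uint32_t{7};  // Upper 29 bits.

uint32_t EncodeWaitCycles(int64_t wait_start_time, int64_t wait_end_time) {
  static const int64_t kMaxWaitTime =
      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
  // Drop the low kProfileTimestampShift bits to coarsen the granularity.
  int64_t scaled_wait_time =
      (wait_end_time - wait_start_time) >> kProfileTimestampShift;
  // Clamp and shift the result into the upper 29 bits of the lock word.
  uint32_t clamped = static_cast<uint32_t>(
      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
  if (clamped == 0) return kSpinLockSleeper;  // Wake waiters; no contention.
  // Avoid returning a value equal to the sleeper bit itself.
  const uint32_t kMinWaitTime =
      kSpinLockSleeper + (1 << kLockwordReservedShift);
  if (clamped == kSpinLockSleeper) return kMinWaitTime;
  return clamped;
}

uint64_t DecodeWaitCycles(uint32_t lock_value) {
  const uint64_t scaled_wait_time =
      static_cast<uint32_t>(lock_value & kWaitTimeMask);
  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}

int main() {
  // A wait of 1,000,000 cycle-counter ticks survives the round trip rounded
  // down to a multiple of 2^kProfileTimestampShift = 128 ticks.
  uint32_t encoded = EncodeWaitCycles(0, 1000000);
  std::cout << DecodeWaitCycles(encoded) << "\n";  // Prints 999936.
}
```

Decoding shifts by only 7 - 3 = 4 because the stored value already sits 3 bits up in the word; the net effect is the original wait rounded down to a multiple of 128 ticks, which is exactly the granularity trade-off the comment in the diff describes.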