Diffstat (limited to 'absl/synchronization/mutex.cc')
-rw-r--r-- | absl/synchronization/mutex.cc | 57
1 file changed, 27 insertions(+), 30 deletions(-)
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 1f8a696e..62fa8e9c 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -39,6 +39,7 @@
 #include <thread>  // NOLINT(build/c++11)
 
 #include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
 #include "absl/base/config.h"
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/atomic_hook.h"
@@ -85,28 +86,6 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
     kDeadlockDetectionDefault);
 ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
 
-// ------------------------------------------ spinlock support
-
-// Make sure read-only globals used in the Mutex code are contained on the
-// same cacheline and cacheline aligned to eliminate any false sharing with
-// other globals from this and other modules.
-static struct MutexGlobals {
-  MutexGlobals() {
-    // Find machine-specific data needed for Delay() and
-    // TryAcquireWithSpinning(). This runs in the global constructor
-    // sequence, and before that zeros are safe values.
-    num_cpus = absl::base_internal::NumCPUs();
-    spinloop_iterations = num_cpus > 1 ? 1500 : 0;
-  }
-  int num_cpus;
-  int spinloop_iterations;
-  // Pad this struct to a full cacheline to prevent false sharing.
-  char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)];
-} ABSL_CACHELINE_ALIGNED mutex_globals;
-static_assert(
-    sizeof(MutexGlobals) == ABSL_CACHELINE_SIZE,
-    "MutexGlobals must occupy an entire cacheline to prevent false sharing");
-
 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
 absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
     submit_profile_data;
@@ -143,7 +122,22 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
   symbolizer.Store(fn);
 }
 
-// spinlock delay on iteration c. Returns new c.
+struct ABSL_CACHELINE_ALIGNED MutexGlobals {
+  absl::once_flag once;
+  int num_cpus = 0;
+  int spinloop_iterations = 0;
+};
+
+static const MutexGlobals& GetMutexGlobals() {
+  ABSL_CONST_INIT static MutexGlobals data;
+  absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
+    data.num_cpus = absl::base_internal::NumCPUs();
+    data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
+  });
+  return data;
+}
+
+// Spinlock delay on iteration c. Returns new c.
 namespace {
 enum DelayMode { AGGRESSIVE, GENTLE };
 };
@@ -153,22 +147,25 @@ static int Delay(int32_t c, DelayMode mode) {
   // gentle then spin only a few times before yielding.  Aggressive spinning is
   // used to ensure that an Unlock() call, which must get the spin lock for
   // any thread to make progress gets it without undue delay.
-  int32_t limit = (mutex_globals.num_cpus > 1) ?
-      ((mode == AGGRESSIVE) ? 5000 : 250) : 0;
+  const int32_t limit =
+      GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0;
   if (c < limit) {
-    c++;  // spin
+    // Spin.
+    c++;
   } else {
     ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
-    if (c == limit) {  // yield once
+    if (c == limit) {
+      // Yield once.
       AbslInternalMutexYield();
       c++;
-    } else {            // then wait
+    } else {
+      // Then wait.
       absl::SleepFor(absl::Microseconds(10));
       c = 0;
     }
     ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
   }
-  return (c);
+  return c;
 }
 
 // --------------------------Generic atomic ops
@@ -1437,7 +1434,7 @@ void Mutex::AssertNotHeld() const {
 // Attempt to acquire *mu, and return whether successful.  The implementation
 // may spin for a short while if the lock cannot be acquired immediately.
 static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
-  int c = mutex_globals.spinloop_iterations;
+  int c = GetMutexGlobals().spinloop_iterations;
   do {  // do/while somewhat faster on AMD
     intptr_t v = mu->load(std::memory_order_relaxed);
     if ((v & (kMuReader|kMuEvent)) != 0) {
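The change above replaces a cacheline-padded global that was filled in by a dynamic global constructor with a function-local static that is initialized on first use under a once flag. The following standalone sketch illustrates that lazy-initialization pattern under stated assumptions: it uses the public absl::call_once rather than the internal absl::base_internal::LowLevelCallOnce the patch uses, substitutes std::thread::hardware_concurrency() for the internal absl::base_internal::NumCPUs(), and the names LazyGlobals and GetLazyGlobals are hypothetical, not part of the Abseil source.

// Sketch only: mirrors the shape of the patch, not the exact Abseil internals.
#include <iostream>
#include <thread>

#include "absl/base/call_once.h"     // absl::once_flag, absl::call_once
#include "absl/base/optimization.h"  // ABSL_CACHELINE_ALIGNED

// Keep the read-only tuning values on their own cacheline to avoid false
// sharing with unrelated globals.
struct ABSL_CACHELINE_ALIGNED LazyGlobals {
  absl::once_flag once;
  int num_cpus = 0;
  int spinloop_iterations = 0;
};

static const LazyGlobals& GetLazyGlobals() {
  // A function-local static with only constant initialization: no dynamic
  // global constructor runs at program start, so there is no static
  // initialization-order hazard.
  static LazyGlobals data;
  absl::call_once(data.once, [&]() {
    // Executed exactly once, on the first call, even under concurrent use.
    data.num_cpus = static_cast<int>(std::thread::hardware_concurrency());
    data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
  });
  return data;
}

int main() {
  std::cout << "cpus=" << GetLazyGlobals().num_cpus
            << " spin=" << GetLazyGlobals().spinloop_iterations << "\n";
}

The design point is the same as in the diff: callers such as the spin-delay and try-acquire paths read the values through the accessor, so the cost of computing them is paid on first use instead of during global construction, and zero-initialized values are never observed.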