author    | Abseil Team <absl-team@google.com> | 2020-06-22 10:03:06 -0700
committer | Mark Barolak <mbar@google.com> | 2020-06-22 14:04:24 -0400
commit    | 10cb35e459f5ecca5b2ff107635da0bfa41011b4 (patch)
tree      | d382b5929817af05d27c0e3e23c800c84e80e892 /absl/synchronization/mutex.cc
parent    | 4ccc0fce09836a25b474f4b1453146dae2c29f4d (diff)
Export of internal Abseil changes
--
b548087c24ae7c2c709e8040a118b5e312d18e2e by Derek Mauro <dmauro@google.com>:
Remove the static initialization of global variables used by absl::Mutex
as requested by Chromium
PiperOrigin-RevId: 317676541
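The pattern behind this change is to make the globals constant-initializable and fill them in lazily on first use. A minimal stand-alone sketch of that pattern, using the public absl::call_once API rather than the internal LowLevelCallOnce that mutex.cc itself uses (the LazyGlobals/GetLazyGlobals names are illustrative, not from the commit):

```cpp
#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/base/optimization.h"

// Illustrative only: no user-provided constructor, so the function-local
// static below is constant-initialized and needs no global constructor.
struct ABSL_CACHELINE_ALIGNED LazyGlobals {
  absl::once_flag once;
  int num_cpus = 0;
  int spinloop_iterations = 0;
};

const LazyGlobals& GetLazyGlobals() {
  ABSL_CONST_INIT static LazyGlobals data;
  // The fields are computed once, on first call, instead of at startup.
  absl::call_once(data.once, [&]() {
    data.num_cpus = absl::base_internal::NumCPUs();
    data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
  });
  return data;
}
```

Because no dynamic initializer runs at program start, this satisfies Chromium-style restrictions on static initializers while keeping the values cheap to read after first use.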
--
f198f5da1e966772efa978ba019bd23576899794 by Greg Miller <jgm@google.com>:
fix: work around gcc-4.8 bug in disjunction
See https://godbolt.org/z/i7-AmM for a repro of the bug.
I realize that Abseil no longer officially supports gcc 4.8
(https://abseil.io/docs/cpp/platforms/platforms), but Cloud C++ still does,
so it would be nice to get this simple fix in.
fixes https://github.com/abseil/abseil-cpp/issues/718
PiperOrigin-RevId: 317484459
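For context, the disjunction in question is Abseil's C++11 backport of std::disjunction, a trait that ORs other traits and stops instantiating once one is true. A minimal stand-alone version of such a trait is sketched below for illustration; it does not reproduce the actual workaround applied in this change:

```cpp
#include <type_traits>

// C++11-style disjunction: derives from the first trait whose ::value is
// true; the traits after that one are never instantiated.
template <typename...>
struct my_disjunction : std::false_type {};

template <typename T>
struct my_disjunction<T> : T {};

template <typename T, typename... Ts>
struct my_disjunction<T, Ts...>
    : std::conditional<T::value, T, my_disjunction<Ts...>>::type {};

static_assert(my_disjunction<std::false_type, std::true_type>::value,
              "true if any trait is true");
static_assert(!my_disjunction<std::false_type, std::false_type>::value,
              "false if all traits are false");
```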
--
ed233f646530c6c0948213b643cc6919db1bee90 by Chris Kennelly <ckennelly@google.com>:
Avoid determining the size of the duration unit at runtime.
PiperOrigin-RevId: 317376300
--
73d4011c17fcf747a990176924a7adc69d443533 by Greg Falcon <gfalcon@google.com>:
Change spelling of internal detail from `Invoke`/`InvokeT` to `invoke`/`invoke_result_t`.
This matches the spelling of the C++17 standard library names that perform the same operations.
PiperOrigin-RevId: 317311527
GitOrigin-RevId: b548087c24ae7c2c709e8040a118b5e312d18e2e
Change-Id: I131809ff0b92cfdb0d96dc94e94d9c6f751cb0ac
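For reference on the last change above, the renamed helpers mirror the C++17 standard facilities std::invoke and std::invoke_result_t. A small example of those standard names (not Abseil's internal implementation):

```cpp
#include <functional>    // std::invoke (C++17)
#include <type_traits>   // std::invoke_result_t (C++17)

int add(int a, int b) { return a + b; }

// std::invoke_result_t names the type a call would yield;
// std::invoke performs the call itself.
static_assert(
    std::is_same_v<std::invoke_result_t<decltype(&add), int, int>, int>);

int main() {
  return std::invoke(add, 2, 3) == 5 ? 0 : 1;  // exits 0 on success
}
```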
Diffstat (limited to 'absl/synchronization/mutex.cc')
-rw-r--r-- | absl/synchronization/mutex.cc | 57
1 files changed, 27 insertions, 30 deletions
```diff
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 1f8a696e..62fa8e9c 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -39,6 +39,7 @@
 #include <thread>  // NOLINT(build/c++11)
 
 #include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
 #include "absl/base/config.h"
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/atomic_hook.h"
@@ -85,28 +86,6 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
     kDeadlockDetectionDefault);
 ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
 
-// ------------------------------------------ spinlock support
-
-// Make sure read-only globals used in the Mutex code are contained on the
-// same cacheline and cacheline aligned to eliminate any false sharing with
-// other globals from this and other modules.
-static struct MutexGlobals {
-  MutexGlobals() {
-    // Find machine-specific data needed for Delay() and
-    // TryAcquireWithSpinning(). This runs in the global constructor
-    // sequence, and before that zeros are safe values.
-    num_cpus = absl::base_internal::NumCPUs();
-    spinloop_iterations = num_cpus > 1 ? 1500 : 0;
-  }
-  int num_cpus;
-  int spinloop_iterations;
-  // Pad this struct to a full cacheline to prevent false sharing.
-  char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)];
-} ABSL_CACHELINE_ALIGNED mutex_globals;
-static_assert(
-    sizeof(MutexGlobals) == ABSL_CACHELINE_SIZE,
-    "MutexGlobals must occupy an entire cacheline to prevent false sharing");
-
 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
     absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
         submit_profile_data;
@@ -143,7 +122,22 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
   symbolizer.Store(fn);
 }
 
-// spinlock delay on iteration c. Returns new c.
+struct ABSL_CACHELINE_ALIGNED MutexGlobals {
+  absl::once_flag once;
+  int num_cpus = 0;
+  int spinloop_iterations = 0;
+};
+
+static const MutexGlobals& GetMutexGlobals() {
+  ABSL_CONST_INIT static MutexGlobals data;
+  absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
+    data.num_cpus = absl::base_internal::NumCPUs();
+    data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
+  });
+  return data;
+}
+
+// Spinlock delay on iteration c. Returns new c.
 namespace {
   enum DelayMode { AGGRESSIVE, GENTLE };
 };
@@ -153,22 +147,25 @@ static int Delay(int32_t c, DelayMode mode) {
   // gentle then spin only a few times before yielding. Aggressive spinning is
   // used to ensure that an Unlock() call, which must get the spin lock for
   // any thread to make progress gets it without undue delay.
-  int32_t limit = (mutex_globals.num_cpus > 1) ?
-      ((mode == AGGRESSIVE) ? 5000 : 250) : 0;
+  const int32_t limit =
+      GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0;
   if (c < limit) {
-    c++;  // spin
+    // Spin.
+    c++;
   } else {
     ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
-    if (c == limit) {  // yield once
+    if (c == limit) {
+      // Yield once.
       AbslInternalMutexYield();
       c++;
-    } else {  // then wait
+    } else {
+      // Then wait.
       absl::SleepFor(absl::Microseconds(10));
       c = 0;
     }
     ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
   }
-  return (c);
+  return c;
 }
 
 // --------------------------Generic atomic ops
@@ -1437,7 +1434,7 @@ void Mutex::AssertNotHeld() const {
 // Attempt to acquire *mu, and return whether successful. The implementation
 // may spin for a short while if the lock cannot be acquired immediately.
 static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
-  int c = mutex_globals.spinloop_iterations;
+  int c = GetMutexGlobals().spinloop_iterations;
   do {  // do/while somewhat faster on AMD
     intptr_t v = mu->load(std::memory_order_relaxed);
     if ((v & (kMuReader|kMuEvent)) != 0) {
```