author     Abseil Team <absl-team@google.com>  2020-06-30 13:08:50 -0700
committer  Derek Mauro <dmauro@google.com>     2020-06-30 16:12:34 -0400
commit     81f34df8347a73c617f244f49cb916238857dc34 (patch)
tree       44d8278f3102055ab0c41059259bb3eacf112323 /absl/synchronization/mutex.cc
parent     b86fff162e15ad8ee534c25e58bf522330e8376d (diff)
Export of internal Abseil changes
-- 739f9fb80212c21c015fec473e9e29803a156ef9 by Derek Mauro <dmauro@google.com>:
Define FlagStateInterface::~FlagStateInterface() in the translation unit in which it is actually declared. Fixes #717
PiperOrigin-RevId: 319083605

-- 913ef1f23113268b22d636d3ae3b992862efdb1a by Derek Mauro <dmauro@google.com>:
Fix ABSL_LOCK_RETURNED statement
PiperOrigin-RevId: 319078667

-- a43b1413da1770d638147c73e7e1693cfaf869c7 by Derek Mauro <dmauro@google.com>:
Fix redeclaration of ‘absl::Cord::InlineRep::kMaxInline’, which differs in ‘constexpr’. Fixes #725
PiperOrigin-RevId: 319060910

-- 1ad7d491a80f6c9de78a6fc20f09b7765d224503 by Abseil Team <absl-team@google.com>:
Make absl SpinLock trivially destructible when possible
PiperOrigin-RevId: 319049456

-- 659ecad3578dfa669854a82279fa590002bdb37f by Derek Mauro <dmauro@google.com>:
Remove the static initialization of the global variables used by absl::Mutex, as requested by Chromium
PiperOrigin-RevId: 319031204

-- 609c491d8bb4f8bb3b44c5a4c0bee51c583df24c by Abseil Team <absl-team@google.com>:
Add an implementation of %a and %A to absl::StrFormat. Prior to this, it just fell back to sprintf.
PiperOrigin-RevId: 318888039

-- 5e8ae6392bcd135248aac14c4b9f2a5116868678 by Abseil Team <absl-team@google.com>:
Google-internal changes only.
PiperOrigin-RevId: 318857077

-- 4a2578e33e8442954e29e5f0380ddfcf0f033f0d by Greg Falcon <gfalcon@google.com>:
Change enum constants to accommodate an internal change.
PiperOrigin-RevId: 318844716

-- 4b578b102816260c213675759f4c15911735578a by Abseil Team <absl-team@google.com>:
Internal change
PiperOrigin-RevId: 318704453

-- 0ee82fd24d548b260c9229fa1f54571dae1dfa24 by Gennadiy Rozental <rogeeff@google.com>:
Allow lookup of retired flags. At the moment we issue a warning on any attempt to find a retired flag, so we cannot even check whether a flag is retired without triggering the warning. With this change, the warning is only issued when one tries to access any functionality of a retired flag other than its name, "is retired" status, and type.
PiperOrigin-RevId: 318605017

-- 3e35fe9b4c79f636fa328c59e2aabb93e29b7c99 by Abseil Team <absl-team@google.com>:
Fix the error return from InstallSymbolDecorator().
PiperOrigin-RevId: 318490405

-- ae46063f3eb2998cb961f62a359d932e5908a4bc by Abseil Team <absl-team@google.com>:
Do not make copies of iterated collection elements into the loop variable.
PiperOrigin-RevId: 318423139

-- d06a075a12aab5f6ab98474677ce50d588b21de3 by Abseil Team <absl-team@google.com>:
Add a missing word, making the error code better English.
PiperOrigin-RevId: 318335052

GitOrigin-RevId: 739f9fb80212c21c015fec473e9e29803a156ef9
Change-Id: Id77a0a4b1959036b00555deef40e82d51884fbc1
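Of the bundled changes, the new %a/%A support in absl::StrFormat is the most user-visible. A quick illustrative usage; the hexfloat strings in the comments are what a typical printf produces for these values and are only indicative, not output guaranteed by the library:

#include <cstdio>
#include <string>

#include "absl/strings/str_format.h"

int main() {
  // %a / %A print hexadecimal floating point. After this change,
  // absl::StrFormat implements them natively instead of falling
  // back to sprintf.
  std::string lower = absl::StrFormat("%a", 255.5);  // e.g. "0x1.ffp+7"
  std::string upper = absl::StrFormat("%A", 255.5);  // e.g. "0X1.FFP+7"
  std::printf("%s\n%s\n", lower.c_str(), upper.c_str());
  return 0;
}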
Diffstat (limited to 'absl/synchronization/mutex.cc')
-rw-r--r--  absl/synchronization/mutex.cc  64
1 file changed, 27 insertions(+), 37 deletions(-)
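The mutex.cc diff below replaces the eagerly constructed, cacheline-aligned MutexGlobals object with a function-local static that is filled in on first use, so no dynamic initializer runs at startup (the change Chromium requested). Here is a minimal sketch of the same pattern using the public absl::call_once API; the patch itself uses the internal absl::base_internal::LowLevelCallOnce, and the names LazyGlobals, GetLazyGlobals, and value are illustrative:

#include "absl/base/attributes.h"    // ABSL_CONST_INIT
#include "absl/base/call_once.h"     // absl::once_flag, absl::call_once
#include "absl/base/optimization.h"  // ABSL_CACHELINE_ALIGNED

struct ABSL_CACHELINE_ALIGNED LazyGlobals {
  absl::once_flag once;
  int value = 0;  // Zero must be a safe value until initialization runs.
};

const LazyGlobals& GetLazyGlobals() {
  // Constant-initialized: no code runs during static initialization.
  ABSL_CONST_INIT static LazyGlobals data;
  absl::call_once(data.once, [&]() {
    // Machine-specific setup now happens on first use, not at startup.
    data.value = 42;  // Stand-in for a hypothetical expensive computation.
  });
  return data;
}

The GetMutexGlobals() function added in the diff has exactly this shape; the cost is an atomic fast-path check in the call_once on each access, in exchange for eliminating the global constructor.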
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 05f5c041..c7968f06 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -39,6 +39,7 @@
#include <thread> // NOLINT(build/c++11)
#include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/atomic_hook.h"
@@ -58,7 +59,6 @@
using absl::base_internal::CurrentThreadIdentityIfPresent;
using absl::base_internal::PerThreadSynch;
-using absl::base_internal::SchedulingGuard;
using absl::base_internal::ThreadIdentity;
using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
using absl::synchronization_internal::GraphCycles;
@@ -86,28 +86,6 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
kDeadlockDetectionDefault);
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
-// ------------------------------------------ spinlock support
-
-// Make sure read-only globals used in the Mutex code are contained on the
-// same cacheline and cacheline aligned to eliminate any false sharing with
-// other globals from this and other modules.
-static struct MutexGlobals {
- MutexGlobals() {
- // Find machine-specific data needed for Delay() and
- // TryAcquireWithSpinning(). This runs in the global constructor
- // sequence, and before that zeros are safe values.
- num_cpus = absl::base_internal::NumCPUs();
- spinloop_iterations = num_cpus > 1 ? 1500 : 0;
- }
- int num_cpus;
- int spinloop_iterations;
- // Pad this struct to a full cacheline to prevent false sharing.
- char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)];
-} ABSL_CACHELINE_ALIGNED mutex_globals;
-static_assert(
- sizeof(MutexGlobals) == ABSL_CACHELINE_SIZE,
- "MutexGlobals must occupy an entire cacheline to prevent false sharing");
-
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
submit_profile_data;
@@ -144,7 +122,22 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
symbolizer.Store(fn);
}
-// spinlock delay on iteration c. Returns new c.
+struct ABSL_CACHELINE_ALIGNED MutexGlobals {
+ absl::once_flag once;
+ int num_cpus = 0;
+ int spinloop_iterations = 0;
+};
+
+static const MutexGlobals& GetMutexGlobals() {
+ ABSL_CONST_INIT static MutexGlobals data;
+ absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
+ data.num_cpus = absl::base_internal::NumCPUs();
+ data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
+ });
+ return data;
+}
+
+// Spinlock delay on iteration c. Returns new c.
namespace {
enum DelayMode { AGGRESSIVE, GENTLE };
};
@@ -154,22 +147,25 @@ static int Delay(int32_t c, DelayMode mode) {
// gentle then spin only a few times before yielding. Aggressive spinning is
// used to ensure that an Unlock() call, which must get the spin lock for
// any thread to make progress gets it without undue delay.
- int32_t limit = (mutex_globals.num_cpus > 1) ?
- ((mode == AGGRESSIVE) ? 5000 : 250) : 0;
+ const int32_t limit =
+ GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0;
if (c < limit) {
- c++; // spin
+ // Spin.
+ c++;
} else {
ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
- if (c == limit) { // yield once
+ if (c == limit) {
+ // Yield once.
AbslInternalMutexYield();
c++;
- } else { // then wait
+ } else {
+ // Then wait.
absl::SleepFor(absl::Microseconds(10));
c = 0;
}
ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
}
- return (c);
+ return c;
}
// --------------------------Generic atomic ops
@@ -1055,7 +1051,6 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
// Try to remove thread s from the list of waiters on this mutex.
// Does nothing if s is not on the waiter list.
void Mutex::TryRemove(PerThreadSynch *s) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
// acquire spinlock & lock
if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
@@ -1439,7 +1434,7 @@ void Mutex::AssertNotHeld() const {
// Attempt to acquire *mu, and return whether successful. The implementation
// may spin for a short while if the lock cannot be acquired immediately.
static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
- int c = mutex_globals.spinloop_iterations;
+ int c = GetMutexGlobals().spinloop_iterations;
do { // do/while somewhat faster on AMD
intptr_t v = mu->load(std::memory_order_relaxed);
if ((v & (kMuReader|kMuEvent)) != 0) {
@@ -1899,7 +1894,6 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
}
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0) {
@@ -2019,7 +2013,6 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
// or it is in the process of blocking on a condition variable; it must requeue
// itself on the mutex/condvar to wait for its condition to become true.
ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
this->AssertReaderHeld();
CheckForMutexCorruption(v, "Unlock");
@@ -2335,7 +2328,6 @@ void Mutex::Trans(MuHow how) {
// It will later acquire the mutex with high probability. Otherwise, we
// enqueue thread w on this mutex.
void Mutex::Fer(PerThreadSynch *w) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
ABSL_RAW_CHECK(w->waitp->cond == nullptr,
"Mutex::Fer while waiting on Condition");
@@ -2434,7 +2426,6 @@ CondVar::~CondVar() {
// Remove thread s from the list of waiters on this condition variable.
void CondVar::Remove(PerThreadSynch *s) {
- SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v;
int c = 0;
for (v = cv_.load(std::memory_order_relaxed);;
@@ -2595,7 +2586,6 @@ void CondVar::Wakeup(PerThreadSynch *w) {
}
void CondVar::Signal() {
- SchedulingGuard::ScopedDisable disable_rescheduling;
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;