path: root/absl/synchronization
author    Abseil Team <absl-team@google.com>    2020-06-24 12:46:11 -0700
committer Mark Barolak <mbar@google.com>        2020-06-24 16:12:47 -0400
commit    b86fff162e15ad8ee534c25e58bf522330e8376d (patch)
tree      d6af92a7f4307c1bb433b59515a034cba7eade8b /absl/synchronization
parent    10cb35e459f5ecca5b2ff107635da0bfa41011b4 (diff)
Export of internal Abseil changes
--
517dc7eba9878290fc93c6523dc6d6e2c49b8c1e by Abseil Team <absl-team@google.com>:

Handle 'nan' in HumanReadableInt::ToInt64.

PiperOrigin-RevId: 318121226

--
4fa6103b46dbc1375d416810dba591ab4f4898d1 by Abseil Team <absl-team@google.com>:

Promote `float`s to `double`s before formatting. This brings StrFormat
behavior more in line with that of sprintf.

PiperOrigin-RevId: 318091841

--
e4083a4b64b077d8520ccd8e0ca90da24f4ea24d by Abseil Team <absl-team@google.com>:

Google-internal changes only.

PiperOrigin-RevId: 318069900

--
ab24f60127db089bccaf5fb7d2d1967c76f34768 by Greg Falcon <gfalcon@google.com>:

Inclusive language fix: stop using blacklist/whitelist terminology in Abseil.

PiperOrigin-RevId: 317925379

--
52b56a27a773ea7c10ea8fd456ed606b9d778947 by Abseil Team <absl-team@google.com>:

Add a test for floats and label the tests for doubles properly.

PiperOrigin-RevId: 317916380

--
0f405960ab5b34d673244fda8bb9141c8f5c1a4f by Abseil Team <absl-team@google.com>:

Remove the static initialization of global variables used by absl::Mutex,
as requested by Chromium.

PiperOrigin-RevId: 317911430

--
ce8cb49d8f4f68950fd111682657ba57d5a09ca0 by Abseil Team <absl-team@google.com>:

Internal change.

PiperOrigin-RevId: 317864956

--
a781948e09fb8406d21a494b2fa4f78edaf9c2ea by Abseil Team <absl-team@google.com>:

Swap the ABSL_DLL / ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES order to fix
clang-cl builds.

PiperOrigin-RevId: 317858053

--
4ba197f0dd2cbf8f9b96cebffd878c8847d6ce8d by Gennadiy Rozental <rogeeff@google.com>:

Migrate use of annotation macros to ABSL-namespaced names.

PiperOrigin-RevId: 317792057

GitOrigin-RevId: 517dc7eba9878290fc93c6523dc6d6e2c49b8c1e
Change-Id: I1a0c04d01adbdc5106b68fd69c97c31f822e11dc
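Note on the StrFormat change above: promoting `float` to `double` matches C's
default argument promotions, under which a `float` passed through a variadic
call such as sprintf always arrives as a `double`. A minimal sketch of that
behavior (illustrative only, not Abseil code):

    #include <cstdio>

    int main() {
      float f = 0.1f;
      // Default argument promotion: a float passed through "..." reaches
      // printf as a double, so "%f" always formats a double value.
      std::printf("%f\n", f);                       // implicitly promoted
      std::printf("%f\n", static_cast<double>(f));  // prints the same value
    }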
Diffstat (limited to 'absl/synchronization')
-rw-r--r--  absl/synchronization/internal/per_thread_sem.h |  2
-rw-r--r--  absl/synchronization/mutex.cc                  | 68
2 files changed, 40 insertions(+), 30 deletions(-)
diff --git a/absl/synchronization/internal/per_thread_sem.h b/absl/synchronization/internal/per_thread_sem.h
index 8ab43915..2228b6e8 100644
--- a/absl/synchronization/internal/per_thread_sem.h
+++ b/absl/synchronization/internal/per_thread_sem.h
@@ -78,7 +78,7 @@ class PerThreadSem {
// !t.has_timeout() => Wait(t) will return true.
static inline bool Wait(KernelTimeout t);
- // White-listed callers.
+ // Permitted callers.
friend class PerThreadSemTest;
friend class absl::Mutex;
friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 62fa8e9c..05f5c041 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -39,7 +39,6 @@
#include <thread> // NOLINT(build/c++11)
#include "absl/base/attributes.h"
-#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/atomic_hook.h"
@@ -59,6 +58,7 @@
using absl::base_internal::CurrentThreadIdentityIfPresent;
using absl::base_internal::PerThreadSynch;
+using absl::base_internal::SchedulingGuard;
using absl::base_internal::ThreadIdentity;
using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
using absl::synchronization_internal::GraphCycles;
@@ -86,6 +86,28 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
kDeadlockDetectionDefault);
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
+// ------------------------------------------ spinlock support
+
+// Make sure read-only globals used in the Mutex code are contained on the
+// same cacheline and cacheline aligned to eliminate any false sharing with
+// other globals from this and other modules.
+static struct MutexGlobals {
+ MutexGlobals() {
+ // Find machine-specific data needed for Delay() and
+ // TryAcquireWithSpinning(). This runs in the global constructor
+ // sequence, and before that zeros are safe values.
+ num_cpus = absl::base_internal::NumCPUs();
+ spinloop_iterations = num_cpus > 1 ? 1500 : 0;
+ }
+ int num_cpus;
+ int spinloop_iterations;
+ // Pad this struct to a full cacheline to prevent false sharing.
+ char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)];
+} ABSL_CACHELINE_ALIGNED mutex_globals;
+static_assert(
+ sizeof(MutexGlobals) == ABSL_CACHELINE_SIZE,
+ "MutexGlobals must occupy an entire cacheline to prevent false sharing");
+
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
submit_profile_data;
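The block added above keeps the read-mostly Mutex globals on a single
cacheline: the struct is aligned to a cacheline boundary and padded to exactly
one cacheline, so no other global can share (and keep invalidating) that line.
A standalone sketch of the same pattern, assuming a 64-byte cacheline where
the real code uses ABSL_CACHELINE_SIZE and ABSL_CACHELINE_ALIGNED:

    #include <cstddef>

    constexpr std::size_t kCachelineSize = 64;  // assumption; Abseil abstracts this

    struct alignas(kCachelineSize) ReadMostlyGlobals {
      int num_cpus = 0;
      int spinloop_iterations = 0;
      // Pad to a full line so a write to an adjacent global cannot
      // invalidate the line these read-mostly fields live on.
      char padding[kCachelineSize - 2 * sizeof(int)];
    };
    static_assert(sizeof(ReadMostlyGlobals) == kCachelineSize,
                  "must occupy exactly one cacheline");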
@@ -122,22 +144,7 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
symbolizer.Store(fn);
}
-struct ABSL_CACHELINE_ALIGNED MutexGlobals {
- absl::once_flag once;
- int num_cpus = 0;
- int spinloop_iterations = 0;
-};
-
-static const MutexGlobals& GetMutexGlobals() {
- ABSL_CONST_INIT static MutexGlobals data;
- absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
- data.num_cpus = absl::base_internal::NumCPUs();
- data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
- });
- return data;
-}
-
-// Spinlock delay on iteration c. Returns new c.
+// spinlock delay on iteration c. Returns new c.
namespace {
enum DelayMode { AGGRESSIVE, GENTLE };
};
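The hunk above removes the call-once lazy-initialization helper in favor of
the eagerly constructed mutex_globals shown earlier. The constructor's comment
explains why that is safe: static storage is zero-initialized before any
global constructor runs, and zero is a safe value for both fields (zero spin
iterations simply disables spinning). A hedged sketch of the two idioms, using
std::call_once in place of the internal LowLevelCallOnce:

    #include <mutex>

    struct Globals { int num_cpus = 0; };

    // Lazy: the first caller pays for initialization; no global
    // constructor is emitted.
    const Globals& LazyGlobals() {
      static std::once_flag once;
      static Globals g;
      std::call_once(once, [] { g.num_cpus = 4; /* stand-in for NumCPUs() */ });
      return g;
    }

    // Eager: runs during the global-constructor sequence. Before that
    // sequence, static storage is zero-initialized, so a reader racing
    // with startup sees the safe value 0.
    static struct EagerGlobals {
      EagerGlobals() { num_cpus = 4; /* stand-in for NumCPUs() */ }
      int num_cpus;
    } eager_globals;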
@@ -147,25 +154,22 @@ static int Delay(int32_t c, DelayMode mode) {
// gentle then spin only a few times before yielding. Aggressive spinning is
// used to ensure that an Unlock() call, which must get the spin lock for
// any thread to make progress gets it without undue delay.
- const int32_t limit =
- GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0;
+ int32_t limit = (mutex_globals.num_cpus > 1) ?
+ ((mode == AGGRESSIVE) ? 5000 : 250) : 0;
if (c < limit) {
- // Spin.
- c++;
+ c++; // spin
} else {
ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
- if (c == limit) {
- // Yield once.
+ if (c == limit) { // yield once
AbslInternalMutexYield();
c++;
- } else {
- // Then wait.
+ } else { // then wait
absl::SleepFor(absl::Microseconds(10));
c = 0;
}
ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
}
- return c;
+ return (c);
}
// --------------------------Generic atomic ops
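Delay() implements a three-stage backoff: spin (increment c) while below the
limit, yield the CPU exactly once when c reaches the limit, then sleep 10us
and reset c to zero. A hypothetical caller, with try_acquire() standing in for
the real compare-and-swap attempt on the mutex word:

    bool try_acquire();  // hypothetical stand-in for the CAS attempt

    void AcquireWithBackoff() {
      int c = 0;
      while (!try_acquire()) {
        c = Delay(c, GENTLE);  // spin below limit, yield once at it, then sleep
      }
    }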
@@ -1051,6 +1055,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
// Try to remove thread s from the list of waiters on this mutex.
// Does nothing if s is not on the waiter list.
void Mutex::TryRemove(PerThreadSynch *s) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
// acquire spinlock & lock
if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
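The SchedulingGuard::ScopedDisable lines this commit adds (here and in the
hunks below) follow the RAII guard idiom: rescheduling is disabled in the
constructor and restored in the destructor, so every path out of the
spinlock-holding region, including early returns, re-enables it. A generic
sketch of such a guard; the Disable/Restore primitives are hypothetical
stand-ins, and the real type lives in absl::base_internal:

    bool DisableRescheduling();           // hypothetical; returns previous state
    void RestoreRescheduling(bool prev);  // hypothetical

    class ScopedDisableRescheduling {
     public:
      ScopedDisableRescheduling() : prev_(DisableRescheduling()) {}
      ~ScopedDisableRescheduling() { RestoreRescheduling(prev_); }
      // Non-copyable: the guard owns exactly one disable/restore pairing.
      ScopedDisableRescheduling(const ScopedDisableRescheduling&) = delete;
      ScopedDisableRescheduling& operator=(const ScopedDisableRescheduling&) = delete;
     private:
      const bool prev_;
    };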
@@ -1434,7 +1439,7 @@ void Mutex::AssertNotHeld() const {
// Attempt to acquire *mu, and return whether successful. The implementation
// may spin for a short while if the lock cannot be acquired immediately.
static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
- int c = GetMutexGlobals().spinloop_iterations;
+ int c = mutex_globals.spinloop_iterations;
do { // do/while somewhat faster on AMD
intptr_t v = mu->load(std::memory_order_relaxed);
if ((v & (kMuReader|kMuEvent)) != 0) {
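TryAcquireWithSpinning() bounds its optimism by spinloop_iterations: each pass
loads the mutex word relaxed, gives up on contended states, and otherwise
attempts an acquiring CAS. A simplified sketch of that shape; kLocked here is
an assumed single lock bit, not the real kMu* masks:

    #include <atomic>
    #include <cstdint>

    constexpr intptr_t kLocked = 1;  // assumption: a single lock bit

    bool TryAcquireSpin(std::atomic<intptr_t>* word, int iterations) {
      for (int c = iterations; c > 0; --c) {
        intptr_t v = word->load(std::memory_order_relaxed);
        if ((v & kLocked) == 0 &&
            word->compare_exchange_strong(v, v | kLocked,
                                          std::memory_order_acquire,
                                          std::memory_order_relaxed)) {
          return true;  // acquired while spinning
        }
      }
      return false;  // caller falls back to the slow path
    }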
@@ -1811,9 +1816,9 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
// So we "divert" (which un-ignores both memory accesses and synchronization)
// and then separately turn on ignores of memory accesses.
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
- ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
bool res = cond->Eval();
- ANNOTATE_IGNORE_READS_AND_WRITES_END();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
return res;
@@ -1894,6 +1899,7 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
}
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0) {
@@ -2013,6 +2019,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
// or it is in the process of blocking on a condition variable; it must requeue
// itself on the mutex/condvar to wait for its condition to become true.
ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
this->AssertReaderHeld();
CheckForMutexCorruption(v, "Unlock");
@@ -2328,6 +2335,7 @@ void Mutex::Trans(MuHow how) {
// It will later acquire the mutex with high probability. Otherwise, we
// enqueue thread w on this mutex.
void Mutex::Fer(PerThreadSynch *w) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
ABSL_RAW_CHECK(w->waitp->cond == nullptr,
"Mutex::Fer while waiting on Condition");
@@ -2426,6 +2434,7 @@ CondVar::~CondVar() {
// Remove thread s from the list of waiters on this condition variable.
void CondVar::Remove(PerThreadSynch *s) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v;
int c = 0;
for (v = cv_.load(std::memory_order_relaxed);;
@@ -2586,6 +2595,7 @@ void CondVar::Wakeup(PerThreadSynch *w) {
}
void CondVar::Signal() {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;