author    | Abseil Team <absl-team@google.com> | 2020-09-18 15:55:15 -0700
committer | Derek Mauro <dmauro@google.com>    | 2020-09-24 13:47:15 -0400
commit    | b56cbdd23834a65682c0b46f367f8679e83bc894
tree      | dacab9a64dd1a9e9668737e511d1a5420ff96001 /absl/synchronization
parent    | b832dce8489ef7b6231384909fd9b68d5a5ff2b7
Abseil LTS 20200923
What's New:
* `absl::StatusOr<T>` has been released. See our [blog
  post](https://abseil.io/blog/2020-091021-status) for more
  information; a usage sketch follows this list.
* Abseil Flags reflection interfaces have been released; a lookup
  sketch follows this list.
* Abseil Flags memory usage has been significantly optimized.
* Abseil now supports a "hardened" build mode. This build mode enables
  runtime checks that guard against programming errors that may lead
  to security vulnerabilities; a configuration sketch follows this list.
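
A minimal usage sketch for `absl::StatusOr<T>`; the `ParsePort` helper and
its error policy are hypothetical, invented only for illustration:

```cpp
#include <string>

#include "absl/status/status.h"
#include "absl/status/statusor.h"

// Hypothetical helper: returns a value on success, an error status otherwise.
absl::StatusOr<int> ParsePort(const std::string& s) {
  if (s.empty() || s.size() > 5 ||
      s.find_first_not_of("0123456789") != std::string::npos) {
    return absl::InvalidArgumentError("port must be a short decimal number");
  }
  return std::stoi(s);  // Success: the value converts implicitly.
}

int PortOrDefault(const std::string& s) {
  absl::StatusOr<int> port = ParsePort(s);
  if (!port.ok()) return 8080;  // port.status() carries the error details.
  return *port;                 // Dereference to read the contained value.
}
```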
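A minimal sketch of the flags reflection interface, assuming the
`absl::FindCommandLineFlag()` entry point from `absl/flags/reflection.h`;
the `worker_threads` flag is invented for illustration:

```cpp
#include <iostream>

#include "absl/flags/flag.h"
#include "absl/flags/reflection.h"

// Hypothetical flag used only for this example.
ABSL_FLAG(int, worker_threads, 4, "Number of worker threads.");

void PrintFlagInfo() {
  // Look a flag up by name at runtime rather than through its symbol.
  absl::CommandLineFlag* flag = absl::FindCommandLineFlag("worker_threads");
  if (flag != nullptr) {
    std::cout << flag->Name() << "=" << flag->CurrentValue() << "  ("
              << flag->Help() << ")\n";
  }
}
```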
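A sketch of how a hardened build is enabled, assuming the
`ABSL_OPTION_HARDENED` knob in `absl/base/options.h`; the `LastPlusOne`
function is invented for illustration:

```cpp
// To opt in, change one line in the copy of absl/base/options.h that ships
// with Abseil (the default is 0):
//
//   #define ABSL_OPTION_HARDENED 1
//
// With hardening on, precondition violations in Abseil types become a
// guaranteed process abort rather than undefined behavior.

#include "absl/types/span.h"

int LastPlusOne(absl::Span<const int> s) {
  // Out-of-bounds access: in a hardened build this trips an internal
  // hardening assertion and terminates the process; in a regular release
  // build it is undefined behavior.
  return s[s.size()];
}
```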
Notable Fixes:
* Sanitizer dynamic annotations like `AnnotateRWLockCreate`, which are
  also defined by the compiler sanitizer implementation, are no longer
  defined by Abseil.
* Sanitizer macros are now prefixed with `ABSL_` to avoid naming collisions.
* Sanitizer usage is now detected automatically and no longer requires
  macros like `ADDRESS_SANITIZER` to be defined on the command line;
  a detection sketch follows this list.
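
A simplified sketch of the detection approach (compiler-provided signals
instead of user-supplied `-D` flags); Abseil's actual logic lives in
`absl/base/config.h` and defines macros such as
`ABSL_HAVE_ADDRESS_SANITIZER`, while the `HAVE_ASAN` name below is
hypothetical:

```cpp
#if defined(__SANITIZE_ADDRESS__)
// GCC defines __SANITIZE_ADDRESS__ when -fsanitize=address is in effect.
#define HAVE_ASAN 1
#elif defined(__has_feature)
// Clang exposes enabled sanitizers through __has_feature.
#if __has_feature(address_sanitizer)
#define HAVE_ASAN 1
#endif
#endif
```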
Breaking Changes:
* Abseil no longer contains a `dynamic_annotations` library. Users of a
  supported build system (Bazel or CMake) are unaffected, but users
  manually specifying link libraries may get an error about a missing
  linker input.
Baseline: 7680a5f8efe32de4753baadbd63e74e59d95bac1
Cherry picks: None
Diffstat (limited to 'absl/synchronization')
-rw-r--r-- | absl/synchronization/BUILD.bazel                         |   6
-rw-r--r-- | absl/synchronization/CMakeLists.txt                      |   2
-rw-r--r-- | absl/synchronization/internal/create_thread_identity.cc  |   6
-rw-r--r-- | absl/synchronization/internal/graphcycles.cc             |   6
-rw-r--r-- | absl/synchronization/internal/kernel_timeout.h           |  56
-rw-r--r-- | absl/synchronization/internal/mutex_nonprod.cc           |   7
-rw-r--r-- | absl/synchronization/internal/mutex_nonprod.inc          |  20
-rw-r--r-- | absl/synchronization/internal/per_thread_sem.h           |   2
-rw-r--r-- | absl/synchronization/internal/per_thread_sem_test.cc     |   1
-rw-r--r-- | absl/synchronization/internal/waiter.cc                  |   8
-rw-r--r-- | absl/synchronization/internal/waiter.h                   |   4
-rw-r--r-- | absl/synchronization/mutex.cc                            | 138
-rw-r--r-- | absl/synchronization/mutex.h                             |  21
-rw-r--r-- | absl/synchronization/mutex_benchmark.cc                  |   3
-rw-r--r-- | absl/synchronization/mutex_test.cc                       |   9
15 files changed, 159 insertions, 130 deletions
diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel
index 3f876b9f..4d4d6806 100644
--- a/absl/synchronization/BUILD.bazel
+++ b/absl/synchronization/BUILD.bazel
@@ -24,7 +24,7 @@ load(

 package(default_visibility = ["//visibility:public"])

-licenses(["notice"])  # Apache 2.0
+licenses(["notice"])

 # Internal data structure for efficiently detecting mutex dependency cycles
 cc_library(
@@ -90,6 +90,7 @@ cc_library(
     copts = ABSL_DEFAULT_COPTS,
     linkopts = select({
         "//absl:windows": [],
+        "//absl:wasm": [],
         "//conditions:default": ["-pthread"],
     }) + ABSL_DEFAULT_LINKOPTS,
     deps = [
@@ -189,6 +190,7 @@ cc_test(
         ":synchronization",
         ":thread_pool",
         "//absl/base",
+        "//absl/base:config",
         "//absl/base:core_headers",
         "//absl/base:raw_logging_internal",
         "//absl/memory",
@@ -210,6 +212,7 @@ cc_library(
         ":synchronization",
         ":thread_pool",
         "//absl/base",
+        "//absl/base:config",
         "@com_github_google_benchmark//:benchmark_main",
     ],
     alwayslink = 1,
@@ -248,6 +251,7 @@ cc_library(
     deps = [
         ":synchronization",
         "//absl/base",
+        "//absl/base:config",
         "//absl/strings",
         "//absl/time",
         "@com_google_googletest//:gtest",
diff --git a/absl/synchronization/CMakeLists.txt b/absl/synchronization/CMakeLists.txt
index dfe5d05d..e5bc52fb 100644
--- a/absl/synchronization/CMakeLists.txt
+++ b/absl/synchronization/CMakeLists.txt
@@ -149,6 +149,7 @@ absl_cc_test(
     absl::synchronization
     absl::thread_pool
     absl::base
+    absl::config
     absl::core_headers
     absl::memory
     absl::raw_logging_internal
@@ -179,6 +180,7 @@ absl_cc_library(
   DEPS
     absl::synchronization
     absl::base
+    absl::config
     absl::strings
     absl::time
     gmock
diff --git a/absl/synchronization/internal/create_thread_identity.cc b/absl/synchronization/internal/create_thread_identity.cc
index fa0070a9..53a71b34 100644
--- a/absl/synchronization/internal/create_thread_identity.cc
+++ b/absl/synchronization/internal/create_thread_identity.cc
@@ -32,9 +32,9 @@ namespace synchronization_internal {

 // ThreadIdentity storage is persistent, we maintain a free-list of previously
 // released ThreadIdentity objects.
-static base_internal::SpinLock freelist_lock(
-    base_internal::kLinkerInitialized);
-static base_internal::ThreadIdentity* thread_identity_freelist;
+ABSL_CONST_INIT static base_internal::SpinLock freelist_lock(
+    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist;

 // A per-thread destructor for reclaiming associated ThreadIdentity objects.
 // Since we must preserve their storage we cache them for re-use.
diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc
index 6a2bcdf6..19f9aab5 100644
--- a/absl/synchronization/internal/graphcycles.cc
+++ b/absl/synchronization/internal/graphcycles.cc
@@ -51,9 +51,9 @@ namespace {

 // Avoid LowLevelAlloc's default arena since it calls malloc hooks in
 // which people are doing things like acquiring Mutexes.
-static absl::base_internal::SpinLock arena_mu(
-    absl::base_internal::kLinkerInitialized);
-static base_internal::LowLevelAlloc::Arena* arena;
+ABSL_CONST_INIT static absl::base_internal::SpinLock arena_mu(
+    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;

 static void InitArenaIfNecessary() {
   arena_mu.Lock();
diff --git a/absl/synchronization/internal/kernel_timeout.h b/absl/synchronization/internal/kernel_timeout.h
index d6ac5db0..1084e1e6 100644
--- a/absl/synchronization/internal/kernel_timeout.h
+++ b/absl/synchronization/internal/kernel_timeout.h
@@ -57,6 +57,10 @@ class KernelTimeout {

   bool has_timeout() const { return ns_ != 0; }

+  // Convert to parameter for sem_timedwait/futex/similar.  Only for approved
+  // users.  Do not call if !has_timeout.
+  struct timespec MakeAbsTimespec();
+
 private:
   // internal rep, not user visible: ns after unix epoch.
   // zero = no timeout.
@@ -82,34 +86,6 @@ class KernelTimeout {
     return x;
   }

-  // Convert to parameter for sem_timedwait/futex/similar.  Only for approved
-  // users.  Do not call if !has_timeout.
-  struct timespec MakeAbsTimespec() {
-    int64_t n = ns_;
-    static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
-    if (n == 0) {
-      ABSL_RAW_LOG(
-          ERROR,
-          "Tried to create a timespec from a non-timeout; never do this.");
-      // But we'll try to continue sanely.  no-timeout ~= saturated timeout.
-      n = (std::numeric_limits<int64_t>::max)();
-    }
-
-    // Kernel APIs validate timespecs as being at or after the epoch,
-    // despite the kernel time type being signed.  However, no one can
-    // tell the difference between a timeout at or before the epoch (since
-    // all such timeouts have expired!)
-    if (n < 0) n = 0;
-
-    struct timespec abstime;
-    int64_t seconds = (std::min)(n / kNanosPerSecond,
-                                 int64_t{(std::numeric_limits<time_t>::max)()});
-    abstime.tv_sec = static_cast<time_t>(seconds);
-    abstime.tv_nsec =
-        static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
-    return abstime;
-  }
-
 #ifdef _WIN32
   // Converts to milliseconds from now, or INFINITE when
   // !has_timeout(). For use by SleepConditionVariableSRW on
@@ -148,6 +124,30 @@ class KernelTimeout {
   friend class Waiter;
 };

+inline struct timespec KernelTimeout::MakeAbsTimespec() {
+  int64_t n = ns_;
+  static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
+  if (n == 0) {
+    ABSL_RAW_LOG(
+        ERROR, "Tried to create a timespec from a non-timeout; never do this.");
+    // But we'll try to continue sanely.  no-timeout ~= saturated timeout.
+    n = (std::numeric_limits<int64_t>::max)();
+  }
+
+  // Kernel APIs validate timespecs as being at or after the epoch,
+  // despite the kernel time type being signed.  However, no one can
+  // tell the difference between a timeout at or before the epoch (since
+  // all such timeouts have expired!)
+  if (n < 0) n = 0;
+
+  struct timespec abstime;
+  int64_t seconds = (std::min)(n / kNanosPerSecond,
+                               int64_t{(std::numeric_limits<time_t>::max)()});
+  abstime.tv_sec = static_cast<time_t>(seconds);
+  abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
+  return abstime;
+}
+
 }  // namespace synchronization_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/synchronization/internal/mutex_nonprod.cc b/absl/synchronization/internal/mutex_nonprod.cc
index 4590b98d..334c3bc0 100644
--- a/absl/synchronization/internal/mutex_nonprod.cc
+++ b/absl/synchronization/internal/mutex_nonprod.cc
@@ -27,11 +27,16 @@

 #include <algorithm>

+#include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/time/time.h"

 namespace absl {
 ABSL_NAMESPACE_BEGIN
+
+void SetMutexDeadlockDetectionMode(OnDeadlockCycle) {}
+void EnableMutexInvariantDebugging(bool) {}
+
 namespace synchronization_internal {
 namespace {
@@ -274,7 +279,7 @@ bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {

 void CondVar::EnableDebugLog(const char*) {}

-#ifdef THREAD_SANITIZER
+#ifdef ABSL_HAVE_THREAD_SANITIZER
 extern "C" void __tsan_read1(void *addr);
 #else
 #define __tsan_read1(addr)  // do nothing if TSan not enabled
diff --git a/absl/synchronization/internal/mutex_nonprod.inc b/absl/synchronization/internal/mutex_nonprod.inc
index a1502e72..d83bc8a9 100644
--- a/absl/synchronization/internal/mutex_nonprod.inc
+++ b/absl/synchronization/internal/mutex_nonprod.inc
@@ -209,31 +209,22 @@ class SynchronizationStorage {
   // Instances allocated on the heap or on the stack should use the default
   // constructor.
   SynchronizationStorage()
-      : is_dynamic_(true), once_() {}
-
-  // Instances allocated in static storage (not on the heap, not on the
-  // stack) should use this constructor.
-  explicit SynchronizationStorage(base_internal::LinkerInitialized) {}
+      : destruct_(true), once_() {}

   constexpr explicit SynchronizationStorage(absl::ConstInitType)
-      : is_dynamic_(false), once_(), space_{{0}} {}
+      : destruct_(false), once_(), space_{{0}} {}

   SynchronizationStorage(SynchronizationStorage&) = delete;
   SynchronizationStorage& operator=(SynchronizationStorage&) = delete;

   ~SynchronizationStorage() {
-    if (is_dynamic_) {
+    if (destruct_) {
       get()->~T();
     }
   }

   // Retrieve the object in storage. This is fast and thread safe, but does
   // incur the cost of absl::call_once().
-  //
-  // For instances in static storage constructed with the
-  // LinkerInitialized constructor, may be called at any time without
-  // regard for order of dynamic initialization or destruction of objects
-  // in static storage. See the class comment for caveats.
   T* get() {
     absl::call_once(once_, SynchronizationStorage::Construct, this);
     return reinterpret_cast<T*>(&space_);
@@ -245,10 +236,7 @@ class SynchronizationStorage {
   }

   // When true, T's destructor is run when this is destructed.
-  //
-  // The LinkerInitialized constructor assumes this value will be set
-  // false by static initialization.
-  bool is_dynamic_;
+  const bool destruct_;

   absl::once_flag once_;
diff --git a/absl/synchronization/internal/per_thread_sem.h b/absl/synchronization/internal/per_thread_sem.h
index 8ab43915..2228b6e8 100644
--- a/absl/synchronization/internal/per_thread_sem.h
+++ b/absl/synchronization/internal/per_thread_sem.h
@@ -78,7 +78,7 @@ class PerThreadSem {
   // !t.has_timeout() => Wait(t) will return true.
   static inline bool Wait(KernelTimeout t);

-  // White-listed callers.
+  // Permitted callers.
   friend class PerThreadSemTest;
   friend class absl::Mutex;
   friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
diff --git a/absl/synchronization/internal/per_thread_sem_test.cc b/absl/synchronization/internal/per_thread_sem_test.cc
index b5a2f6d4..8cf59e64 100644
--- a/absl/synchronization/internal/per_thread_sem_test.cc
+++ b/absl/synchronization/internal/per_thread_sem_test.cc
@@ -23,6 +23,7 @@
 #include <thread>  // NOLINT(build/c++11)

 #include "gtest/gtest.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/cycleclock.h"
 #include "absl/base/internal/thread_identity.h"
 #include "absl/strings/str_cat.h"
diff --git a/absl/synchronization/internal/waiter.cc b/absl/synchronization/internal/waiter.cc
index 2949f5a8..b6150b9b 100644
--- a/absl/synchronization/internal/waiter.cc
+++ b/absl/synchronization/internal/waiter.cc
@@ -86,6 +86,14 @@ static void MaybeBecomeIdle() {
 #endif
 #endif

+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
 class Futex {
  public:
   static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
diff --git a/absl/synchronization/internal/waiter.h b/absl/synchronization/internal/waiter.h
index a6e6d4c7..887f9b1b 100644
--- a/absl/synchronization/internal/waiter.h
+++ b/absl/synchronization/internal/waiter.h
@@ -100,8 +100,8 @@ class Waiter {
   }

   // How many periods to remain idle before releasing resources
-#ifndef THREAD_SANITIZER
-  static const int kIdlePeriods = 60;
+#ifndef ABSL_HAVE_THREAD_SANITIZER
+  static constexpr int kIdlePeriods = 60;
 #else
   // Memory consumption under ThreadSanitizer is a serious concern,
   // so we release resources sooner. The value of 1 leads to 1 to 2 second
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index e0879b05..9b7f088a 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -39,6 +39,7 @@
 #include <thread>  // NOLINT(build/c++11)

 #include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
 #include "absl/base/config.h"
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/atomic_hook.h"
@@ -58,6 +59,7 @@

 using absl::base_internal::CurrentThreadIdentityIfPresent;
 using absl::base_internal::PerThreadSynch;
+using absl::base_internal::SchedulingGuard;
 using absl::base_internal::ThreadIdentity;
 using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
 using absl::synchronization_internal::GraphCycles;
@@ -75,7 +77,7 @@ ABSL_NAMESPACE_BEGIN

 namespace {

-#if defined(THREAD_SANITIZER)
+#if defined(ABSL_HAVE_THREAD_SANITIZER)
 constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
 #else
 constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
@@ -85,28 +87,6 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
     kDeadlockDetectionDefault);
 ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);

-// ------------------------------------------ spinlock support
-
-// Make sure read-only globals used in the Mutex code are contained on the
-// same cacheline and cacheline aligned to eliminate any false sharing with
-// other globals from this and other modules.
-static struct MutexGlobals {
-  MutexGlobals() {
-    // Find machine-specific data needed for Delay() and
-    // TryAcquireWithSpinning(). This runs in the global constructor
-    // sequence, and before that zeros are safe values.
-    num_cpus = absl::base_internal::NumCPUs();
-    spinloop_iterations = num_cpus > 1 ? 1500 : 0;
-  }
-  int num_cpus;
-  int spinloop_iterations;
-  // Pad this struct to a full cacheline to prevent false sharing.
-  char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)];
-} ABSL_CACHELINE_ALIGNED mutex_globals;
-static_assert(
-    sizeof(MutexGlobals) == ABSL_CACHELINE_SIZE,
-    "MutexGlobals must occupy an entire cacheline to prevent false sharing");
-
 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
     absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
         submit_profile_data;
@@ -143,33 +123,55 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
   symbolizer.Store(fn);
 }

-// spinlock delay on iteration c.  Returns new c.
+struct ABSL_CACHELINE_ALIGNED MutexGlobals {
+  absl::once_flag once;
+  int num_cpus = 0;
+  int spinloop_iterations = 0;
+};
+
+static const MutexGlobals& GetMutexGlobals() {
+  ABSL_CONST_INIT static MutexGlobals data;
+  absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
+    data.num_cpus = absl::base_internal::NumCPUs();
+    data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
+  });
+  return data;
+}
+
+// Spinlock delay on iteration c.  Returns new c.
 namespace {
 enum DelayMode { AGGRESSIVE, GENTLE };
 };
-static int Delay(int32_t c, DelayMode mode) {
+
+namespace synchronization_internal {
+int MutexDelay(int32_t c, int mode) {
   // If this a uniprocessor, only yield/sleep.  Otherwise, if the mode is
   // aggressive then spin many times before yielding.  If the mode is
   // gentle then spin only a few times before yielding.  Aggressive spinning is
   // used to ensure that an Unlock() call, which must get the spin lock for
   // any thread to make progress gets it without undue delay.
-  int32_t limit = (mutex_globals.num_cpus > 1) ?
-      ((mode == AGGRESSIVE) ? 5000 : 250) : 0;
+  const int32_t limit =
+      GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0;
   if (c < limit) {
-    c++;  // spin
+    // Spin.
+    c++;
   } else {
+    SchedulingGuard::ScopedEnable enable_rescheduling;
     ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
-    if (c == limit) {  // yield once
+    if (c == limit) {
+      // Yield once.
       AbslInternalMutexYield();
       c++;
-    } else {          // then wait
+    } else {
+      // Then wait.
       absl::SleepFor(absl::Microseconds(10));
       c = 0;
     }
     ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
   }
-  return (c);
+  return c;
 }
+}  // namespace synchronization_internal

 // --------------------------Generic atomic ops
 // Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
@@ -207,12 +209,12 @@ static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
 //------------------------------------------------------------------
 // Data for doing deadlock detection.
-static absl::base_internal::SpinLock deadlock_graph_mu(
-    absl::base_internal::kLinkerInitialized);
+ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
+    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);

-// graph used to detect deadlocks.
-static GraphCycles *deadlock_graph ABSL_GUARDED_BY(deadlock_graph_mu)
-    ABSL_PT_GUARDED_BY(deadlock_graph_mu);
+// Graph used to detect deadlocks.
+ABSL_CONST_INIT static GraphCycles *deadlock_graph
+    ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);

 //------------------------------------------------------------------
 // An event mechanism for debugging mutex use.
@@ -273,13 +275,12 @@ static const struct {
   {0, "SignalAll on "},
 };

-static absl::base_internal::SpinLock synch_event_mu(
-    absl::base_internal::kLinkerInitialized);
-// protects synch_event
+ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
+    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);

 // Hash table size; should be prime > 2.
 // Can't be too small, as it's used for deadlock detection information.
-static const uint32_t kNSynchEvent = 1031;
+static constexpr uint32_t kNSynchEvent = 1031;

 static struct SynchEvent {     // this is a trivial hash table for the events
   // struct is freed when refcount reaches 0
@@ -299,7 +300,7 @@ static struct SynchEvent {     // this is a trivial hash table for the events
   bool log;             // logging turned on

   // Constant after initialization
-  char name[1];         // actually longer---NUL-terminated std::string
+  char name[1];         // actually longer---NUL-terminated string
 } * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);

 // Ensure that the object at "addr" has a SynchEvent struct associated with it,
@@ -704,7 +705,7 @@ static constexpr bool kDebugMode = false;
 static constexpr bool kDebugMode = true;
 #endif

-#ifdef THREAD_SANITIZER
+#ifdef ABSL_HAVE_THREAD_SANITIZER
 static unsigned TsanFlags(Mutex::MuHow how) {
   return how == kShared ? __tsan_mutex_read_lock : 0;
 }
@@ -1055,6 +1056,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
 // Try to remove thread s from the list of waiters on this mutex.
 // Does nothing if s is not on the waiter list.
 void Mutex::TryRemove(PerThreadSynch *s) {
+  SchedulingGuard::ScopedDisable disable_rescheduling;
   intptr_t v = mu_.load(std::memory_order_relaxed);
   // acquire spinlock & lock
   if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
@@ -1119,7 +1121,7 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) {
       this->TryRemove(s);
       int c = 0;
       while (s->next != nullptr) {
-        c = Delay(c, GENTLE);
+        c = synchronization_internal::MutexDelay(c, GENTLE);
         this->TryRemove(s);
       }
       if (kDebugMode) {
@@ -1438,21 +1440,19 @@ void Mutex::AssertNotHeld() const {
 // Attempt to acquire *mu, and return whether successful.  The implementation
 // may spin for a short while if the lock cannot be acquired immediately.
 static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
-  int c = mutex_globals.spinloop_iterations;
-  int result = -1;  // result of operation:  0=false, 1=true, -1=unknown
-
+  int c = GetMutexGlobals().spinloop_iterations;
   do {  // do/while somewhat faster on AMD
     intptr_t v = mu->load(std::memory_order_relaxed);
-    if ((v & (kMuReader|kMuEvent)) != 0) {  // a reader or tracing -> give up
-      result = 0;
+    if ((v & (kMuReader|kMuEvent)) != 0) {
+      return false;  // a reader or tracing -> give up
     } else if (((v & kMuWriter) == 0) &&  // no holder -> try to acquire
                mu->compare_exchange_strong(v, kMuWriter | v,
                                            std::memory_order_acquire,
                                            std::memory_order_relaxed)) {
-      result = 1;
+      return true;
     }
-  } while (result == -1 && --c > 0);
-  return result == 1;
+  } while (--c > 0);
+  return false;
 }

 ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
@@ -1751,7 +1751,8 @@ static const intptr_t ignore_waiting_writers[] = {
 };

 // Internal version of LockWhen().  See LockSlowWithDeadline()
-void Mutex::LockSlow(MuHow how, const Condition *cond, int flags) {
+ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
+                                             int flags) {
   ABSL_RAW_CHECK(
       this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
       "condition untrue on return from LockSlow");
@@ -1766,7 +1767,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
   // All memory accesses are ignored inside of mutex operations + for unlock
   // operation tsan considers that we've already released the mutex.
   bool res = false;
-#ifdef THREAD_SANITIZER
+#ifdef ABSL_HAVE_THREAD_SANITIZER
   const int flags = read_lock ? __tsan_mutex_read_lock : 0;
   const int tryflags = flags | (trylock ? __tsan_mutex_try_lock : 0);
 #endif
@@ -1816,9 +1817,9 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
   // So we "divert" (which un-ignores both memory accesses and synchronization)
   // and then separately turn on ignores of memory accesses.
   ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
-  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
   bool res = cond->Eval();
-  ANNOTATE_IGNORE_READS_AND_WRITES_END();
+  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
   ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
   static_cast<void>(mu);  // Prevent unused param warning in non-TSAN builds.
   return res;
@@ -1899,6 +1900,7 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
 }

 void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
+  SchedulingGuard::ScopedDisable disable_rescheduling;
   int c = 0;
   intptr_t v = mu_.load(std::memory_order_relaxed);
   if ((v & kMuEvent) != 0) {
@@ -2000,7 +2002,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
       ABSL_RAW_CHECK(
           waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
           "detected illegal recursion into Mutex code");
-      c = Delay(c, GENTLE);          // delay, then try again
+      // delay, then try again
+      c = synchronization_internal::MutexDelay(c, GENTLE);
     }
   ABSL_RAW_CHECK(
       waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
@@ -2017,7 +2020,8 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
 // which holds the lock but is not runnable because its condition is false
 // or it is in the process of blocking on a condition variable; it must requeue
 // itself on the mutex/condvar to wait for its condition to become true.
-void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+  SchedulingGuard::ScopedDisable disable_rescheduling;
   intptr_t v = mu_.load(std::memory_order_relaxed);
   this->AssertReaderHeld();
   CheckForMutexCorruption(v, "Unlock");
@@ -2294,7 +2298,8 @@ void Mutex::UnlockSlow(SynchWaitParams *waitp) {
         mu_.store(nv, std::memory_order_release);
         break;  // out of for(;;)-loop
       }
-      c = Delay(c, AGGRESSIVE);  // aggressive here; no one can proceed till we do
+      // aggressive here; no one can proceed till we do
+      c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
     }  // end of for(;;)-loop

   if (wake_list != kPerThreadSynchNull) {
@@ -2333,6 +2338,7 @@ void Mutex::Trans(MuHow how) {
 // It will later acquire the mutex with high probability.  Otherwise, we
 // enqueue thread w on this mutex.
 void Mutex::Fer(PerThreadSynch *w) {
+  SchedulingGuard::ScopedDisable disable_rescheduling;
   int c = 0;
   ABSL_RAW_CHECK(w->waitp->cond == nullptr,
                  "Mutex::Fer while waiting on Condition");
@@ -2382,7 +2388,7 @@ void Mutex::Fer(PerThreadSynch *w) {
         return;
       }
     }
-    c = Delay(c, GENTLE);
+    c = synchronization_internal::MutexDelay(c, GENTLE);
   }
 }

@@ -2431,6 +2437,7 @@ CondVar::~CondVar() {

 // Remove thread s from the list of waiters on this condition variable.
 void CondVar::Remove(PerThreadSynch *s) {
+  SchedulingGuard::ScopedDisable disable_rescheduling;
   intptr_t v;
   int c = 0;
   for (v = cv_.load(std::memory_order_relaxed);;
@@ -2459,7 +2466,8 @@ void CondVar::Remove(PerThreadSynch *s) {
                 std::memory_order_release);
       return;
     } else {
-      c = Delay(c, GENTLE);  // try again after a delay
+      // try again after a delay
+      c = synchronization_internal::MutexDelay(c, GENTLE);
     }
   }
 }
@@ -2492,7 +2500,7 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
          !cv_word->compare_exchange_weak(v, v | kCvSpin,
                                          std::memory_order_acquire,
                                          std::memory_order_relaxed)) {
-    c = Delay(c, GENTLE);
+    c = synchronization_internal::MutexDelay(c, GENTLE);
     v = cv_word->load(std::memory_order_relaxed);
   }
   ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
@@ -2591,6 +2599,7 @@ void CondVar::Wakeup(PerThreadSynch *w) {
 }

 void CondVar::Signal() {
+  SchedulingGuard::ScopedDisable disable_rescheduling;
   ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
   intptr_t v;
   int c = 0;
@@ -2623,7 +2632,7 @@ void CondVar::Signal() {
       ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
       return;
     } else {
-      c = Delay(c, GENTLE);
+      c = synchronization_internal::MutexDelay(c, GENTLE);
     }
   }
   ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
@@ -2660,7 +2669,8 @@ void CondVar::SignalAll () {
       ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
       return;
     } else {
-      c = Delay(c, GENTLE);  // try again after a delay
+      // try again after a delay
+      c = synchronization_internal::MutexDelay(c, GENTLE);
     }
   }
   ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
@@ -2673,7 +2683,7 @@ void ReleasableMutexLock::Release() {
   this->mu_ = nullptr;
 }

-#ifdef THREAD_SANITIZER
+#ifdef ABSL_HAVE_THREAD_SANITIZER
 extern "C" void __tsan_read1(void *addr);
 #else
 #define __tsan_read1(addr)  // do nothing if TSan not enabled
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index 8c70c4ce..52401fe3 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -331,17 +331,16 @@ class ABSL_LOCKABLE Mutex {
   // Mutex::AwaitWithTimeout()
   // Mutex::AwaitWithDeadline()
   //
-  // If `cond` is initially true, do nothing, or act as though `cond` is
-  // initially false.
-  //
-  // If `cond` is initially false, unlock this `Mutex` and block until
-  // simultaneously:
+  // Unlocks this `Mutex` and blocks until simultaneously:
   //   - either `cond` is true or the {timeout has expired, deadline has passed}
   //     and
   //   - this `Mutex` can be reacquired,
   // then reacquire this `Mutex` in the same mode in which it was previously
   // held, returning `true` iff `cond` is `true` on return.
   //
+  // If the condition is initially `true`, the implementation *may* skip the
+  // release/re-acquire step and return immediately.
+  //
   // Deadlines in the past are equivalent to an immediate deadline.
   // Negative timeouts are equivalent to a zero timeout.
   //
@@ -686,6 +685,11 @@ class Condition {
   //       return processed_ >= current;
   //     };
   //     mu_.Await(Condition(&reached));
+  //
+  // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
+  // the lambda as it may be called when the mutex is being unlocked from a
+  // scope holding only a reader lock, which will make the assertion not
+  // fulfilled and crash the binary.

   // See class comment for performance advice. In particular, if there
   // might be more than one waiter for the same condition, make sure
@@ -770,6 +774,8 @@ class Condition {
 //
 class CondVar {
  public:
+  // A `CondVar` allocated on the heap or on the stack can use this
+  // constructor.
   CondVar();
   ~CondVar();
@@ -901,9 +907,11 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
 };

 #ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
+
 inline constexpr Mutex::Mutex(absl::ConstInitType) : impl_(absl::kConstInit) {}

 #else
+
 inline Mutex::Mutex() : mu_(0) {
   ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
 }
@@ -911,7 +919,8 @@ inline Mutex::Mutex() : mu_(0) {
 inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}

 inline CondVar::CondVar() : cv_(0) {}
-#endif
+
+#endif  // ABSL_INTERNAL_USE_NONPROD_MUTEX

 // static
 template <typename T>
diff --git a/absl/synchronization/mutex_benchmark.cc b/absl/synchronization/mutex_benchmark.cc
index ab188001..933ea14f 100644
--- a/absl/synchronization/mutex_benchmark.cc
+++ b/absl/synchronization/mutex_benchmark.cc
@@ -16,6 +16,7 @@
 #include <mutex>  // NOLINT(build/c++11)
 #include <vector>

+#include "absl/base/config.h"
 #include "absl/base/internal/cycleclock.h"
 #include "absl/base/internal/spinlock.h"
 #include "absl/synchronization/blocking_counter.h"
@@ -213,7 +214,7 @@ void BM_ConditionWaiters(benchmark::State& state) {
 }

 // Some configurations have higher thread limits than others.
-#if defined(__linux__) && !defined(THREAD_SANITIZER)
+#if defined(__linux__) && !defined(ABSL_HAVE_THREAD_SANITIZER)
 constexpr int kMaxConditionWaiters = 8192;
 #else
 constexpr int kMaxConditionWaiters = 1024;
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index afb363af..16fc9058 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -30,6 +30,7 @@

 #include "gtest/gtest.h"
 #include "absl/base/attributes.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/sysinfo.h"
 #include "absl/memory/memory.h"
@@ -815,7 +816,7 @@ TEST(Mutex, MutexReaderDecrementBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {

 // Test that we correctly handle the situation when a lock is
 // held and then destroyed (w/o unlocking).
-#ifdef THREAD_SANITIZER
+#ifdef ABSL_HAVE_THREAD_SANITIZER
 // TSAN reports errors when locked Mutexes are destroyed.
 TEST(Mutex, DISABLED_LockedMutexDestructionBug) NO_THREAD_SAFETY_ANALYSIS {
 #else
@@ -1067,7 +1068,7 @@ class ScopedDisableBazelTestWarnings {
 const char ScopedDisableBazelTestWarnings::kVarName[] =
     "TEST_WARNINGS_OUTPUT_FILE";

-#ifdef THREAD_SANITIZER
+#ifdef ABSL_HAVE_THREAD_SANITIZER
 // This test intentionally creates deadlocks to test the deadlock detector.
 TEST(Mutex, DISABLED_DeadlockDetectorBazelWarning) {
 #else
@@ -1101,7 +1102,7 @@ TEST(Mutex, DeadlockDetectorBazelWarning) {
 // annotation-based static thread-safety analysis is not currently
 // predicate-aware and cannot tell if the two for-loops that acquire and
 // release the locks have the same predicates.
-TEST(Mutex, DeadlockDetectorStessTest) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+TEST(Mutex, DeadlockDetectorStressTest) ABSL_NO_THREAD_SAFETY_ANALYSIS {
   // Stress test: Here we create a large number of locks and use all of them.
   // If a deadlock detector keeps a full graph of lock acquisition order,
   // it will likely be too slow for this test to pass.
@@ -1119,7 +1120,7 @@ TEST(Mutex, DeadlockDetectorStessTest) ABSL_NO_THREAD_SAFETY_ANALYSIS {
   }
 }

-#ifdef THREAD_SANITIZER
+#ifdef ABSL_HAVE_THREAD_SANITIZER
 // TSAN reports errors when locked Mutexes are destroyed.
 TEST(Mutex, DISABLED_DeadlockIdBug) NO_THREAD_SAFETY_ANALYSIS {
 #else