Diffstat (limited to 'absl/synchronization/internal')
-rw-r--r-- | absl/synchronization/internal/create_thread_identity.cc |  6
-rw-r--r-- | absl/synchronization/internal/graphcycles.cc             |  6
-rw-r--r-- | absl/synchronization/internal/kernel_timeout.h           | 56
-rw-r--r-- | absl/synchronization/internal/mutex_nonprod.cc           |  7
-rw-r--r-- | absl/synchronization/internal/mutex_nonprod.inc          | 20
-rw-r--r-- | absl/synchronization/internal/per_thread_sem.h           |  2
-rw-r--r-- | absl/synchronization/internal/per_thread_sem_test.cc     |  1
-rw-r--r-- | absl/synchronization/internal/waiter.cc                  |  8
-rw-r--r-- | absl/synchronization/internal/waiter.h                   |  4
9 files changed, 56 insertions, 54 deletions
diff --git a/absl/synchronization/internal/create_thread_identity.cc b/absl/synchronization/internal/create_thread_identity.cc
index fa0070a9..53a71b34 100644
--- a/absl/synchronization/internal/create_thread_identity.cc
+++ b/absl/synchronization/internal/create_thread_identity.cc
@@ -32,9 +32,9 @@ namespace synchronization_internal {
 
 // ThreadIdentity storage is persistent, we maintain a free-list of previously
 // released ThreadIdentity objects.
-static base_internal::SpinLock freelist_lock(
-    base_internal::kLinkerInitialized);
-static base_internal::ThreadIdentity* thread_identity_freelist;
+ABSL_CONST_INIT static base_internal::SpinLock freelist_lock(
+    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist;
 
 // A per-thread destructor for reclaiming associated ThreadIdentity objects.
 // Since we must preserve their storage we cache them for re-use.
diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc
index 6a2bcdf6..19f9aab5 100644
--- a/absl/synchronization/internal/graphcycles.cc
+++ b/absl/synchronization/internal/graphcycles.cc
@@ -51,9 +51,9 @@ namespace {
 
 // Avoid LowLevelAlloc's default arena since it calls malloc hooks in
 // which people are doing things like acquiring Mutexes.
-static absl::base_internal::SpinLock arena_mu(
-    absl::base_internal::kLinkerInitialized);
-static base_internal::LowLevelAlloc::Arena* arena;
+ABSL_CONST_INIT static absl::base_internal::SpinLock arena_mu(
+    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena;
 
 static void InitArenaIfNecessary() {
   arena_mu.Lock();
diff --git a/absl/synchronization/internal/kernel_timeout.h b/absl/synchronization/internal/kernel_timeout.h
index d6ac5db0..1084e1e6 100644
--- a/absl/synchronization/internal/kernel_timeout.h
+++ b/absl/synchronization/internal/kernel_timeout.h
@@ -57,6 +57,10 @@ class KernelTimeout {
 
   bool has_timeout() const { return ns_ != 0; }
 
+  // Convert to parameter for sem_timedwait/futex/similar. Only for approved
+  // users. Do not call if !has_timeout.
+  struct timespec MakeAbsTimespec();
+
  private:
   // internal rep, not user visible: ns after unix epoch.
   // zero = no timeout.
@@ -82,34 +86,6 @@ class KernelTimeout {
     return x;
   }
 
-  // Convert to parameter for sem_timedwait/futex/similar. Only for approved
-  // users. Do not call if !has_timeout.
-  struct timespec MakeAbsTimespec() {
-    int64_t n = ns_;
-    static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
-    if (n == 0) {
-      ABSL_RAW_LOG(
-          ERROR,
-          "Tried to create a timespec from a non-timeout; never do this.");
-      // But we'll try to continue sanely. no-timeout ~= saturated timeout.
-      n = (std::numeric_limits<int64_t>::max)();
-    }
-
-    // Kernel APIs validate timespecs as being at or after the epoch,
-    // despite the kernel time type being signed. However, no one can
-    // tell the difference between a timeout at or before the epoch (since
-    // all such timeouts have expired!)
-    if (n < 0) n = 0;
-
-    struct timespec abstime;
-    int64_t seconds = (std::min)(n / kNanosPerSecond,
-                                 int64_t{(std::numeric_limits<time_t>::max)()});
-    abstime.tv_sec = static_cast<time_t>(seconds);
-    abstime.tv_nsec =
-        static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
-    return abstime;
-  }
-
 #ifdef _WIN32
   // Converts to milliseconds from now, or INFINITE when
   // !has_timeout(). For use by SleepConditionVariableSRW on
@@ -148,6 +124,30 @@ class KernelTimeout {
   friend class Waiter;
 };
 
+inline struct timespec KernelTimeout::MakeAbsTimespec() {
+  int64_t n = ns_;
+  static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
+  if (n == 0) {
+    ABSL_RAW_LOG(
+        ERROR, "Tried to create a timespec from a non-timeout; never do this.");
+    // But we'll try to continue sanely. no-timeout ~= saturated timeout.
+    n = (std::numeric_limits<int64_t>::max)();
+  }
+
+  // Kernel APIs validate timespecs as being at or after the epoch,
+  // despite the kernel time type being signed. However, no one can
+  // tell the difference between a timeout at or before the epoch (since
+  // all such timeouts have expired!)
+  if (n < 0) n = 0;
+
+  struct timespec abstime;
+  int64_t seconds = (std::min)(n / kNanosPerSecond,
+                               int64_t{(std::numeric_limits<time_t>::max)()});
+  abstime.tv_sec = static_cast<time_t>(seconds);
+  abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
+  return abstime;
+}
+
 }  // namespace synchronization_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/synchronization/internal/mutex_nonprod.cc b/absl/synchronization/internal/mutex_nonprod.cc
index 4590b98d..334c3bc0 100644
--- a/absl/synchronization/internal/mutex_nonprod.cc
+++ b/absl/synchronization/internal/mutex_nonprod.cc
@@ -27,11 +27,16 @@
 
 #include <algorithm>
 
+#include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/time/time.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
+
+void SetMutexDeadlockDetectionMode(OnDeadlockCycle) {}
+void EnableMutexInvariantDebugging(bool) {}
+
 namespace synchronization_internal {
 
 namespace {
@@ -274,7 +279,7 @@ bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
 
 void CondVar::EnableDebugLog(const char*) {}
 
-#ifdef THREAD_SANITIZER
+#ifdef ABSL_HAVE_THREAD_SANITIZER
 extern "C" void __tsan_read1(void *addr);
 #else
 #define __tsan_read1(addr)  // do nothing if TSan not enabled
diff --git a/absl/synchronization/internal/mutex_nonprod.inc b/absl/synchronization/internal/mutex_nonprod.inc
index a1502e72..d83bc8a9 100644
--- a/absl/synchronization/internal/mutex_nonprod.inc
+++ b/absl/synchronization/internal/mutex_nonprod.inc
@@ -209,31 +209,22 @@ class SynchronizationStorage {
   // Instances allocated on the heap or on the stack should use the default
   // constructor.
   SynchronizationStorage()
-      : is_dynamic_(true), once_() {}
-
-  // Instances allocated in static storage (not on the heap, not on the
-  // stack) should use this constructor.
-  explicit SynchronizationStorage(base_internal::LinkerInitialized) {}
+      : destruct_(true), once_() {}
 
   constexpr explicit SynchronizationStorage(absl::ConstInitType)
-      : is_dynamic_(false), once_(), space_{{0}} {}
+      : destruct_(false), once_(), space_{{0}} {}
 
   SynchronizationStorage(SynchronizationStorage&) = delete;
   SynchronizationStorage& operator=(SynchronizationStorage&) = delete;
 
   ~SynchronizationStorage() {
-    if (is_dynamic_) {
+    if (destruct_) {
       get()->~T();
     }
   }
 
   // Retrieve the object in storage. This is fast and thread safe, but does
   // incur the cost of absl::call_once().
-  //
-  // For instances in static storage constructed with the
-  // LinkerInitialized constructor, may be called at any time without
-  // regard for order of dynamic initialization or destruction of objects
-  // in static storage. See the class comment for caveats.
   T* get() {
     absl::call_once(once_, SynchronizationStorage::Construct, this);
     return reinterpret_cast<T*>(&space_);
@@ -245,10 +236,7 @@ class SynchronizationStorage {
   }
 
   // When true, T's destructor is run when this is destructed.
-  //
-  // The LinkerInitialized constructor assumes this value will be set
-  // false by static initialization.
-  bool is_dynamic_;
+  const bool destruct_;
 
   absl::once_flag once_;
 
diff --git a/absl/synchronization/internal/per_thread_sem.h b/absl/synchronization/internal/per_thread_sem.h
index 8ab43915..2228b6e8 100644
--- a/absl/synchronization/internal/per_thread_sem.h
+++ b/absl/synchronization/internal/per_thread_sem.h
@@ -78,7 +78,7 @@ class PerThreadSem {
   // !t.has_timeout() => Wait(t) will return true.
   static inline bool Wait(KernelTimeout t);
 
-  // White-listed callers.
+  // Permitted callers.
   friend class PerThreadSemTest;
   friend class absl::Mutex;
   friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
diff --git a/absl/synchronization/internal/per_thread_sem_test.cc b/absl/synchronization/internal/per_thread_sem_test.cc
index b5a2f6d4..8cf59e64 100644
--- a/absl/synchronization/internal/per_thread_sem_test.cc
+++ b/absl/synchronization/internal/per_thread_sem_test.cc
@@ -23,6 +23,7 @@
 #include <thread>  // NOLINT(build/c++11)
 
 #include "gtest/gtest.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/cycleclock.h"
 #include "absl/base/internal/thread_identity.h"
 #include "absl/strings/str_cat.h"
diff --git a/absl/synchronization/internal/waiter.cc b/absl/synchronization/internal/waiter.cc
index 2949f5a8..b6150b9b 100644
--- a/absl/synchronization/internal/waiter.cc
+++ b/absl/synchronization/internal/waiter.cc
@@ -86,6 +86,14 @@ static void MaybeBecomeIdle() {
 #endif
 #endif
 
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
 class Futex {
  public:
   static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
diff --git a/absl/synchronization/internal/waiter.h b/absl/synchronization/internal/waiter.h
index a6e6d4c7..887f9b1b 100644
--- a/absl/synchronization/internal/waiter.h
+++ b/absl/synchronization/internal/waiter.h
@@ -100,8 +100,8 @@ class Waiter {
   }
 
   // How many periods to remain idle before releasing resources
-#ifndef THREAD_SANITIZER
-  static const int kIdlePeriods = 60;
+#ifndef ABSL_HAVE_THREAD_SANITIZER
+  static constexpr int kIdlePeriods = 60;
 #else
   // Memory consumption under ThreadSanitizer is a serious concern,
   // so we release resources sooner. The value of 1 leads to 1 to 2 second
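
Note: the recurring change in this commit replaces the old base_internal::kLinkerInitialized tag with true constant initialization. Each affected global is annotated ABSL_CONST_INIT and constructed with absl::kConstInit, so it is initialized at compile time and never races with other dynamic initializers. The sketch below only illustrates that pattern; g_counter_lock, g_counter, and IncrementCounter are hypothetical names invented for the example, and SpinLock/SpinLockHolder are Abseil-internal APIs (it mirrors the freelist_lock and arena_mu edits above rather than documenting a public interface).

#include <cstdint>

#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/internal/spinlock.h"

// Sketch only: the names below are hypothetical and not part of this change.
// Both globals are constant-initialized, so no dynamic initializer runs for
// them and they are safe to use at any point during program start-up.
ABSL_CONST_INIT static absl::base_internal::SpinLock g_counter_lock(
    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
ABSL_CONST_INIT static int64_t g_counter = 0;

void IncrementCounter() {
  // SpinLockHolder acquires the lock for the scope and releases it on exit.
  absl::base_internal::SpinLockHolder lock(&g_counter_lock);
  ++g_counter;
}

SCHEDULE_KERNEL_ONLY matches the mode used for freelist_lock and arena_mu above: the lock is scheduled only by the kernel and stays independent of Abseil's cooperative-scheduling support, which is what these low-level internals require.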