path: root/absl/synchronization
author     Benjamin Barenblat <bbaren@google.com>  2021-04-08 10:23:55 -0400
committer  Benjamin Barenblat <bbaren@google.com>  2021-04-08 10:23:55 -0400
commit     feac56827dd1f0d159ea0bcf2ce37ef1990ac743 (patch)
tree       bc8ca767be02a5b22118108f9712b72ec64064c3 /absl/synchronization
parent     2b91b17d526b464840a3f45504c594cdb50152c5 (diff)
parent     997aaf3a28308eba1b9156aa35ab7bca9688e9f6 (diff)
Merge new upstream LTS 20210324.0
Diffstat (limited to 'absl/synchronization')
-rw-r--r--  absl/synchronization/BUILD.bazel                  10
-rw-r--r--  absl/synchronization/CMakeLists.txt                2
-rw-r--r--  absl/synchronization/internal/futex.h            154
-rw-r--r--  absl/synchronization/internal/kernel_timeout.h     3
-rw-r--r--  absl/synchronization/internal/mutex_nonprod.cc   325
-rw-r--r--  absl/synchronization/internal/mutex_nonprod.inc  249
-rw-r--r--  absl/synchronization/internal/per_thread_sem.cc    4
-rw-r--r--  absl/synchronization/internal/per_thread_sem.h     8
-rw-r--r--  absl/synchronization/internal/waiter.cc           66
-rw-r--r--  absl/synchronization/internal/waiter.h             8
-rw-r--r--  absl/synchronization/mutex.cc                    130
-rw-r--r--  absl/synchronization/mutex.h                     141
-rw-r--r--  absl/synchronization/mutex_benchmark.cc          224
-rw-r--r--  absl/synchronization/mutex_test.cc                42
14 files changed, 512 insertions, 854 deletions
diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel
index 4d4d6806..5ce16958 100644
--- a/absl/synchronization/BUILD.bazel
+++ b/absl/synchronization/BUILD.bazel
@@ -73,15 +73,14 @@ cc_library(
"internal/create_thread_identity.cc",
"internal/per_thread_sem.cc",
"internal/waiter.cc",
+ "mutex.cc",
"notification.cc",
- ] + select({
- "//conditions:default": ["mutex.cc"],
- }),
+ ],
hdrs = [
"barrier.h",
"blocking_counter.h",
"internal/create_thread_identity.h",
- "internal/mutex_nonprod.inc",
+ "internal/futex.h",
"internal/per_thread_sem.h",
"internal/waiter.h",
"mutex.h",
@@ -89,7 +88,8 @@ cc_library(
],
copts = ABSL_DEFAULT_COPTS,
linkopts = select({
- "//absl:windows": [],
+ "//absl:msvc_compiler": [],
+ "//absl:clang-cl_compiler": [],
"//absl:wasm": [],
"//conditions:default": ["-pthread"],
}) + ABSL_DEFAULT_LINKOPTS,
diff --git a/absl/synchronization/CMakeLists.txt b/absl/synchronization/CMakeLists.txt
index e5bc52fb..e633d0bf 100644
--- a/absl/synchronization/CMakeLists.txt
+++ b/absl/synchronization/CMakeLists.txt
@@ -52,7 +52,7 @@ absl_cc_library(
"barrier.h"
"blocking_counter.h"
"internal/create_thread_identity.h"
- "internal/mutex_nonprod.inc"
+ "internal/futex.h"
"internal/per_thread_sem.h"
"internal/waiter.h"
"mutex.h"
diff --git a/absl/synchronization/internal/futex.h b/absl/synchronization/internal/futex.h
new file mode 100644
index 00000000..06fbd6d0
--- /dev/null
+++ b/absl/synchronization/internal/futex.h
@@ -0,0 +1,154 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
+
+#include "absl/base/config.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+#error ABSL_INTERNAL_HAVE_FUTEX may not be set on the command line
+#elif defined(__BIONIC__)
+// Bionic supports all the futex operations we need even when some of the futex
+// definitions are missing.
+#define ABSL_INTERNAL_HAVE_FUTEX
+#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
+// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
+#define ABSL_INTERNAL_HAVE_FUTEX
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_WAIT_BITSET
+#define FUTEX_WAIT_BITSET 9
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#ifndef FUTEX_CLOCK_REALTIME
+#define FUTEX_CLOCK_REALTIME 256
+#endif
+#ifndef FUTEX_BITSET_MATCH_ANY
+#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
+#endif
+#endif
+
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
+class FutexImpl {
+ public:
+ static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
+ KernelTimeout t) {
+ int err = 0;
+ if (t.has_timeout()) {
+ // https://locklessinc.com/articles/futex_cheat_sheet/
+ // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
+ struct timespec abs_timeout = t.MakeAbsTimespec();
+ // Atomically check that the futex value is still 0, and if it
+ // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
+ err = syscall(
+ SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
+ &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
+ } else {
+ // Atomically check that the futex value is still 0, and if it
+ // is, sleep until woken by FUTEX_WAKE.
+ err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
+ }
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
+ int32_t bits,
+ const struct timespec *abstime) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
+ nullptr, bits);
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ static int Wake(std::atomic<int32_t> *v, int32_t count) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+
+ // FUTEX_WAKE_BITSET
+ static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
+ int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
+ FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
+ nullptr, bits);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ err = -errno;
+ }
+ return err;
+ }
+};
+
+class Futex : public FutexImpl {};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_FUTEX
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_H_
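
Note: the new header consolidates the raw futex wrappers that previously lived
inside waiter.cc so that other synchronization internals can share them. As a
rough illustration of the wait/wake protocol the FutexImpl methods wrap, here
is a minimal standalone sketch (Linux-only; this is not Abseil API, and the
helper name `futex` is ours):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #include <atomic>
    #include <cstdint>
    #include <thread>

    static std::atomic<int32_t> word{0};

    // Thin wrapper over the raw syscall, mirroring what FutexImpl does.
    static long futex(std::atomic<int32_t>* addr, int op, int32_t val) {
      return syscall(SYS_futex, reinterpret_cast<int32_t*>(addr), op, val,
                     nullptr, nullptr, 0);
    }

    int main() {
      std::thread waiter([] {
        // FUTEX_WAIT sleeps only while *addr still equals the expected
        // value, so a wake racing with this check is not lost; loop to
        // absorb spurious wakeups.
        while (word.load(std::memory_order_acquire) == 0) {
          futex(&word, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0);
        }
      });
      word.store(1, std::memory_order_release);
      futex(&word, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);  // wake one waiter
      waiter.join();
    }
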
diff --git a/absl/synchronization/internal/kernel_timeout.h b/absl/synchronization/internal/kernel_timeout.h
index 1084e1e6..bbd4d2d7 100644
--- a/absl/synchronization/internal/kernel_timeout.h
+++ b/absl/synchronization/internal/kernel_timeout.h
@@ -26,6 +26,7 @@
#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
#include <time.h>
+
#include <algorithm>
#include <limits>
@@ -142,7 +143,7 @@ inline struct timespec KernelTimeout::MakeAbsTimespec() {
struct timespec abstime;
int64_t seconds = (std::min)(n / kNanosPerSecond,
- int64_t{(std::numeric_limits<time_t>::max)()});
+ int64_t{(std::numeric_limits<time_t>::max)()});
abstime.tv_sec = static_cast<time_t>(seconds);
abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
return abstime;
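
Note: the hunk above is formatting-only, but the clamp it touches is the
substance of MakeAbsTimespec. A standalone sketch of the same conversion
(illustrative, not the Abseil function itself):

    #include <algorithm>
    #include <cstdint>
    #include <ctime>
    #include <limits>

    // Convert an absolute nanosecond count into a timespec, clamping the
    // seconds so tv_sec cannot overflow a 32-bit time_t.
    struct timespec AbsNanosToTimespec(int64_t n) {
      constexpr int64_t kNanosPerSecond = 1000 * 1000 * 1000;
      int64_t seconds =
          (std::min)(n / kNanosPerSecond,
                     int64_t{(std::numeric_limits<time_t>::max)()});
      struct timespec abstime;
      abstime.tv_sec = static_cast<time_t>(seconds);
      abstime.tv_nsec =
          static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
      return abstime;
    }
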
diff --git a/absl/synchronization/internal/mutex_nonprod.cc b/absl/synchronization/internal/mutex_nonprod.cc
deleted file mode 100644
index 334c3bc0..00000000
--- a/absl/synchronization/internal/mutex_nonprod.cc
+++ /dev/null
@@ -1,325 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Implementation of a small subset of Mutex and CondVar functionality
-// for platforms where the production implementation hasn't been fully
-// ported yet.
-
-#include "absl/synchronization/mutex.h"
-
-#if defined(_WIN32)
-#include <chrono> // NOLINT(build/c++11)
-#else
-#include <sys/time.h>
-#include <time.h>
-#endif
-
-#include <algorithm>
-
-#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
-#include "absl/time/time.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-
-void SetMutexDeadlockDetectionMode(OnDeadlockCycle) {}
-void EnableMutexInvariantDebugging(bool) {}
-
-namespace synchronization_internal {
-
-namespace {
-
-// Return the current time plus the timeout.
-absl::Time DeadlineFromTimeout(absl::Duration timeout) {
- return absl::Now() + timeout;
-}
-
-// Limit the deadline to a positive, 32-bit time_t value to accommodate
-// implementation restrictions. This also deals with InfinitePast and
-// InfiniteFuture.
-absl::Time LimitedDeadline(absl::Time deadline) {
- deadline = std::max(absl::FromTimeT(0), deadline);
- deadline = std::min(deadline, absl::FromTimeT(0x7fffffff));
- return deadline;
-}
-
-} // namespace
-
-#if defined(_WIN32)
-
-MutexImpl::MutexImpl() {}
-
-MutexImpl::~MutexImpl() {
- if (locked_) {
- std_mutex_.unlock();
- }
-}
-
-void MutexImpl::Lock() {
- std_mutex_.lock();
- locked_ = true;
-}
-
-bool MutexImpl::TryLock() {
- bool locked = std_mutex_.try_lock();
- if (locked) locked_ = true;
- return locked;
-}
-
-void MutexImpl::Unlock() {
- locked_ = false;
- released_.SignalAll();
- std_mutex_.unlock();
-}
-
-CondVarImpl::CondVarImpl() {}
-
-CondVarImpl::~CondVarImpl() {}
-
-void CondVarImpl::Signal() { std_cv_.notify_one(); }
-
-void CondVarImpl::SignalAll() { std_cv_.notify_all(); }
-
-void CondVarImpl::Wait(MutexImpl* mu) {
- mu->released_.SignalAll();
- std_cv_.wait(mu->std_mutex_);
-}
-
-bool CondVarImpl::WaitWithDeadline(MutexImpl* mu, absl::Time deadline) {
- mu->released_.SignalAll();
- time_t when = ToTimeT(deadline);
- int64_t nanos = ToInt64Nanoseconds(deadline - absl::FromTimeT(when));
- std::chrono::system_clock::time_point deadline_tp =
- std::chrono::system_clock::from_time_t(when) +
- std::chrono::duration_cast<std::chrono::system_clock::duration>(
- std::chrono::nanoseconds(nanos));
- auto deadline_since_epoch =
- std::chrono::duration_cast<std::chrono::duration<double>>(
- deadline_tp - std::chrono::system_clock::from_time_t(0));
- return std_cv_.wait_until(mu->std_mutex_, deadline_tp) ==
- std::cv_status::timeout;
-}
-
-#else // ! _WIN32
-
-MutexImpl::MutexImpl() {
- ABSL_RAW_CHECK(pthread_mutex_init(&pthread_mutex_, nullptr) == 0,
- "pthread error");
-}
-
-MutexImpl::~MutexImpl() {
- if (locked_) {
- ABSL_RAW_CHECK(pthread_mutex_unlock(&pthread_mutex_) == 0, "pthread error");
- }
- ABSL_RAW_CHECK(pthread_mutex_destroy(&pthread_mutex_) == 0, "pthread error");
-}
-
-void MutexImpl::Lock() {
- ABSL_RAW_CHECK(pthread_mutex_lock(&pthread_mutex_) == 0, "pthread error");
- locked_ = true;
-}
-
-bool MutexImpl::TryLock() {
- bool locked = (0 == pthread_mutex_trylock(&pthread_mutex_));
- if (locked) locked_ = true;
- return locked;
-}
-
-void MutexImpl::Unlock() {
- locked_ = false;
- released_.SignalAll();
- ABSL_RAW_CHECK(pthread_mutex_unlock(&pthread_mutex_) == 0, "pthread error");
-}
-
-CondVarImpl::CondVarImpl() {
- ABSL_RAW_CHECK(pthread_cond_init(&pthread_cv_, nullptr) == 0,
- "pthread error");
-}
-
-CondVarImpl::~CondVarImpl() {
- ABSL_RAW_CHECK(pthread_cond_destroy(&pthread_cv_) == 0, "pthread error");
-}
-
-void CondVarImpl::Signal() {
- ABSL_RAW_CHECK(pthread_cond_signal(&pthread_cv_) == 0, "pthread error");
-}
-
-void CondVarImpl::SignalAll() {
- ABSL_RAW_CHECK(pthread_cond_broadcast(&pthread_cv_) == 0, "pthread error");
-}
-
-void CondVarImpl::Wait(MutexImpl* mu) {
- mu->released_.SignalAll();
- ABSL_RAW_CHECK(pthread_cond_wait(&pthread_cv_, &mu->pthread_mutex_) == 0,
- "pthread error");
-}
-
-bool CondVarImpl::WaitWithDeadline(MutexImpl* mu, absl::Time deadline) {
- mu->released_.SignalAll();
- struct timespec ts = ToTimespec(deadline);
- int rc = pthread_cond_timedwait(&pthread_cv_, &mu->pthread_mutex_, &ts);
- if (rc == ETIMEDOUT) return true;
- ABSL_RAW_CHECK(rc == 0, "pthread error");
- return false;
-}
-
-#endif // ! _WIN32
-
-void MutexImpl::Await(const Condition& cond) {
- if (cond.Eval()) return;
- released_.SignalAll();
- do {
- released_.Wait(this);
- } while (!cond.Eval());
-}
-
-bool MutexImpl::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
- if (cond.Eval()) return true;
- released_.SignalAll();
- while (true) {
- if (released_.WaitWithDeadline(this, deadline)) return false;
- if (cond.Eval()) return true;
- }
-}
-
-} // namespace synchronization_internal
-
-Mutex::Mutex() {}
-
-Mutex::~Mutex() {}
-
-void Mutex::Lock() { impl()->Lock(); }
-
-void Mutex::Unlock() { impl()->Unlock(); }
-
-bool Mutex::TryLock() { return impl()->TryLock(); }
-
-void Mutex::ReaderLock() { Lock(); }
-
-void Mutex::ReaderUnlock() { Unlock(); }
-
-void Mutex::Await(const Condition& cond) { impl()->Await(cond); }
-
-void Mutex::LockWhen(const Condition& cond) {
- Lock();
- Await(cond);
-}
-
-bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
- return impl()->AwaitWithDeadline(
- cond, synchronization_internal::LimitedDeadline(deadline));
-}
-
-bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
- return AwaitWithDeadline(
- cond, synchronization_internal::DeadlineFromTimeout(timeout));
-}
-
-bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) {
- Lock();
- return AwaitWithDeadline(cond, deadline);
-}
-
-bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) {
- return LockWhenWithDeadline(
- cond, synchronization_internal::DeadlineFromTimeout(timeout));
-}
-
-void Mutex::ReaderLockWhen(const Condition& cond) {
- ReaderLock();
- Await(cond);
-}
-
-bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
- absl::Duration timeout) {
- return LockWhenWithTimeout(cond, timeout);
-}
-bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond,
- absl::Time deadline) {
- return LockWhenWithDeadline(cond, deadline);
-}
-
-void Mutex::EnableDebugLog(const char*) {}
-void Mutex::EnableInvariantDebugging(void (*)(void*), void*) {}
-void Mutex::ForgetDeadlockInfo() {}
-void Mutex::AssertHeld() const {}
-void Mutex::AssertReaderHeld() const {}
-void Mutex::AssertNotHeld() const {}
-
-CondVar::CondVar() {}
-
-CondVar::~CondVar() {}
-
-void CondVar::Signal() { impl()->Signal(); }
-
-void CondVar::SignalAll() { impl()->SignalAll(); }
-
-void CondVar::Wait(Mutex* mu) { return impl()->Wait(mu->impl()); }
-
-bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) {
- return impl()->WaitWithDeadline(
- mu->impl(), synchronization_internal::LimitedDeadline(deadline));
-}
-
-bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
- return WaitWithDeadline(mu, absl::Now() + timeout);
-}
-
-void CondVar::EnableDebugLog(const char*) {}
-
-#ifdef ABSL_HAVE_THREAD_SANITIZER
-extern "C" void __tsan_read1(void *addr);
-#else
-#define __tsan_read1(addr) // do nothing if TSan not enabled
-#endif
-
-// A function that just returns its argument, dereferenced
-static bool Dereference(void *arg) {
- // ThreadSanitizer does not instrument this file for memory accesses.
- // This function dereferences a user variable that can participate
- // in a data race, so we need to manually tell TSan about this memory access.
- __tsan_read1(arg);
- return *(static_cast<bool *>(arg));
-}
-
-Condition::Condition() {} // null constructor, used for kTrue only
-const Condition Condition::kTrue;
-
-Condition::Condition(bool (*func)(void *), void *arg)
- : eval_(&CallVoidPtrFunction),
- function_(func),
- method_(nullptr),
- arg_(arg) {}
-
-bool Condition::CallVoidPtrFunction(const Condition *c) {
- return (*c->function_)(c->arg_);
-}
-
-Condition::Condition(const bool *cond)
- : eval_(CallVoidPtrFunction),
- function_(Dereference),
- method_(nullptr),
- // const_cast is safe since Dereference does not modify arg
- arg_(const_cast<bool *>(cond)) {}
-
-bool Condition::Eval() const {
- // eval_ == null for kTrue
- return (this->eval_ == nullptr) || (*this->eval_)(this);
-}
-
-void RegisterSymbolizer(bool (*)(const void*, char*, int)) {}
-
-ABSL_NAMESPACE_END
-} // namespace absl
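
Note: the deleted emulation realizes Mutex::Await() by signaling released_
before unlocking and re-testing the predicate in a loop, i.e. the classic
condition-variable predicate loop. A generic sketch of that pattern with the
C++ standard library (names are illustrative, not Abseil's):

    #include <condition_variable>
    #include <mutex>

    std::mutex mu;
    std::condition_variable cv;
    bool ready = false;  // predicate state, guarded by mu

    void AwaitReady() {
      std::unique_lock<std::mutex> lock(mu);
      // Re-check the predicate after every wakeup; waits can be spurious.
      cv.wait(lock, [] { return ready; });
    }

    void Publish() {
      {
        std::lock_guard<std::mutex> lock(mu);
        ready = true;
      }
      // Analogous to the deleted code signaling released_ on Unlock().
      cv.notify_all();
    }
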
diff --git a/absl/synchronization/internal/mutex_nonprod.inc b/absl/synchronization/internal/mutex_nonprod.inc
deleted file mode 100644
index d83bc8a9..00000000
--- a/absl/synchronization/internal/mutex_nonprod.inc
+++ /dev/null
@@ -1,249 +0,0 @@
-// Do not include. This is an implementation detail of base/mutex.h.
-//
-// Declares three classes:
-//
-// base::internal::MutexImpl - implementation helper for Mutex
-// base::internal::CondVarImpl - implementation helper for CondVar
-// base::internal::SynchronizationStorage<T> - implementation helper for
-// Mutex, CondVar
-
-#include <type_traits>
-
-#if defined(_WIN32)
-#include <condition_variable>
-#include <mutex>
-#else
-#include <pthread.h>
-#endif
-
-#include "absl/base/call_once.h"
-#include "absl/time/time.h"
-
-// Declare that Mutex::ReaderLock is actually Lock(). Intended primarily
-// for tests, and even then as a last resort.
-#ifdef ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE
-#error ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE cannot be directly set
-#else
-#define ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE 1
-#endif
-
-// Declare that Mutex::EnableInvariantDebugging is not implemented.
-// Intended primarily for tests, and even then as a last resort.
-#ifdef ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED
-#error ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED cannot be directly set
-#else
-#define ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED 1
-#endif
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-class Condition;
-
-namespace synchronization_internal {
-
-class MutexImpl;
-
-// Do not use this implementation detail of CondVar. Provides most of the
-// implementation, but should not be placed directly in static storage
-// because it will not linker initialize properly. See
-// SynchronizationStorage<T> below for what we mean by linker
-// initialization.
-class CondVarImpl {
- public:
- CondVarImpl();
- CondVarImpl(const CondVarImpl&) = delete;
- CondVarImpl& operator=(const CondVarImpl&) = delete;
- ~CondVarImpl();
-
- void Signal();
- void SignalAll();
- void Wait(MutexImpl* mutex);
- bool WaitWithDeadline(MutexImpl* mutex, absl::Time deadline);
-
- private:
-#if defined(_WIN32)
- std::condition_variable_any std_cv_;
-#else
- pthread_cond_t pthread_cv_;
-#endif
-};
-
-// Do not use this implementation detail of Mutex. Provides most of the
-// implementation, but should not be placed directly in static storage
-// because it will not linker initialize properly. See
-// SynchronizationStorage<T> below for what we mean by linker
-// initialization.
-class MutexImpl {
- public:
- MutexImpl();
- MutexImpl(const MutexImpl&) = delete;
- MutexImpl& operator=(const MutexImpl&) = delete;
- ~MutexImpl();
-
- void Lock();
- bool TryLock();
- void Unlock();
- void Await(const Condition& cond);
- bool AwaitWithDeadline(const Condition& cond, absl::Time deadline);
-
- private:
- friend class CondVarImpl;
-
-#if defined(_WIN32)
- std::mutex std_mutex_;
-#else
- pthread_mutex_t pthread_mutex_;
-#endif
-
- // True if the underlying mutex is locked. If the destructor is entered
- // while locked_, the underlying mutex is unlocked. Mutex supports
- // destruction while locked, but the same is undefined behavior for both
- // pthread_mutex_t and std::mutex.
- bool locked_ = false;
-
- // Signaled before releasing the lock, in support of Await.
- CondVarImpl released_;
-};
-
-// Do not use this implementation detail of CondVar and Mutex. A storage
-// space for T that supports a LinkerInitialized constructor. T must
-// have a default constructor, which is called by the first call to
-// get(). T's destructor is never called if the LinkerInitialized
-// constructor is called.
-//
-// Objects constructed with the default constructor are constructed and
-// destructed like any other object, and should never be allocated in
-// static storage.
-//
-// Objects constructed with the LinkerInitialized constructor should
-// always be in static storage. For such objects, calls to get() are always
-// valid, except from signal handlers.
-//
-// Note that this implementation relies on undefined language behavior that
-// are known to hold for the set of supported compilers. An analysis
-// follows.
-//
-// From the C++11 standard:
-//
-// [basic.life] says an object has non-trivial initialization if it is of
-// class type and it is initialized by a constructor other than a trivial
-// default constructor. (the LinkerInitialized constructor is
-// non-trivial)
-//
-// [basic.life] says the lifetime of an object with a non-trivial
-// constructor begins when the call to the constructor is complete.
-//
-// [basic.life] says the lifetime of an object with non-trivial destructor
-// ends when the call to the destructor begins.
-//
-// [basic.life] p5 specifies undefined behavior when accessing non-static
-// members of an instance outside its
-// lifetime. (SynchronizationStorage::get() access non-static members)
-//
-// So, LinkerInitialized object of SynchronizationStorage uses a
-// non-trivial constructor, which is called at some point during dynamic
-// initialization, and is therefore subject to order of dynamic
-// initialization bugs, where get() is called before the object's
-// constructor is, resulting in undefined behavior.
-//
-// Similarly, a LinkerInitialized SynchronizationStorage object has a
-// non-trivial destructor, and so its lifetime ends at some point during
-// destruction of objects with static storage duration [basic.start.term]
-// p4. There is a window where other exit code could call get() after this
-// occurs, resulting in undefined behavior.
-//
-// Combined, these statements imply that LinkerInitialized instances
-// of SynchronizationStorage<T> rely on undefined behavior.
-//
-// However, in practice, the implementation works on all supported
-// compilers. Specifically, we rely on:
-//
-// a) zero-initialization being sufficient to initialize
-// LinkerInitialized instances for the purposes of calling
-// get(), regardless of when the constructor is called. This is
-// because the is_dynamic_ boolean is correctly zero-initialized to
-// false.
-//
-// b) the LinkerInitialized constructor is a NOP, and immaterial to
-// even to concurrent calls to get().
-//
-// c) the destructor being a NOP for LinkerInitialized objects
-// (guaranteed by a check for !is_dynamic_), and so any concurrent and
-// subsequent calls to get() functioning as if the destructor were not
-// called, by virtue of the instances' storage remaining valid after the
-// destructor runs.
-//
-// d) That a-c apply transitively when SynchronizationStorage<T> is the
-// only member of a class allocated in static storage.
-//
-// Nothing in the language standard guarantees that a-d hold. In practice,
-// these hold in all supported compilers.
-//
-// Future direction:
-//
-// Ideally, we would simply use std::mutex or a similar class, which when
-// allocated statically would support use immediately after static
-// initialization up until static storage is reclaimed (i.e. the properties
-// we require of all "linker initialized" instances).
-//
-// Regarding construction in static storage, std::mutex is required to
-// provide a constexpr default constructor [thread.mutex.class], which
-// ensures the instance's lifetime begins with static initialization
-// [basic.start.init], and so is immune to any problems caused by the order
-// of dynamic initialization. However, as of this writing Microsoft's
-// Visual Studio does not provide a constexpr constructor for std::mutex.
-// See
-// https://blogs.msdn.microsoft.com/vcblog/2015/06/02/constexpr-complete-for-vs-2015-rtm-c11-compiler-c17-stl/
-//
-// Regarding destruction of instances in static storage, [basic.life] does
-// say an object ends when storage in which the occupies is released, in
-// the case of non-trivial destructor. However, std::mutex is not specified
-// to have a trivial destructor.
-//
-// So, we would need a class with a constexpr default constructor and a
-// trivial destructor. Today, we can achieve neither desired property using
-// std::mutex directly.
-template <typename T>
-class SynchronizationStorage {
- public:
- // Instances allocated on the heap or on the stack should use the default
- // constructor.
- SynchronizationStorage()
- : destruct_(true), once_() {}
-
- constexpr explicit SynchronizationStorage(absl::ConstInitType)
- : destruct_(false), once_(), space_{{0}} {}
-
- SynchronizationStorage(SynchronizationStorage&) = delete;
- SynchronizationStorage& operator=(SynchronizationStorage&) = delete;
-
- ~SynchronizationStorage() {
- if (destruct_) {
- get()->~T();
- }
- }
-
- // Retrieve the object in storage. This is fast and thread safe, but does
- // incur the cost of absl::call_once().
- T* get() {
- absl::call_once(once_, SynchronizationStorage::Construct, this);
- return reinterpret_cast<T*>(&space_);
- }
-
- private:
- static void Construct(SynchronizationStorage<T>* self) {
- new (&self->space_) T();
- }
-
- // When true, T's destructor is run when this is destructed.
- const bool destruct_;
-
- absl::once_flag once_;
-
- // An aligned space for the T.
- alignas(T) unsigned char space_[sizeof(T)];
-};
-
-} // namespace synchronization_internal
-ABSL_NAMESPACE_END
-} // namespace absl
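
Note: SynchronizationStorage<T>, deleted above, existed to give Mutex and
CondVar usable storage across static initialization on toolchains whose
std::mutex lacks a constexpr constructor. A condensed sketch of the
underlying idiom, with std::call_once standing in for absl::call_once (class
name is ours; the real template also tracks whether to run T's destructor):

    #include <mutex>
    #include <new>

    template <typename T>
    class LazyStatic {
     public:
      // Thread-safe lazy construction; the object is intentionally never
      // destroyed, so it stays usable even during program shutdown.
      T* get() {
        std::call_once(once_, [this] { ::new (&space_) T(); });
        return reinterpret_cast<T*>(&space_);
      }

     private:
      std::once_flag once_;
      alignas(T) unsigned char space_[sizeof(T)];
    };
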
diff --git a/absl/synchronization/internal/per_thread_sem.cc b/absl/synchronization/internal/per_thread_sem.cc
index 821ca9b4..a6031787 100644
--- a/absl/synchronization/internal/per_thread_sem.cc
+++ b/absl/synchronization/internal/per_thread_sem.cc
@@ -68,12 +68,12 @@ ABSL_NAMESPACE_END
extern "C" {
-ABSL_ATTRIBUTE_WEAK void AbslInternalPerThreadSemPost(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
absl::base_internal::ThreadIdentity *identity) {
absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
}
-ABSL_ATTRIBUTE_WEAK bool AbslInternalPerThreadSemWait(
+ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
absl::synchronization_internal::KernelTimeout t) {
bool timeout = false;
absl::base_internal::ThreadIdentity *identity;
diff --git a/absl/synchronization/internal/per_thread_sem.h b/absl/synchronization/internal/per_thread_sem.h
index 2228b6e8..7beae8ef 100644
--- a/absl/synchronization/internal/per_thread_sem.h
+++ b/absl/synchronization/internal/per_thread_sem.h
@@ -96,20 +96,20 @@ ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
-void AbslInternalPerThreadSemPost(
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
absl::base_internal::ThreadIdentity* identity);
-bool AbslInternalPerThreadSemWait(
+bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
absl::synchronization_internal::KernelTimeout t);
} // extern "C"
void absl::synchronization_internal::PerThreadSem::Post(
absl::base_internal::ThreadIdentity* identity) {
- AbslInternalPerThreadSemPost(identity);
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
}
bool absl::synchronization_internal::PerThreadSem::Wait(
absl::synchronization_internal::KernelTimeout t) {
- return AbslInternalPerThreadSemWait(t);
+ return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t);
}
#endif // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
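
Note: ABSL_INTERNAL_C_SYMBOL decorates these extern "C" extension points with
the library's inline-namespace tag, so two differently-tagged Abseil builds
linked into one binary cannot collide on the same C symbol. A simplified
token-pasting sketch of the mechanism (macro names here are ours):

    #define MY_NS lts_20210324
    #define MY_C_SYMBOL_CONCAT(x, v) x##_##v
    #define MY_C_SYMBOL_EXPAND(x, v) MY_C_SYMBOL_CONCAT(x, v)
    #define MY_C_SYMBOL(x) MY_C_SYMBOL_EXPAND(x, MY_NS)

    // Declares AbslInternalPerThreadSemPost_lts_20210324 rather than the
    // bare name, keeping linker symbols distinct per release tag.
    extern "C" void MY_C_SYMBOL(AbslInternalPerThreadSemPost)(void);
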
diff --git a/absl/synchronization/internal/waiter.cc b/absl/synchronization/internal/waiter.cc
index b6150b9b..2123be60 100644
--- a/absl/synchronization/internal/waiter.cc
+++ b/absl/synchronization/internal/waiter.cc
@@ -48,6 +48,7 @@
#include "absl/base/optimization.h"
#include "absl/synchronization/internal/kernel_timeout.h"
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
@@ -66,71 +67,6 @@ static void MaybeBecomeIdle() {
#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
-// Some Android headers are missing these definitions even though they
-// support these futex operations.
-#ifdef __BIONIC__
-#ifndef SYS_futex
-#define SYS_futex __NR_futex
-#endif
-#ifndef FUTEX_WAIT_BITSET
-#define FUTEX_WAIT_BITSET 9
-#endif
-#ifndef FUTEX_PRIVATE_FLAG
-#define FUTEX_PRIVATE_FLAG 128
-#endif
-#ifndef FUTEX_CLOCK_REALTIME
-#define FUTEX_CLOCK_REALTIME 256
-#endif
-#ifndef FUTEX_BITSET_MATCH_ANY
-#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
-#endif
-#endif
-
-#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
-#define SYS_futex_time64 __NR_futex_time64
-#endif
-
-#if defined(SYS_futex_time64) && !defined(SYS_futex)
-#define SYS_futex SYS_futex_time64
-#endif
-
-class Futex {
- public:
- static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
- KernelTimeout t) {
- int err = 0;
- if (t.has_timeout()) {
- // https://locklessinc.com/articles/futex_cheat_sheet/
- // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
- struct timespec abs_timeout = t.MakeAbsTimespec();
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
- err = syscall(
- SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
- &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
- } else {
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until woken by FUTEX_WAKE.
- err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
- }
- if (err != 0) {
- err = -errno;
- }
- return err;
- }
-
- static int Wake(std::atomic<int32_t> *v, int32_t count) {
- int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
- if (ABSL_PREDICT_FALSE(err < 0)) {
- err = -errno;
- }
- return err;
- }
-};
-
Waiter::Waiter() {
futex_.store(0, std::memory_order_relaxed);
}
diff --git a/absl/synchronization/internal/waiter.h b/absl/synchronization/internal/waiter.h
index 887f9b1b..be3df180 100644
--- a/absl/synchronization/internal/waiter.h
+++ b/absl/synchronization/internal/waiter.h
@@ -36,6 +36,7 @@
#include <cstdint>
#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/futex.h"
#include "absl/synchronization/internal/kernel_timeout.h"
// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
@@ -48,12 +49,7 @@
#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
-#elif defined(__BIONIC__)
-// Bionic supports all the futex operations we need even when some of the futex
-// definitions are missing.
-#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
-#elif defined(__linux__) && defined(FUTEX_CLOCK_REALTIME)
-// FUTEX_CLOCK_REALTIME requires Linux >= 2.6.28.
+#elif defined(ABSL_INTERNAL_HAVE_FUTEX)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
#elif defined(ABSL_HAVE_SEMAPHORE_H)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index ad13567a..76ad41fe 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -70,7 +70,9 @@ using absl::synchronization_internal::KernelTimeout;
using absl::synchronization_internal::PerThreadSem;
extern "C" {
-ABSL_ATTRIBUTE_WEAK void AbslInternalMutexYield() { std::this_thread::yield(); }
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() {
+ std::this_thread::yield();
+}
} // extern "C"
namespace absl {
@@ -89,8 +91,8 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
- submit_profile_data;
+absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
+ submit_profile_data;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
const char *msg, const void *obj, int64_t wait_cycles)>
mutex_tracer;
@@ -124,35 +126,44 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
symbolizer.Store(fn);
}
+namespace {
+// Represents the strategy for spin and yield.
+// See the comment in GetMutexGlobals() for more information.
+enum DelayMode { AGGRESSIVE, GENTLE };
+
struct ABSL_CACHELINE_ALIGNED MutexGlobals {
absl::once_flag once;
- int num_cpus = 0;
int spinloop_iterations = 0;
+ int32_t mutex_sleep_limit[2] = {};
};
-static const MutexGlobals& GetMutexGlobals() {
+const MutexGlobals &GetMutexGlobals() {
ABSL_CONST_INIT static MutexGlobals data;
absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
- data.num_cpus = absl::base_internal::NumCPUs();
- data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
+ const int num_cpus = absl::base_internal::NumCPUs();
+ data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
+    // If this is a uniprocessor, only yield/sleep. Otherwise, if the mode is
+ // aggressive then spin many times before yielding. If the mode is
+ // gentle then spin only a few times before yielding. Aggressive spinning
+ // is used to ensure that an Unlock() call, which must get the spin lock
+    // for any thread to make progress, gets it without undue delay.
+ if (num_cpus > 1) {
+ data.mutex_sleep_limit[AGGRESSIVE] = 5000;
+ data.mutex_sleep_limit[GENTLE] = 250;
+ } else {
+ data.mutex_sleep_limit[AGGRESSIVE] = 0;
+ data.mutex_sleep_limit[GENTLE] = 0;
+ }
});
return data;
}
-
-// Spinlock delay on iteration c. Returns new c.
-namespace {
- enum DelayMode { AGGRESSIVE, GENTLE };
-};
+} // namespace
namespace synchronization_internal {
+// Returns the Mutex delay on iteration `c` depending on the given `mode`.
+// The returned value should be used as `c` for the next call to `MutexDelay`.
int MutexDelay(int32_t c, int mode) {
- // If this a uniprocessor, only yield/sleep. Otherwise, if the mode is
- // aggressive then spin many times before yielding. If the mode is
- // gentle then spin only a few times before yielding. Aggressive spinning is
- // used to ensure that an Unlock() call, which must get the spin lock for
- // any thread to make progress gets it without undue delay.
- const int32_t limit =
- GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0;
+ const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
if (c < limit) {
// Spin.
c++;
@@ -161,7 +172,7 @@ int MutexDelay(int32_t c, int mode) {
ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
if (c == limit) {
// Yield once.
- AbslInternalMutexYield();
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
c++;
} else {
// Then wait.
@@ -492,7 +503,7 @@ struct SynchWaitParams {
std::atomic<intptr_t> *cv_word;
int64_t contention_start_cycles; // Time (in cycles) when this thread started
- // to contend for the mutex.
+ // to contend for the mutex.
};
struct SynchLocksHeld {
@@ -548,7 +559,7 @@ static SynchLocksHeld *Synch_GetAllLocks() {
}
// Post on "w"'s associated PerThreadSem.
-inline void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
@@ -752,11 +763,13 @@ void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
synch_deadlock_detection.store(mode, std::memory_order_release);
}
-// Return true iff threads x and y are waiting on the same condition for the
-// same type of lock. Requires that x and y be waiting on the same Mutex
-// queue.
-static bool MuSameCondition(PerThreadSynch *x, PerThreadSynch *y) {
- return x->waitp->how == y->waitp->how &&
+// Return true iff threads x and y are part of the same equivalence
+// class of waiters. An equivalence class is defined as the set of
+// waiters with the same condition, type of lock, and thread priority.
+//
+// Requires that x and y be waiting on the same Mutex queue.
+static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
+ return x->waitp->how == y->waitp->how && x->priority == y->priority &&
Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
}
@@ -775,18 +788,19 @@ static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
// - invalid (iff x is not in a Mutex wait queue),
// - null, or
// - a pointer to a distinct thread waiting later in the same Mutex queue
-// such that all threads in [x, x->skip] have the same condition and
-// lock type (MuSameCondition() is true for all pairs in [x, x->skip]).
+// such that all threads in [x, x->skip] have the same condition, priority
+// and lock type (MuEquivalentWaiter() is true for all pairs in [x,
+// x->skip]).
// In addition, if x->skip is valid, (x->may_skip || x->skip == null)
//
-// By the spec of MuSameCondition(), it is not necessary when removing the
+// By the spec of MuEquivalentWaiter(), it is not necessary when removing the
// first runnable thread y from the front a Mutex queue to adjust the skip
// field of another thread x because if x->skip==y, x->skip must (have) become
// invalid before y is removed. The function TryRemove can remove a specified
// thread from an arbitrary position in the queue whether runnable or not, so
// it fixes up skip fields that would otherwise be left dangling.
// The statement
-// if (x->may_skip && MuSameCondition(x, x->next)) { x->skip = x->next; }
+// if (x->may_skip && MuEquivalentWaiter(x, x->next)) { x->skip = x->next; }
// maintains the invariant provided x is not the last waiter in a Mutex queue
// The statement
// if (x->skip != null) { x->skip = x->skip->skip; }
@@ -920,24 +934,17 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
if (s->priority > head->priority) { // s's priority is above head's
// try to put s in priority-fifo order, or failing that at the front.
if (!head->maybe_unlocking) {
- // No unlocker can be scanning the queue, so we can insert between
- // skip-chains, and within a skip-chain if it has the same condition as
- // s. We insert in priority-fifo order, examining the end of every
- // skip-chain, plus every element with the same condition as s.
+ // No unlocker can be scanning the queue, so we can insert into the
+ // middle of the queue.
+ //
+ // Within a skip chain, all waiters have the same priority, so we can
+ // skip forward through the chains until we find one with a lower
+ // priority than the waiter to be enqueued.
PerThreadSynch *advance_to = head; // next value of enqueue_after
- PerThreadSynch *cur; // successor of enqueue_after
do {
enqueue_after = advance_to;
- cur = enqueue_after->next; // this advance ensures progress
- advance_to = Skip(cur); // normally, advance to end of skip chain
- // (side-effect: optimizes skip chain)
- if (advance_to != cur && s->priority > advance_to->priority &&
- MuSameCondition(s, cur)) {
- // but this skip chain is not a singleton, s has higher priority
- // than its tail and has the same condition as the chain,
- // so we can insert within the skip-chain
- advance_to = cur; // advance by just one
- }
+ // (side-effect: optimizes skip chain)
+ advance_to = Skip(enqueue_after->next);
} while (s->priority <= advance_to->priority);
// termination guaranteed because s->priority > head->priority
// and head is the end of a skip chain
@@ -956,21 +963,21 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
// enqueue_after can be: head, Skip(...), or cur.
// The first two imply enqueue_after->skip == nullptr, and
- // the last is used only if MuSameCondition(s, cur).
+ // the last is used only if MuEquivalentWaiter(s, cur).
// We require this because clearing enqueue_after->skip
// is impossible; enqueue_after's predecessors might also
// incorrectly skip over s if we were to allow other
// insertion points.
- ABSL_RAW_CHECK(
- enqueue_after->skip == nullptr || MuSameCondition(enqueue_after, s),
- "Mutex Enqueue failure");
+ ABSL_RAW_CHECK(enqueue_after->skip == nullptr ||
+ MuEquivalentWaiter(enqueue_after, s),
+ "Mutex Enqueue failure");
if (enqueue_after != head && enqueue_after->may_skip &&
- MuSameCondition(enqueue_after, enqueue_after->next)) {
+ MuEquivalentWaiter(enqueue_after, enqueue_after->next)) {
// enqueue_after can skip to its new successor, s
enqueue_after->skip = enqueue_after->next;
}
- if (MuSameCondition(s, s->next)) { // s->may_skip is known to be true
+ if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
s->skip = s->next; // s may skip to its successor
}
} else { // enqueue not done any other way, so
@@ -980,7 +987,7 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
head->next = s;
s->readers = head->readers; // reader count is from previous head
s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
- if (head->may_skip && MuSameCondition(head, s)) {
+ if (head->may_skip && MuEquivalentWaiter(head, s)) {
// head now has successor; may skip
head->skip = s;
}
@@ -1000,7 +1007,7 @@ static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
pw->next = w->next; // snip w out of list
if (head == w) { // we removed the head
head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
- } else if (pw != head && MuSameCondition(pw, pw->next)) {
+ } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
// pw can skip to its new successor
if (pw->next->skip !=
nullptr) { // either skip to its successors skip target
@@ -1070,11 +1077,13 @@ void Mutex::TryRemove(PerThreadSynch *s) {
PerThreadSynch *w;
if ((w = pw->next) != s) { // search for thread,
do { // processing at least one element
- if (!MuSameCondition(s, w)) { // seeking different condition
+ // If the current element isn't equivalent to the waiter to be
+ // removed, we can skip the entire chain.
+ if (!MuEquivalentWaiter(s, w)) {
pw = Skip(w); // so skip all that won't match
// we don't have to worry about dangling skip fields
// in the threads we skipped; none can point to s
- // because their condition differs from s
+ // because they are in a different equivalence class.
} else { // seeking same condition
FixSkip(w, s); // fix up any skip pointer from w to s
pw = w;
@@ -1365,7 +1374,9 @@ static GraphId DeadlockCheck(Mutex *mu) {
len += static_cast<int>(strlen(&b->buf[len]));
}
}
- ABSL_RAW_LOG(ERROR, "Acquiring %p Mutexes held: %s",
+ ABSL_RAW_LOG(ERROR,
+ "Acquiring absl::Mutex %p while holding %s; a cycle in the "
+ "historical lock ordering graph has been observed",
static_cast<void *>(mu), b->buf);
ABSL_RAW_LOG(ERROR, "Cycle: ");
int path_len = deadlock_graph->FindPath(
@@ -2139,7 +2150,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
!old_h->may_skip) { // we used old_h as a terminator
old_h->may_skip = true; // allow old_h to skip once more
ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
- if (h != old_h && MuSameCondition(old_h, old_h->next)) {
+ if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
old_h->skip = old_h->next; // old_h not head & can skip to successor
}
}
@@ -2312,7 +2323,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
if (!cond_waiter) {
// Sample lock contention events only if the (first) waiter was trying to
// acquire the lock, not waiting on a condition variable or Condition.
- int64_t wait_cycles = base_internal::CycleClock::Now() - enqueue_timestamp;
+ int64_t wait_cycles =
+ base_internal::CycleClock::Now() - enqueue_timestamp;
mutex_tracer("slow release", this, wait_cycles);
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
submit_profile_data(enqueue_timestamp);
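
Note: the refactor above moves the spin limits into the once-initialized
MutexGlobals, but the backoff shape of MutexDelay is unchanged: spin while
below the per-mode limit, yield exactly once at the limit, then sleep. A
self-contained sketch of that tiered backoff (ours; the real code routes the
yield through AbslInternalMutexYield and sleeps via absl::SleepFor):

    #include <chrono>
    #include <cstdint>
    #include <thread>

    // Returns the updated iteration counter, as MutexDelay does.
    int32_t BackoffDelay(int32_t c, int32_t limit) {
      if (c < limit) {
        return c + 1;  // spin: cheapest option while contention is short
      }
      if (c == limit) {
        std::this_thread::yield();  // yield once before escalating further
        return c + 1;
      }
      std::this_thread::sleep_for(std::chrono::microseconds(10));
      return 0;  // after sleeping, start the spin count over
    }
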
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index 52401fe3..f49e0c83 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -31,22 +31,23 @@
//
// MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
// write access within the current scope.
+//
// ReaderMutexLock
// - An RAII wrapper to acquire and release a `Mutex` for shared/read
// access within the current scope.
//
// WriterMutexLock
-// - Alias for `MutexLock` above, designed for use in distinguishing
-// reader and writer locks within code.
+// - Effectively an alias for `MutexLock` above, designed for use in
+// distinguishing reader and writer locks within code.
//
// In addition to simple mutex locks, this file also defines ways to perform
// locking under certain conditions.
//
-// Condition - (Preferred) Used to wait for a particular predicate that
-// depends on state protected by the `Mutex` to become true.
-// CondVar - A lower-level variant of `Condition` that relies on
-// application code to explicitly signal the `CondVar` when
-// a condition has been met.
+// Condition - (Preferred) Used to wait for a particular predicate that
+// depends on state protected by the `Mutex` to become true.
+// CondVar - A lower-level variant of `Condition` that relies on
+// application code to explicitly signal the `CondVar` when
+// a condition has been met.
//
// See below for more information on using `Condition` or `CondVar`.
//
@@ -72,15 +73,6 @@
#include "absl/synchronization/internal/per_thread_sem.h"
#include "absl/time/time.h"
-// Decide if we should use the non-production implementation because
-// the production implementation hasn't been fully ported yet.
-#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
-#error ABSL_INTERNAL_USE_NONPROD_MUTEX cannot be directly set
-#elif defined(ABSL_LOW_LEVEL_ALLOC_MISSING)
-#define ABSL_INTERNAL_USE_NONPROD_MUTEX 1
-#include "absl/synchronization/internal/mutex_nonprod.inc"
-#endif
-
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -155,7 +147,7 @@ class ABSL_LOCKABLE Mutex {
//
// Example usage:
// namespace foo {
- // ABSL_CONST_INIT Mutex mu(absl::kConstInit);
+ // ABSL_CONST_INIT absl::Mutex mu(absl::kConstInit);
// }
explicit constexpr Mutex(absl::ConstInitType);
@@ -170,7 +162,7 @@ class ABSL_LOCKABLE Mutex {
// Mutex::Unlock()
//
// Releases this `Mutex` and returns it from the exclusive/write state to the
- // free state. Caller must hold the `Mutex` exclusively.
+ // free state. Calling thread must hold the `Mutex` exclusively.
void Unlock() ABSL_UNLOCK_FUNCTION();
// Mutex::TryLock()
@@ -461,24 +453,13 @@ class ABSL_LOCKABLE Mutex {
static void InternalAttemptToUseMutexInFatalSignalHandler();
private:
-#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
- friend class CondVar;
-
- synchronization_internal::MutexImpl *impl() { return impl_.get(); }
-
- synchronization_internal::SynchronizationStorage<
- synchronization_internal::MutexImpl>
- impl_;
-#else
std::atomic<intptr_t> mu_; // The Mutex state.
// Post()/Wait() versus associated PerThreadSem; in class for required
// friendship with PerThreadSem.
- static inline void IncrementSynchSem(Mutex *mu,
- base_internal::PerThreadSynch *w);
- static inline bool DecrementSynchSem(
- Mutex *mu, base_internal::PerThreadSynch *w,
- synchronization_internal::KernelTimeout t);
+ static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w);
+ static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w,
+ synchronization_internal::KernelTimeout t);
// slow path acquire
void LockSlowLoop(SynchWaitParams *waitp, int flags);
@@ -504,7 +485,6 @@ class ABSL_LOCKABLE Mutex {
void Trans(MuHow how); // used for CondVar->Mutex transfer
void Fer(
base_internal::PerThreadSynch *w); // used for CondVar->Mutex transfer
-#endif
// Catch the error of writing Mutex when intending MutexLock.
Mutex(const volatile Mutex * /*ignored*/) {} // NOLINT(runtime/explicit)
@@ -525,22 +505,36 @@ class ABSL_LOCKABLE Mutex {
// Example:
//
// Class Foo {
-//
+// public:
// Foo::Bar* Baz() {
-// MutexLock l(&lock_);
+// MutexLock lock(&mu_);
// ...
// return bar;
// }
//
// private:
-// Mutex lock_;
+// Mutex mu_;
// };
class ABSL_SCOPED_LOCKABLE MutexLock {
public:
+ // Constructors
+
+ // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
+ // guaranteed to be locked when this object is constructed. Requires that
+ // `mu` be dereferenceable.
explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
this->mu_->Lock();
}
+ // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
+ // the above, the condition given by `cond` is also guaranteed to hold when
+ // this object is constructed.
+ explicit MutexLock(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ this->mu_->LockWhen(cond);
+ }
+
MutexLock(const MutexLock &) = delete; // NOLINT(runtime/mutex)
MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
MutexLock& operator=(const MutexLock&) = delete;
@@ -562,6 +556,12 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
mu->ReaderLock();
}
+ explicit ReaderMutexLock(Mutex *mu, const Condition &cond)
+ ABSL_SHARED_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ mu->ReaderLockWhen(cond);
+ }
+
ReaderMutexLock(const ReaderMutexLock&) = delete;
ReaderMutexLock(ReaderMutexLock&&) = delete;
ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
@@ -584,6 +584,12 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
mu->WriterLock();
}
+ explicit WriterMutexLock(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ mu->WriterLockWhen(cond);
+ }
+
WriterMutexLock(const WriterMutexLock&) = delete;
WriterMutexLock(WriterMutexLock&&) = delete;
WriterMutexLock& operator=(const WriterMutexLock&) = delete;
@@ -622,16 +628,26 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
// `noexcept`; until then this requirement cannot be enforced in the
// type system.)
//
-// Note: to use a `Condition`, you need only construct it and pass it within the
-// appropriate `Mutex' member function, such as `Mutex::Await()`.
+// Note: to use a `Condition`, you need only construct it and pass it to a
+// suitable `Mutex` member function, such as `Mutex::Await()`, or to the
+// constructor of one of the scope guard classes.
//
-// Example:
+// Example using LockWhen/Unlock:
//
// // assume count_ is not internal reference count
// int count_ ABSL_GUARDED_BY(mu_);
+// Condition count_is_zero(+[](int *count) { return *count == 0; }, &count_);
+//
+// mu_.LockWhen(count_is_zero);
+// // ...
+// mu_.Unlock();
//
-// mu_.LockWhen(Condition(+[](int* count) { return *count == 0; },
-// &count_));
+// Example using a scope guard:
+//
+// {
+// MutexLock lock(&mu_, count_is_zero);
+// // ...
+// }
//
// When multiple threads are waiting on exactly the same condition, make sure
// that they are constructed with the same parameters (same pointer to function
@@ -686,10 +702,10 @@ class Condition {
// };
// mu_.Await(Condition(&reached));
//
- // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReadHeld()" in the
- // lambda as it may be called when the mutex is being unlocked from a scope
- // holding only a reader lock, which will make the assertion not fulfilled and
- // crash the binary.
+ // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReaderHeld()" in
+ // the lambda as it may be called when the mutex is being unlocked from a
+ // scope holding only a reader lock, which will make the assertion not
+ // fulfilled and crash the binary.
// See class comment for performance advice. In particular, if there
// might be more than one waiter for the same condition, make sure
@@ -838,17 +854,10 @@ class CondVar {
void EnableDebugLog(const char *name);
private:
-#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
- synchronization_internal::CondVarImpl *impl() { return impl_.get(); }
- synchronization_internal::SynchronizationStorage<
- synchronization_internal::CondVarImpl>
- impl_;
-#else
bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t);
void Remove(base_internal::PerThreadSynch *s);
void Wakeup(base_internal::PerThreadSynch *w);
std::atomic<intptr_t> cv_; // Condition variable state.
-#endif
CondVar(const CondVar&) = delete;
CondVar& operator=(const CondVar&) = delete;
};
@@ -870,6 +879,15 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
this->mu_->Lock();
}
}
+
+ explicit MutexLockMaybe(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ if (this->mu_ != nullptr) {
+ this->mu_->LockWhen(cond);
+ }
+ }
+
~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
if (this->mu_ != nullptr) { this->mu_->Unlock(); }
}
@@ -892,6 +910,13 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
: mu_(mu) {
this->mu_->Lock();
}
+
+ explicit ReleasableMutexLock(Mutex *mu, const Condition &cond)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ this->mu_->LockWhen(cond);
+ }
+
~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
if (this->mu_ != nullptr) { this->mu_->Unlock(); }
}
@@ -906,12 +931,6 @@ class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
ReleasableMutexLock& operator=(ReleasableMutexLock&&) = delete;
};
-#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
-
-inline constexpr Mutex::Mutex(absl::ConstInitType) : impl_(absl::kConstInit) {}
-
-#else
-
inline Mutex::Mutex() : mu_(0) {
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}
@@ -920,8 +939,6 @@ inline constexpr Mutex::Mutex(absl::ConstInitType) : mu_(0) {}
inline CondVar::CondVar() : cv_(0) {}
-#endif // ABSL_INTERNAL_USE_NONPROD_MUTEX
-
// static
template <typename T>
bool Condition::CastAndCallMethod(const Condition *c) {
@@ -988,7 +1005,7 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp));
//
// This has the same memory ordering concerns as RegisterMutexProfiler() above.
void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
- int64_t wait_cycles));
+ int64_t wait_cycles));
// TODO(gfalcon): Combine RegisterMutexProfiler() and RegisterMutexTracer()
// into a single interface, since they are only ever called in pairs.
@@ -1059,7 +1076,7 @@ ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
-void AbslInternalMutexYield();
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
} // extern "C"
#endif // ABSL_SYNCHRONIZATION_MUTEX_H_
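
Note: the most visible API addition in this header is the Condition-taking
constructor on the scoped lock types. A usage sketch (class and member names
are illustrative): a blocking pop that acquires the lock only once the queue
is non-empty, then releases it on scope exit.

    #include <deque>

    #include "absl/synchronization/mutex.h"

    class IntQueue {
     public:
      void Push(int v) {
        absl::MutexLock lock(&mu_);
        items_.push_back(v);
      }

      int BlockingPop() {
        // Equivalent to mu_.LockWhen(...) plus unlock on scope exit: the
        // constructor returns with mu_ held and NonEmpty() true.
        absl::MutexLock lock(&mu_,
                             absl::Condition(this, &IntQueue::NonEmpty));
        int v = items_.front();
        items_.pop_front();
        return v;
      }

     private:
      bool NonEmpty() const { return !items_.empty(); }

      absl::Mutex mu_;
      std::deque<int> items_ ABSL_GUARDED_BY(mu_);
    };
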
diff --git a/absl/synchronization/mutex_benchmark.cc b/absl/synchronization/mutex_benchmark.cc
index 933ea14f..e35aed8b 100644
--- a/absl/synchronization/mutex_benchmark.cc
+++ b/absl/synchronization/mutex_benchmark.cc
@@ -61,8 +61,124 @@ class RaiiLocker<std::mutex> {
std::mutex* mu_;
};
+// RAII object to change the Mutex priority of the running thread.
+class ScopedThreadMutexPriority {
+ public:
+ explicit ScopedThreadMutexPriority(int priority) {
+ absl::base_internal::ThreadIdentity* identity =
+ absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
+ identity->per_thread_synch.priority = priority;
+ // Bump next_priority_read_cycles to the infinite future so that the
+ // implementation doesn't re-read the thread's actual scheduler priority
+ // and replace our temporary scoped priority.
+ identity->per_thread_synch.next_priority_read_cycles =
+ std::numeric_limits<int64_t>::max();
+ }
+ ~ScopedThreadMutexPriority() {
+ // Reset the "next priority read time" back to the infinite past so that
+ // the next time the Mutex implementation wants to know this thread's
+ // priority, it re-reads it from the OS instead of using our overridden
+ // priority.
+ absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()
+ ->per_thread_synch.next_priority_read_cycles =
+ std::numeric_limits<int64_t>::min();
+ }
+};
+
+void BM_MutexEnqueue(benchmark::State& state) {
+ // In the "multiple priorities" variant of the benchmark, one of the
+ // threads runs with Mutex priority 0 while the rest run at elevated priority.
+ // This benchmarks the performance impact of the presence of a low priority
+// waiter when a higher priority waiter adds itself to the queue
+ // (b/175224064).
+ //
+ // NOTE: The actual scheduler priority is not modified in this benchmark:
+ // all of the threads get CPU slices with the same priority. Only the
+ // Mutex queueing behavior is modified.
+ const bool multiple_priorities = state.range(0);
+ ScopedThreadMutexPriority priority_setter(
+ (multiple_priorities && state.thread_index != 0) ? 1 : 0);
+
+ struct Shared {
+ absl::Mutex mu;
+ std::atomic<int> looping_threads{0};
+ std::atomic<int> blocked_threads{0};
+ std::atomic<bool> thread_has_mutex{false};
+ };
+ static Shared* shared = new Shared;
+
+ // Set up 'blocked_threads' to count how many threads are currently blocked
+ // in Abseil synchronization code.
+ //
+ // NOTE: Blocking done within the Google Benchmark library itself (e.g.
+ // the barrier which synchronizes threads entering and exiting the benchmark
+ // loop) does _not_ get registered in this counter. This is because Google
+ // Benchmark uses its own synchronization primitives based on std::mutex, not
+ // Abseil synchronization primitives. If at some point the benchmark library
+ // merges into Abseil, this code may break.
+ absl::synchronization_internal::PerThreadSem::SetThreadBlockedCounter(
+ &shared->blocked_threads);
+
+ // The benchmark framework may run several iterations in the same process,
+ // reusing the same static-initialized 'shared' object. Given the semantics
+ // of its members, we expect everything to be reset to zero by the end of
+ // each iteration. Assert that this is the case, just to be sure.
+ ABSL_RAW_CHECK(
+ shared->looping_threads.load(std::memory_order_relaxed) == 0 &&
+ shared->blocked_threads.load(std::memory_order_relaxed) == 0 &&
+ !shared->thread_has_mutex.load(std::memory_order_relaxed),
+ "Shared state isn't zeroed at start of benchmark iteration");
+
+ static constexpr int kBatchSize = 1000;
+ while (state.KeepRunningBatch(kBatchSize)) {
+ shared->looping_threads.fetch_add(1);
+ for (int i = 0; i < kBatchSize; i++) {
+ {
+ absl::MutexLock l(&shared->mu);
+ shared->thread_has_mutex.store(true, std::memory_order_relaxed);
+ // Spin until all other threads are either out of the benchmark loop
+ // or blocked on the mutex. This ensures that the mutex queue is kept
+ // at its maximal length to benchmark the performance of queueing on
+ // a highly contended mutex.
+ while (shared->looping_threads.load(std::memory_order_relaxed) -
+ shared->blocked_threads.load(std::memory_order_relaxed) !=
+ 1) {
+ }
+ shared->thread_has_mutex.store(false);
+ }
+ // Spin until some other thread has acquired the mutex before we block
+ // again. This ensures that we always go through the slow (queueing)
+ // acquisition path rather than reacquiring the mutex we just released.
+ while (!shared->thread_has_mutex.load(std::memory_order_relaxed) &&
+ shared->looping_threads.load(std::memory_order_relaxed) > 1) {
+ }
+ }
+ // The benchmark framework uses a barrier to ensure that all of the threads
+ // complete their benchmark loop together before any of the threads exit
+ // the loop. So, we need to remove ourselves from the "looping threads"
+ // counter here before potentially blocking on that barrier. Otherwise,
+ // another thread spinning above might wait forever for this thread to
+ // block on the mutex while we are in fact waiting to exit.
+ shared->looping_threads.fetch_add(-1);
+ }
+ absl::synchronization_internal::PerThreadSem::SetThreadBlockedCounter(
+ nullptr);
+}
+
+BENCHMARK(BM_MutexEnqueue)
+ ->Threads(4)
+ ->Threads(64)
+ ->Threads(128)
+ ->Threads(512)
+ ->ArgName("multiple_priorities")
+ ->Arg(false)
+ ->Arg(true);
+
template <typename MutexType>
void BM_Contended(benchmark::State& state) {
+ int priority = state.thread_index % state.range(1);
+ ScopedThreadMutexPriority priority_setter(priority);
+
struct Shared {
MutexType mu;
int data = 0;
@@ -85,81 +201,51 @@ void BM_Contended(benchmark::State& state) {
DelayNs(state.range(0), &shared->data);
}
}
+void SetupBenchmarkArgs(benchmark::internal::Benchmark* bm,
+ bool do_test_priorities) {
+ const int max_num_priorities = do_test_priorities ? 2 : 1;
+ bm->UseRealTime()
+ // ThreadPerCpu poorly handles non-power-of-two CPU counts.
+ ->Threads(1)
+ ->Threads(2)
+ ->Threads(4)
+ ->Threads(6)
+ ->Threads(8)
+ ->Threads(12)
+ ->Threads(16)
+ ->Threads(24)
+ ->Threads(32)
+ ->Threads(48)
+ ->Threads(64)
+ ->Threads(96)
+ ->Threads(128)
+ ->Threads(192)
+ ->Threads(256)
+ ->ArgNames({"cs_ns", "num_prios"});
+ // Some empirically chosen amounts of work in the critical section.
+ // 1 is low contention, 2000 is high contention, with a few values in between.
+ for (int critical_section_ns : {1, 20, 50, 200, 2000}) {
+ for (int num_priorities = 1; num_priorities <= max_num_priorities;
+ num_priorities++) {
+ bm->ArgPair(critical_section_ns, num_priorities);
+ }
+ }
+}
BENCHMARK_TEMPLATE(BM_Contended, absl::Mutex)
- ->UseRealTime()
- // ThreadPerCpu poorly handles non-power-of-two CPU counts.
- ->Threads(1)
- ->Threads(2)
- ->Threads(4)
- ->Threads(6)
- ->Threads(8)
- ->Threads(12)
- ->Threads(16)
- ->Threads(24)
- ->Threads(32)
- ->Threads(48)
- ->Threads(64)
- ->Threads(96)
- ->Threads(128)
- ->Threads(192)
- ->Threads(256)
- // Some empirically chosen amounts of work in critical section.
- // 1 is low contention, 200 is high contention and few values in between.
- ->Arg(1)
- ->Arg(20)
- ->Arg(50)
- ->Arg(200);
+ ->Apply([](benchmark::internal::Benchmark* bm) {
+ SetupBenchmarkArgs(bm, /*do_test_priorities=*/true);
+ });
BENCHMARK_TEMPLATE(BM_Contended, absl::base_internal::SpinLock)
- ->UseRealTime()
- // ThreadPerCpu poorly handles non-power-of-two CPU counts.
- ->Threads(1)
- ->Threads(2)
- ->Threads(4)
- ->Threads(6)
- ->Threads(8)
- ->Threads(12)
- ->Threads(16)
- ->Threads(24)
- ->Threads(32)
- ->Threads(48)
- ->Threads(64)
- ->Threads(96)
- ->Threads(128)
- ->Threads(192)
- ->Threads(256)
- // Some empirically chosen amounts of work in critical section.
- // 1 is low contention, 200 is high contention and few values in between.
- ->Arg(1)
- ->Arg(20)
- ->Arg(50)
- ->Arg(200);
+ ->Apply([](benchmark::internal::Benchmark* bm) {
+ SetupBenchmarkArgs(bm, /*do_test_priorities=*/false);
+ });
BENCHMARK_TEMPLATE(BM_Contended, std::mutex)
- ->UseRealTime()
- // ThreadPerCpu poorly handles non-power-of-two CPU counts.
- ->Threads(1)
- ->Threads(2)
- ->Threads(4)
- ->Threads(6)
- ->Threads(8)
- ->Threads(12)
- ->Threads(16)
- ->Threads(24)
- ->Threads(32)
- ->Threads(48)
- ->Threads(64)
- ->Threads(96)
- ->Threads(128)
- ->Threads(192)
- ->Threads(256)
- // Some empirically chosen amounts of work in critical section.
- // 1 is low contention, 200 is high contention and few values in between.
- ->Arg(1)
- ->Arg(20)
- ->Arg(50)
- ->Arg(200);
+ ->Apply([](benchmark::internal::Benchmark* bm) {
+ SetupBenchmarkArgs(bm, /*do_test_priorities=*/false);
+ });
// Measure the overhead of conditions on mutex release (when they must be
// evaluated). Mutex has (some) support for equivalence classes allowing
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index 16fc9058..f8fbf948 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -707,6 +707,40 @@ TEST(Mutex, LockWhen) {
t.join();
}
+TEST(Mutex, LockWhenGuard) {
+ absl::Mutex mu;
+ int n = 30;
+ bool done = false;
+
+ // We don't inline the lambda because the conversion is ambiguous in MSVC.
+ bool (*cond_eq_10)(int *) = [](int *p) { return *p == 10; };
+ bool (*cond_lt_10)(int *) = [](int *p) { return *p < 10; };
+
+ std::thread t1([&mu, &n, &done, cond_eq_10]() {
+ absl::ReaderMutexLock lock(&mu, absl::Condition(cond_eq_10, &n));
+ done = true;
+ });
+
+ std::thread t2[10];
+ for (std::thread &t : t2) {
+ t = std::thread([&mu, &n, cond_lt_10]() {
+ absl::WriterMutexLock lock(&mu, absl::Condition(cond_lt_10, &n));
+ ++n;
+ });
+ }
+
+ {
+ absl::MutexLock lock(&mu);
+ n = 0;
+ }
+
+ for (std::thread &t : t2) t.join();
+ t1.join();
+
+ EXPECT_TRUE(done);
+ EXPECT_EQ(n, 10);
+}
+
// --------------------------------------------------------
// The following test requires Mutex::ReaderLock to be a real shared
// lock, which is not the case in all builds.
@@ -818,7 +852,7 @@ TEST(Mutex, MutexReaderDecrementBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
// held and then destroyed (w/o unlocking).
#ifdef ABSL_HAVE_THREAD_SANITIZER
// TSAN reports errors when locked Mutexes are destroyed.
-TEST(Mutex, DISABLED_LockedMutexDestructionBug) NO_THREAD_SAFETY_ANALYSIS {
+TEST(Mutex, DISABLED_LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#else
TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#endif
@@ -1002,9 +1036,6 @@ TEST(Mutex, AcquireFromCondition) {
x.mu0.Unlock();
}
-// The deadlock detector is not part of non-prod builds, so do not test it.
-#if !defined(ABSL_INTERNAL_USE_NONPROD_MUTEX)
-
TEST(Mutex, DeadlockDetector) {
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
@@ -1122,7 +1153,7 @@ TEST(Mutex, DeadlockDetectorStressTest) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#ifdef ABSL_HAVE_THREAD_SANITIZER
// TSAN reports errors when locked Mutexes are destroyed.
-TEST(Mutex, DISABLED_DeadlockIdBug) NO_THREAD_SAFETY_ANALYSIS {
+TEST(Mutex, DISABLED_DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#else
TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#endif
@@ -1158,7 +1189,6 @@ TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
c.Lock();
c.Unlock();
}
-#endif // !defined(ABSL_INTERNAL_USE_NONPROD_MUTEX)
// --------------------------------------------------------
// Test for timeouts/deadlines on condition waits that are specified using