Diffstat (limited to 'absl/synchronization')
-rw-r--r--  absl/synchronization/internal/futex.h    80
-rw-r--r--  absl/synchronization/internal/waiter.cc    5
-rw-r--r--  absl/synchronization/mutex.cc             45
3 files changed, 84 insertions(+), 46 deletions(-)
diff --git a/absl/synchronization/internal/futex.h b/absl/synchronization/internal/futex.h
index 9cf9841d..8d973263 100644
--- a/absl/synchronization/internal/futex.h
+++ b/absl/synchronization/internal/futex.h
@@ -16,9 +16,7 @@
#include "absl/base/config.h"
-#ifdef _WIN32
-#include <windows.h>
-#else
+#ifndef _WIN32
#include <sys/time.h>
#include <unistd.h>
#endif
@@ -85,34 +83,70 @@ namespace synchronization_internal {
class FutexImpl {
public:
- static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
+ // Atomically check that `*v == val`, and if it is, then sleep until the
+ // timeout `t` has been reached, or until woken by `Wake()`.
+ static int WaitUntil(std::atomic<int32_t>* v, int32_t val,
KernelTimeout t) {
- long err = 0; // NOLINT(runtime/int)
- if (t.has_timeout()) {
- // https://locklessinc.com/articles/futex_cheat_sheet/
- // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
- struct timespec abs_timeout = t.MakeAbsTimespec();
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
- err = syscall(
- SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
- &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
+ // Monotonic waits are disabled for production builds because go/btm
+ // requires synchronized clocks.
+ // TODO(b/160682823): Find a way to enable this when BTM is not enabled
+ // since production builds don't always run on Borg.
+#if defined(CLOCK_MONOTONIC) && !defined(__GOOGLE_GRTE_VERSION__)
+ constexpr bool kRelativeTimeoutSupported = true;
+#else
+ constexpr bool kRelativeTimeoutSupported = false;
+#endif
+
+ if (!t.has_timeout()) {
+ return Wait(v, val);
+ } else if (kRelativeTimeoutSupported && t.is_relative_timeout()) {
+ auto rel_timespec = t.MakeRelativeTimespec();
+ return WaitRelativeTimeout(v, val, &rel_timespec);
} else {
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until woken by FUTEX_WAKE.
- err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
+ auto abs_timespec = t.MakeAbsTimespec();
+ return WaitAbsoluteTimeout(v, val, &abs_timespec);
}
- if (ABSL_PREDICT_FALSE(err != 0)) {
+ }
+
+  // Atomically check that `*v == val`, and if it is, then sleep until
+  // woken by `Wake()`.
+ static int Wait(std::atomic<int32_t>* v, int32_t val) {
+ return WaitAbsoluteTimeout(v, val, nullptr);
+ }
+
+ // Atomically check that `*v == val`, and if it is, then sleep until
+ // CLOCK_REALTIME reaches `*abs_timeout`, or until woken by `Wake()`.
+ static int WaitAbsoluteTimeout(std::atomic<int32_t>* v, int32_t val,
+ const struct timespec* abs_timeout) {
+ // https://locklessinc.com/articles/futex_cheat_sheet/
+ // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
+ auto err =
+ syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME,
+ val, abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
+ if (err != 0) {
+ return -errno;
+ }
+ return 0;
+ }
+
+ // Atomically check that `*v == val`, and if it is, then sleep until
+ // `*rel_timeout` has elapsed, or until woken by `Wake()`.
+ static int WaitRelativeTimeout(std::atomic<int32_t>* v, int32_t val,
+ const struct timespec* rel_timeout) {
+    // Atomically check that the futex value is still `val`, and if it
+    // is, sleep until `*rel_timeout` has elapsed or until woken by FUTEX_WAKE.
+    auto err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+                       FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, rel_timeout);
+ if (err != 0) {
return -errno;
}
return 0;
}
- static int Wake(std::atomic<int32_t> *v, int32_t count) {
- // NOLINTNEXTLINE(runtime/int)
- long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+ // Wakes at most `count` waiters that have entered the sleep state on `v`.
+ static int Wake(std::atomic<int32_t>* v, int32_t count) {
+ auto err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
if (ABSL_PREDICT_FALSE(err < 0)) {
return -errno;
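Editor's note on the two futex timeout modes the new helpers separate: FUTEX_WAIT interprets its timespec as a relative duration measured against the monotonic clock, while FUTEX_WAIT_BITSET combined with FUTEX_CLOCK_REALTIME interprets it as an absolute wall-clock deadline. A minimal, self-contained sketch of the two kernel interfaces follows; it is not part of the patch, and the helper names are invented for illustration.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <atomic>
#include <cerrno>
#include <cstdint>
#include <ctime>

// Sketch: FUTEX_WAIT sleeps for a *relative* duration (monotonic clock).
int FutexWaitRelativeSketch(std::atomic<int32_t>* v, int32_t val,
                            const struct timespec* rel_timeout) {
  long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
                     FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, rel_timeout);
  return err != 0 ? -errno : 0;
}

// Sketch: FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME sleeps until an
// *absolute* CLOCK_REALTIME deadline.
int FutexWaitAbsoluteSketch(std::atomic<int32_t>* v, int32_t val,
                            const struct timespec* abs_deadline) {
  long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
                     FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG |
                         FUTEX_CLOCK_REALTIME,
                     val, abs_deadline, nullptr, FUTEX_BITSET_MATCH_ANY);
  return err != 0 ? -errno : 0;
}

Because plain FUTEX_WAIT measures its timeout against the monotonic clock, WaitUntil routes relative timeouts through it only when the kRelativeTimeoutSupported gate allows; everything else falls back to the absolute realtime form.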
diff --git a/absl/synchronization/internal/waiter.cc b/absl/synchronization/internal/waiter.cc
index f2051d67..1b8d8d09 100644
--- a/absl/synchronization/internal/waiter.cc
+++ b/absl/synchronization/internal/waiter.cc
@@ -48,7 +48,6 @@
#include "absl/base/optimization.h"
#include "absl/synchronization/internal/kernel_timeout.h"
-
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
@@ -67,9 +66,7 @@ static void MaybeBecomeIdle() {
#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
-Waiter::Waiter() {
- futex_.store(0, std::memory_order_relaxed);
-}
+Waiter::Waiter() : futex_(0) {}
bool Waiter::Wait(KernelTimeout t) {
// Loop until we can atomically decrement futex from a positive
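The hunk ends just as Waiter::Wait begins. For context, here is a hypothetical sketch of the loop that comment describes, assuming the futex word counts pending wakeups; the name WaitSketch and the exact memory orders are illustrative, not the Abseil implementation.

#include <atomic>
#include <cerrno>
#include <cstdint>

#include "absl/synchronization/internal/futex.h"
#include "absl/synchronization/internal/kernel_timeout.h"

bool WaitSketch(std::atomic<int32_t>& futex,
                absl::synchronization_internal::KernelTimeout t) {
  while (true) {
    int32_t x = futex.load(std::memory_order_relaxed);
    while (x > 0) {
      // A positive value means wakeups are pending; claim one by
      // decrementing the count.
      if (futex.compare_exchange_weak(x, x - 1, std::memory_order_acquire,
                                      std::memory_order_relaxed)) {
        return true;
      }
    }
    // Nothing pending: sleep only while the word is still 0, then retry.
    const int err =
        absl::synchronization_internal::FutexImpl::WaitUntil(&futex, 0, t);
    if (err == -ETIMEDOUT) return false;  // timeout reached
    // -EINTR/-EAGAIN and genuine wakes both fall through to retry the
    // decrement at the top of the loop.
  }
}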
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index ef6d063e..a8911614 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -635,21 +635,6 @@ void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
std::memory_order_release);
}
-// --------------------------time support
-
-// Return the current time plus the timeout. Use the same clock as
-// PerThreadSem::Wait() for consistency. Unfortunately, we don't have
-// such a choice when a deadline is given directly.
-static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
-#ifndef _WIN32
- struct timeval tv;
- gettimeofday(&tv, nullptr);
- return absl::TimeFromTimeval(tv) + timeout;
-#else
- return absl::Now() + timeout;
-#endif
-}
-
// --------------------------Mutexes
// In the layout below, the msb of the bottom byte is currently unused. Also,
@@ -1549,7 +1534,13 @@ void Mutex::LockWhen(const Condition &cond) {
}
bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
- return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kExclusive, &cond,
+ KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+ return res;
}
bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
@@ -1572,7 +1563,12 @@ void Mutex::ReaderLockWhen(const Condition &cond) {
bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
absl::Duration timeout) {
- return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+ return res;
}
bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
@@ -1597,7 +1593,18 @@ void Mutex::Await(const Condition &cond) {
}
bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
- return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ return true;
+ }
+
+ KernelTimeout t{timeout};
+ bool res = this->AwaitCommon(cond, t);
+ ABSL_RAW_CHECK(res || t.has_timeout(),
+ "condition untrue on return from Await");
+ return res;
}
bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
@@ -2663,7 +2670,7 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
}
bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
- return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+ return WaitCommon(mu, KernelTimeout(timeout));
}
bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
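The common thread in these mutex.cc hunks: DeadlineFromTimeout eagerly converted every relative timeout into an absolute wall-clock deadline, so a later step of the system clock could lengthen or shorten the wait. Passing KernelTimeout(timeout) straight through preserves the relative form, letting the futex layer use the monotonic FUTEX_WAIT path where supported. A hedged sketch of the caller-visible contrast using Abseil's public CondVar API; OldStyleWait/NewStyleWait are illustrative names, and in both cases a return value of true means the timeout expired.

#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"

// Old style: pin the timeout to the realtime clock up front. If the wall
// clock is stepped after this conversion, the wait oversleeps or returns
// early.
bool OldStyleWait(absl::CondVar& cv, absl::Mutex& mu, absl::Duration timeout) {
  const absl::Time deadline = absl::Now() + timeout;
  return cv.WaitWithDeadline(&mu, deadline);
}

// New style: hand the duration to WaitWithTimeout, which (after this patch)
// wraps it in a relative KernelTimeout instead of a realtime deadline.
bool NewStyleWait(absl::CondVar& cv, absl::Mutex& mu, absl::Duration timeout) {
  return cv.WaitWithTimeout(&mu, timeout);
}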