Diffstat (limited to 'absl/synchronization')
-rw-r--r--  absl/synchronization/BUILD.bazel                         |  76
-rw-r--r--  absl/synchronization/CMakeLists.txt                      |  59
-rw-r--r--  absl/synchronization/internal/create_thread_identity.cc |   5
-rw-r--r--  absl/synchronization/internal/futex.h                    | 106
-rw-r--r--  absl/synchronization/internal/futex_waiter.cc            | 111
-rw-r--r--  absl/synchronization/internal/futex_waiter.h             |  63
-rw-r--r--  absl/synchronization/internal/graphcycles.cc             |  18
-rw-r--r--  absl/synchronization/internal/graphcycles_test.cc        |  41
-rw-r--r--  absl/synchronization/internal/kernel_timeout.cc          | 225
-rw-r--r--  absl/synchronization/internal/kernel_timeout.h           | 236
-rw-r--r--  absl/synchronization/internal/kernel_timeout_test.cc     | 394
-rw-r--r--  absl/synchronization/internal/per_thread_sem.cc          |  20
-rw-r--r--  absl/synchronization/internal/per_thread_sem.h           |  11
-rw-r--r--  absl/synchronization/internal/pthread_waiter.cc          | 167
-rw-r--r--  absl/synchronization/internal/pthread_waiter.h           |  60
-rw-r--r--  absl/synchronization/internal/sem_waiter.cc              | 122
-rw-r--r--  absl/synchronization/internal/sem_waiter.h               |  65
-rw-r--r--  absl/synchronization/internal/stdcpp_waiter.cc           |  91
-rw-r--r--  absl/synchronization/internal/stdcpp_waiter.h            |  56
-rw-r--r--  absl/synchronization/internal/waiter.cc                  | 403
-rw-r--r--  absl/synchronization/internal/waiter.h                   | 132
-rw-r--r--  absl/synchronization/internal/waiter_base.cc             |  42
-rw-r--r--  absl/synchronization/internal/waiter_base.h              |  90
-rw-r--r--  absl/synchronization/internal/waiter_test.cc             | 180
-rw-r--r--  absl/synchronization/internal/win32_waiter.cc            | 151
-rw-r--r--  absl/synchronization/internal/win32_waiter.h             |  70
-rw-r--r--  absl/synchronization/lifetime_test.cc                    |  18
-rw-r--r--  absl/synchronization/mutex.cc                            | 818
-rw-r--r--  absl/synchronization/mutex.h                             | 270
-rw-r--r--  absl/synchronization/mutex_method_pointer_test.cc        |   4
-rw-r--r--  absl/synchronization/mutex_test.cc                       | 301
-rw-r--r--  absl/synchronization/notification_test.cc                |   2
32 files changed, 3067 insertions(+), 1340 deletions(-)
diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel
index ccaee796..0ca94e01 100644
--- a/absl/synchronization/BUILD.bazel
+++ b/absl/synchronization/BUILD.bazel
@@ -21,7 +21,7 @@ load(
"ABSL_TEST_COPTS",
)
-package(default_visibility = ["//visibility:public"])
+package(default_visibility = ["//visibility:private"])
licenses(["notice"])
@@ -38,9 +38,6 @@ cc_library(
"//conditions:default": [],
}),
linkopts = ABSL_DEFAULT_LINKOPTS,
- visibility = [
- "//absl:__subpackages__",
- ],
deps = [
"//absl/base",
"//absl/base:base_internal",
@@ -53,27 +50,49 @@ cc_library(
cc_library(
name = "kernel_timeout_internal",
+ srcs = ["internal/kernel_timeout.cc"],
hdrs = ["internal/kernel_timeout.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
- "//absl/synchronization:__pkg__",
],
deps = [
+ "//absl/base",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/base:raw_logging_internal",
"//absl/time",
],
)
+cc_test(
+ name = "kernel_timeout_internal_test",
+ srcs = ["internal/kernel_timeout_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ flaky = 1,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":kernel_timeout_internal",
+ "//absl/base:config",
+ "//absl/random",
+ "//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
cc_library(
name = "synchronization",
srcs = [
"barrier.cc",
"blocking_counter.cc",
"internal/create_thread_identity.cc",
+ "internal/futex_waiter.cc",
"internal/per_thread_sem.cc",
- "internal/waiter.cc",
+ "internal/pthread_waiter.cc",
+ "internal/sem_waiter.cc",
+ "internal/stdcpp_waiter.cc",
+ "internal/waiter_base.cc",
+ "internal/win32_waiter.cc",
"mutex.cc",
"notification.cc",
],
@@ -82,8 +101,14 @@ cc_library(
"blocking_counter.h",
"internal/create_thread_identity.h",
"internal/futex.h",
+ "internal/futex_waiter.h",
"internal/per_thread_sem.h",
+ "internal/pthread_waiter.h",
+ "internal/sem_waiter.h",
+ "internal/stdcpp_waiter.h",
"internal/waiter.h",
+ "internal/waiter_base.h",
+ "internal/win32_waiter.h",
"mutex.h",
"notification.h",
],
@@ -94,11 +119,12 @@ cc_library(
"//absl:wasm": [],
"//conditions:default": ["-pthread"],
}) + ABSL_DEFAULT_LINKOPTS,
+ visibility = ["//visibility:public"],
deps = [
":graphcycles_internal",
":kernel_timeout_internal",
- "//absl/base:atomic_hook",
"//absl/base",
+ "//absl/base:atomic_hook",
"//absl/base:base_internal",
"//absl/base:config",
"//absl/base:core_headers",
@@ -120,7 +146,7 @@ cc_test(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = [
- "no_test_wasm",
+ "no_test_wasm", # b/122473323
],
deps = [
":synchronization",
@@ -136,7 +162,7 @@ cc_test(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = [
- "no_test_wasm",
+ "no_test_wasm", # b/122473323
],
deps = [
":synchronization",
@@ -152,7 +178,6 @@ cc_binary(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["benchmark"],
- visibility = ["//visibility:private"],
deps = [
":synchronization",
":thread_pool",
@@ -169,7 +194,8 @@ cc_test(
deps = [
":graphcycles_internal",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
+ "//absl/log:check",
"@com_google_googletest//:gtest_main",
],
)
@@ -209,6 +235,7 @@ cc_test(
size = "large",
srcs = ["mutex_test.cc"],
copts = ABSL_TEST_COPTS,
+ flaky = 1,
linkopts = ABSL_DEFAULT_LINKOPTS,
shard_count = 25,
deps = [
@@ -217,7 +244,8 @@ cc_test(
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
+ "//absl/log:check",
"//absl/memory",
"//absl/time",
"@com_google_googletest//:gtest_main",
@@ -243,7 +271,6 @@ cc_library(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
- "//absl/synchronization:__pkg__",
],
deps = [
":synchronization",
@@ -260,7 +287,6 @@ cc_binary(
testonly = 1,
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- visibility = ["//visibility:private"],
deps = [
":mutex_benchmark_common",
],
@@ -271,6 +297,7 @@ cc_test(
size = "small",
srcs = ["notification_test.cc"],
copts = ABSL_TEST_COPTS,
+ flaky = 1,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["no_test_lexan"],
deps = [
@@ -286,6 +313,8 @@ cc_library(
srcs = ["internal/per_thread_sem_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = [
+ ],
deps = [
":synchronization",
"//absl/base",
@@ -315,6 +344,23 @@ cc_test(
)
cc_test(
+ name = "waiter_test",
+ srcs = ["internal/waiter_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ flaky = 1,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":kernel_timeout_internal",
+ ":synchronization",
+ ":thread_pool",
+ "//absl/base:config",
+ "//absl/random",
+ "//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
name = "lifetime_test",
srcs = [
"lifetime_test.cc",
@@ -328,6 +374,6 @@ cc_test(
deps = [
":synchronization",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
+ "//absl/log:check",
],
)
diff --git a/absl/synchronization/CMakeLists.txt b/absl/synchronization/CMakeLists.txt
index f64653bb..a0f64e5c 100644
--- a/absl/synchronization/CMakeLists.txt
+++ b/absl/synchronization/CMakeLists.txt
@@ -39,14 +39,33 @@ absl_cc_library(
kernel_timeout_internal
HDRS
"internal/kernel_timeout.h"
+ SRCS
+ "internal/kernel_timeout.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
+ absl::base
+ absl::config
absl::core_headers
absl::raw_logging_internal
absl::time
)
+absl_cc_test(
+ NAME
+ kernel_timeout_internal_test
+ SRCS
+ "internal/kernel_timeout_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::kernel_timeout_internal
+ absl::config
+ absl::random_random
+ absl::time
+ GTest::gmock_main
+)
+
absl_cc_library(
NAME
synchronization
@@ -55,16 +74,27 @@ absl_cc_library(
"blocking_counter.h"
"internal/create_thread_identity.h"
"internal/futex.h"
+ "internal/futex_waiter.h"
"internal/per_thread_sem.h"
+ "internal/pthread_waiter.h"
+ "internal/sem_waiter.h"
+ "internal/stdcpp_waiter.h"
"internal/waiter.h"
+ "internal/waiter_base.h"
+ "internal/win32_waiter.h"
"mutex.h"
"notification.h"
SRCS
"barrier.cc"
"blocking_counter.cc"
"internal/create_thread_identity.cc"
+ "internal/futex_waiter.cc"
"internal/per_thread_sem.cc"
- "internal/waiter.cc"
+ "internal/pthread_waiter.cc"
+ "internal/sem_waiter.cc"
+ "internal/stdcpp_waiter.cc"
+ "internal/waiter_base.cc"
+ "internal/win32_waiter.cc"
"notification.cc"
"mutex.cc"
COPTS
@@ -121,9 +151,10 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
- absl::graphcycles_internal
+ absl::check
absl::core_headers
- absl::raw_logging_internal
+ absl::graphcycles_internal
+ absl::log
GTest::gmock_main
)
@@ -153,10 +184,11 @@ absl_cc_test(
absl::synchronization
absl::thread_pool
absl::base
+ absl::check
absl::config
absl::core_headers
+ absl::log
absl::memory
- absl::raw_logging_internal
absl::time
GTest::gmock_main
)
@@ -223,6 +255,23 @@ absl_cc_test(
absl_cc_test(
NAME
+ waiter_test
+ SRCS
+ "internal/waiter_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::config
+ absl::kernel_timeout_internal
+ absl::random_random
+ absl::synchronization
+ absl::thread_pool
+ absl::time
+ GTest::gmock_main
+)
+
+absl_cc_test(
+ NAME
lifetime_test
SRCS
"lifetime_test.cc"
@@ -231,5 +280,5 @@ absl_cc_test(
DEPS
absl::synchronization
absl::core_headers
- absl::raw_logging_internal
+ absl::check
)
diff --git a/absl/synchronization/internal/create_thread_identity.cc b/absl/synchronization/internal/create_thread_identity.cc
index 44e6129b..eacaa28d 100644
--- a/absl/synchronization/internal/create_thread_identity.cc
+++ b/absl/synchronization/internal/create_thread_identity.cc
@@ -13,10 +13,12 @@
// limitations under the License.
#include <stdint.h>
+
#include <new>
// This file is a no-op if the required LowLevelAlloc support is missing.
#include "absl/base/internal/low_level_alloc.h"
+#include "absl/synchronization/internal/waiter.h"
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
#include <string.h>
@@ -71,6 +73,9 @@ static intptr_t RoundUp(intptr_t addr, intptr_t align) {
void OneTimeInitThreadIdentity(base_internal::ThreadIdentity* identity) {
PerThreadSem::Init(identity);
+ identity->ticker.store(0, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
}
static void ResetThreadIdentityBetweenReuse(
diff --git a/absl/synchronization/internal/futex.h b/absl/synchronization/internal/futex.h
index cb97da09..573c01b7 100644
--- a/absl/synchronization/internal/futex.h
+++ b/absl/synchronization/internal/futex.h
@@ -16,9 +16,7 @@
#include "absl/base/config.h"
-#ifdef _WIN32
-#include <windows.h>
-#else
+#ifndef _WIN32
#include <sys/time.h>
#include <unistd.h>
#endif
@@ -34,6 +32,7 @@
#include <atomic>
#include <cstdint>
+#include <limits>
#include "absl/base/optimization.h"
#include "absl/synchronization/internal/kernel_timeout.h"
@@ -81,51 +80,64 @@ namespace synchronization_internal {
#if defined(SYS_futex_time64) && !defined(SYS_futex)
#define SYS_futex SYS_futex_time64
+using FutexTimespec = struct timespec;
+#else
+// Some libc implementations have switched to an unconditional 64-bit `time_t`
+// definition. This means that `struct timespec` may not match the layout
+// expected by the kernel ABI on 32-bit platforms. So we define the
+// FutexTimespec that matches the kernel timespec definition. It should be safe
+// to use this struct for 64-bit userspace builds too, since it will use another
+// SYS_futex kernel call with 64-bit tv_sec inside timespec.
+struct FutexTimespec {
+ long tv_sec; // NOLINT
+ long tv_nsec; // NOLINT
+};
#endif
class FutexImpl {
public:
- static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
- KernelTimeout t) {
- long err = 0; // NOLINT(runtime/int)
- if (t.has_timeout()) {
- // https://locklessinc.com/articles/futex_cheat_sheet/
- // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
- struct timespec abs_timeout = t.MakeAbsTimespec();
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
- err = syscall(
- SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
- &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
- } else {
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until woken by FUTEX_WAKE.
- err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
- }
- if (ABSL_PREDICT_FALSE(err != 0)) {
+  // Atomically check that `*v == val`, and if it is, then sleep until
+  // woken by `Wake()`.
+ static int Wait(std::atomic<int32_t>* v, int32_t val) {
+ return WaitAbsoluteTimeout(v, val, nullptr);
+ }
+
+ // Atomically check that `*v == val`, and if it is, then sleep until
+ // CLOCK_REALTIME reaches `*abs_timeout`, or until woken by `Wake()`.
+ static int WaitAbsoluteTimeout(std::atomic<int32_t>* v, int32_t val,
+ const struct timespec* abs_timeout) {
+ FutexTimespec ts;
+ // https://locklessinc.com/articles/futex_cheat_sheet/
+ // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
+ auto err = syscall(
+ SYS_futex, reinterpret_cast<int32_t*>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
+ ToFutexTimespec(abs_timeout, &ts), nullptr, FUTEX_BITSET_MATCH_ANY);
+ if (err != 0) {
return -errno;
}
return 0;
}
- static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
- int32_t bits,
- const struct timespec *abstime) {
- // NOLINTNEXTLINE(runtime/int)
- long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
- nullptr, bits);
- if (ABSL_PREDICT_FALSE(err != 0)) {
+ // Atomically check that `*v == val`, and if it is, then sleep until
+ // `*rel_timeout` has elapsed, or until woken by `Wake()`.
+ static int WaitRelativeTimeout(std::atomic<int32_t>* v, int32_t val,
+ const struct timespec* rel_timeout) {
+ FutexTimespec ts;
+    // Atomically check that the futex value is still `val`, and if it is,
+    // sleep until `*rel_timeout` elapses or until woken by FUTEX_WAKE.
+    // (FUTEX_WAIT is 0, so FUTEX_PRIVATE_FLAG alone selects a FUTEX_WAIT
+    // with a relative timeout.)
+ auto err =
+ syscall(SYS_futex, reinterpret_cast<int32_t*>(v), FUTEX_PRIVATE_FLAG,
+ val, ToFutexTimespec(rel_timeout, &ts));
+ if (err != 0) {
return -errno;
}
return 0;
}
- static int Wake(std::atomic<int32_t> *v, int32_t count) {
- // NOLINTNEXTLINE(runtime/int)
- long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+ // Wakes at most `count` waiters that have entered the sleep state on `v`.
+ static int Wake(std::atomic<int32_t>* v, int32_t count) {
+ auto err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
if (ABSL_PREDICT_FALSE(err < 0)) {
return -errno;
@@ -133,16 +145,24 @@ class FutexImpl {
return 0;
}
- // FUTEX_WAKE_BITSET
- static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
- // NOLINTNEXTLINE(runtime/int)
- long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
- FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
- nullptr, bits);
- if (ABSL_PREDICT_FALSE(err < 0)) {
- return -errno;
+ private:
+ static FutexTimespec* ToFutexTimespec(const struct timespec* userspace_ts,
+ FutexTimespec* futex_ts) {
+ if (userspace_ts == nullptr) {
+ return nullptr;
}
- return 0;
+
+ using FutexSeconds = decltype(futex_ts->tv_sec);
+ using FutexNanoseconds = decltype(futex_ts->tv_nsec);
+
+ constexpr auto kMaxSeconds{(std::numeric_limits<FutexSeconds>::max)()};
+ if (userspace_ts->tv_sec > kMaxSeconds) {
+ futex_ts->tv_sec = kMaxSeconds;
+ } else {
+ futex_ts->tv_sec = static_cast<FutexSeconds>(userspace_ts->tv_sec);
+ }
+ futex_ts->tv_nsec = static_cast<FutexNanoseconds>(userspace_ts->tv_nsec);
+ return futex_ts;
}
};
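
For context, here is a minimal standalone sketch (not part of this commit; the `state` variable and the one-second deadline are illustrative assumptions) of how the reworked FutexImpl entry points fit together on Linux:

    #include <atomic>
    #include <cstdint>
    #include <time.h>

    #include "absl/synchronization/internal/futex.h"

    using absl::synchronization_internal::FutexImpl;

    void WaitForFlag(std::atomic<int32_t>* state) {
      // Sleep only while *state is still 0; a concurrent Wake() ends the sleep.
      struct timespec deadline;
      clock_gettime(CLOCK_REALTIME, &deadline);
      deadline.tv_sec += 1;  // give up after roughly one second
      int err = FutexImpl::WaitAbsoluteTimeout(state, 0, &deadline);
      // err is 0 on wakeup, or a negated errno such as -ETIMEDOUT or -EINTR;
      // callers are expected to re-check *state and retry on spurious wakeups.
      (void)err;
    }

    void SetFlag(std::atomic<int32_t>* state) {
      state->store(1, std::memory_order_release);
      FutexImpl::Wake(state, 1);  // wake at most one sleeping waiter
    }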
diff --git a/absl/synchronization/internal/futex_waiter.cc b/absl/synchronization/internal/futex_waiter.cc
new file mode 100644
index 00000000..87eb3b23
--- /dev/null
+++ b/absl/synchronization/internal/futex_waiter.cc
@@ -0,0 +1,111 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/futex_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX_WAITER
+
+#include <atomic>
+#include <cstdint>
+#include <cerrno>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/futex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char FutexWaiter::kName[];
+#endif
+
+int FutexWaiter::WaitUntil(std::atomic<int32_t>* v, int32_t val,
+ KernelTimeout t) {
+#ifdef CLOCK_MONOTONIC
+ constexpr bool kHasClockMonotonic = true;
+#else
+ constexpr bool kHasClockMonotonic = false;
+#endif
+
+ // We can't call Futex::WaitUntil() here because the prodkernel implementation
+ // does not know about KernelTimeout::SupportsSteadyClock().
+ if (!t.has_timeout()) {
+ return Futex::Wait(v, val);
+ } else if (kHasClockMonotonic && KernelTimeout::SupportsSteadyClock() &&
+ t.is_relative_timeout()) {
+ auto rel_timespec = t.MakeRelativeTimespec();
+ return Futex::WaitRelativeTimeout(v, val, &rel_timespec);
+ } else {
+ auto abs_timespec = t.MakeAbsTimespec();
+ return Futex::WaitAbsoluteTimeout(v, val, &abs_timespec);
+ }
+}
+
+bool FutexWaiter::Wait(KernelTimeout t) {
+ // Loop until we can atomically decrement futex from a positive
+ // value, waiting on a futex while we believe it is zero.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (true) {
+ int32_t x = futex_.load(std::memory_order_relaxed);
+ while (x != 0) {
+ if (!futex_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ return true; // Consumed a wakeup, we are done.
+ }
+
+ if (!first_pass) MaybeBecomeIdle();
+ const int err = WaitUntil(&futex_, 0, t);
+ if (err != 0) {
+ if (err == -EINTR || err == -EWOULDBLOCK) {
+ // Do nothing, the loop will retry.
+ } else if (err == -ETIMEDOUT) {
+ return false;
+ } else {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+void FutexWaiter::Post() {
+ if (futex_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waiter.
+ Poke();
+ }
+}
+
+void FutexWaiter::Poke() {
+ // Wake one thread waiting on the futex.
+ const int err = Futex::Wake(&futex_, 1);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_FUTEX_WAITER
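
The futex_ value acts as a count of unconsumed wakeups: Post() adds one and issues a Poke() only on the 0 -> 1 transition, while Wait() decrements a positive count or sleeps on the futex while it is 0. A hedged usage sketch of this internal API (the 100 ms budget is an arbitrary choice for illustration; application code should use the public synchronization types instead):

    #include "absl/synchronization/internal/futex_waiter.h"
    #include "absl/synchronization/internal/kernel_timeout.h"
    #include "absl/time/time.h"

    using absl::synchronization_internal::FutexWaiter;
    using absl::synchronization_internal::KernelTimeout;

    void WaitingThread(FutexWaiter& w) {
      // Blocks until a Post() arrives or ~100 ms elapse; true means a wakeup
      // was consumed, false means the timeout expired.
      bool got_wakeup = w.Wait(KernelTimeout(absl::Milliseconds(100)));
      (void)got_wakeup;
    }

    void PostingThread(FutexWaiter& w) {
      w.Post();  // wakes a sleeper only on the 0 -> 1 transition
    }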
diff --git a/absl/synchronization/internal/futex_waiter.h b/absl/synchronization/internal/futex_waiter.h
new file mode 100644
index 00000000..11dfa93b
--- /dev/null
+++ b/absl/synchronization/internal/futex_waiter.h
@@ -0,0 +1,63 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/futex.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_FUTEX_WAITER 1
+
+class FutexWaiter : public WaiterCrtp<FutexWaiter> {
+ public:
+ FutexWaiter() : futex_(0) {}
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "FutexWaiter";
+
+ private:
+ // Atomically check that `*v == val`, and if it is, then sleep until the
+ // timeout `t` has been reached, or until woken by `Wake()`.
+ static int WaitUntil(std::atomic<int32_t>* v, int32_t val,
+ KernelTimeout t);
+
+ // Futexes are defined by specification to be 32-bits.
+ // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
+ std::atomic<int32_t> futex_;
+ static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_FUTEX
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_
diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc
index feec4581..39b18482 100644
--- a/absl/synchronization/internal/graphcycles.cc
+++ b/absl/synchronization/internal/graphcycles.cc
@@ -37,6 +37,7 @@
#include <algorithm>
#include <array>
+#include <cinttypes>
#include <limits>
#include "absl/base/internal/hide_ptr.h"
#include "absl/base/internal/raw_logging.h"
@@ -114,7 +115,7 @@ class Vec {
if (src->ptr_ == src->space_) {
// Need to actually copy
resize(src->size_);
- std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
+ std::copy_n(src->ptr_, src->size_, ptr_);
src->size_ = 0;
} else {
Discard();
@@ -148,7 +149,7 @@ class Vec {
size_t request = static_cast<size_t>(capacity_) * sizeof(T);
T* copy = static_cast<T*>(
base_internal::LowLevelAlloc::AllocWithArena(request, arena));
- std::copy(ptr_, ptr_ + size_, copy);
+ std::copy_n(ptr_, size_, copy);
Discard();
ptr_ = copy;
}
@@ -386,19 +387,22 @@ bool GraphCycles::CheckInvariants() const {
Node* nx = r->nodes_[x];
void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
- ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
+ ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %" PRIu32 " %p",
+ x, ptr);
}
if (nx->visited) {
- ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
+ ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %" PRIu32, x);
}
if (!ranks.insert(nx->rank)) {
- ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
+ ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %" PRId32, nx->rank);
}
HASH_FOR_EACH(y, nx->out) {
Node* ny = r->nodes_[static_cast<uint32_t>(y)];
if (nx->rank >= ny->rank) {
- ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
- nx->rank, ny->rank);
+      ABSL_RAW_LOG(FATAL,
+                   "Edge %" PRIu32 "->%" PRId32
+                   " has bad rank assignment %" PRId32 "->%" PRId32,
+                   x, y, nx->rank, ny->rank);
}
}
}
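
The switch from plain %u/%d to the <cinttypes> PRI* macros keeps these format strings correct for the exact-width types regardless of how a platform defines them. A minimal standalone illustration (the function and values are hypothetical):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    void PrintEdge(uint32_t node, int32_t rank) {
      // PRIu32/PRId32 expand to the right length modifier for uint32_t and
      // int32_t on every target, so no casts are needed at the call site.
      std::printf("node %" PRIu32 " rank %" PRId32 "\n", node, rank);
    }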
diff --git a/absl/synchronization/internal/graphcycles_test.cc b/absl/synchronization/internal/graphcycles_test.cc
index 74eaffe7..3c6ef798 100644
--- a/absl/synchronization/internal/graphcycles_test.cc
+++ b/absl/synchronization/internal/graphcycles_test.cc
@@ -21,8 +21,9 @@
#include <vector>
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -65,51 +66,51 @@ static bool IsReachable(Edges *edges, int from, int to,
}
static void PrintEdges(Edges *edges) {
- ABSL_RAW_LOG(INFO, "EDGES (%zu)", edges->size());
+ LOG(INFO) << "EDGES (" << edges->size() << ")";
for (const auto &edge : *edges) {
int a = edge.from;
int b = edge.to;
- ABSL_RAW_LOG(INFO, "%d %d", a, b);
+ LOG(INFO) << a << " " << b;
}
- ABSL_RAW_LOG(INFO, "---");
+ LOG(INFO) << "---";
}
static void PrintGCEdges(Nodes *nodes, const IdMap &id, GraphCycles *gc) {
- ABSL_RAW_LOG(INFO, "GC EDGES");
+ LOG(INFO) << "GC EDGES";
for (int a : *nodes) {
for (int b : *nodes) {
if (gc->HasEdge(Get(id, a), Get(id, b))) {
- ABSL_RAW_LOG(INFO, "%d %d", a, b);
+ LOG(INFO) << a << " " << b;
}
}
}
- ABSL_RAW_LOG(INFO, "---");
+ LOG(INFO) << "---";
}
static void PrintTransitiveClosure(Nodes *nodes, Edges *edges) {
- ABSL_RAW_LOG(INFO, "Transitive closure");
+ LOG(INFO) << "Transitive closure";
for (int a : *nodes) {
for (int b : *nodes) {
std::unordered_set<int> seen;
if (IsReachable(edges, a, b, &seen)) {
- ABSL_RAW_LOG(INFO, "%d %d", a, b);
+ LOG(INFO) << a << " " << b;
}
}
}
- ABSL_RAW_LOG(INFO, "---");
+ LOG(INFO) << "---";
}
static void PrintGCTransitiveClosure(Nodes *nodes, const IdMap &id,
GraphCycles *gc) {
- ABSL_RAW_LOG(INFO, "GC Transitive closure");
+ LOG(INFO) << "GC Transitive closure";
for (int a : *nodes) {
for (int b : *nodes) {
if (gc->IsReachable(Get(id, a), Get(id, b))) {
- ABSL_RAW_LOG(INFO, "%d %d", a, b);
+ LOG(INFO) << a << " " << b;
}
}
}
- ABSL_RAW_LOG(INFO, "---");
+ LOG(INFO) << "---";
}
static void CheckTransitiveClosure(Nodes *nodes, Edges *edges, const IdMap &id,
@@ -125,9 +126,8 @@ static void CheckTransitiveClosure(Nodes *nodes, Edges *edges, const IdMap &id,
PrintGCEdges(nodes, id, gc);
PrintTransitiveClosure(nodes, edges);
PrintGCTransitiveClosure(nodes, id, gc);
- ABSL_RAW_LOG(FATAL, "gc_reachable %s reachable %s a %d b %d",
- gc_reachable ? "true" : "false",
- reachable ? "true" : "false", a, b);
+ LOG(FATAL) << "gc_reachable " << gc_reachable << " reachable "
+ << reachable << " a " << a << " b " << b;
}
}
}
@@ -142,7 +142,7 @@ static void CheckEdges(Nodes *nodes, Edges *edges, const IdMap &id,
if (!gc->HasEdge(Get(id, a), Get(id, b))) {
PrintEdges(edges);
PrintGCEdges(nodes, id, gc);
- ABSL_RAW_LOG(FATAL, "!gc->HasEdge(%d, %d)", a, b);
+ LOG(FATAL) << "!gc->HasEdge(" << a << ", " << b << ")";
}
}
for (const auto &a : *nodes) {
@@ -155,13 +155,12 @@ static void CheckEdges(Nodes *nodes, Edges *edges, const IdMap &id,
if (count != edges->size()) {
PrintEdges(edges);
PrintGCEdges(nodes, id, gc);
- ABSL_RAW_LOG(FATAL, "edges->size() %zu count %d", edges->size(), count);
+ LOG(FATAL) << "edges->size() " << edges->size() << " count " << count;
}
}
static void CheckInvariants(const GraphCycles &gc) {
- if (ABSL_PREDICT_FALSE(!gc.CheckInvariants()))
- ABSL_RAW_LOG(FATAL, "CheckInvariants");
+ CHECK(gc.CheckInvariants()) << "CheckInvariants";
}
// Returns the index of a randomly chosen node in *nodes.
@@ -309,7 +308,7 @@ TEST(GraphCycles, RandomizedTest) {
break;
default:
- ABSL_RAW_LOG(FATAL, "op %d", op);
+ LOG(FATAL) << "op " << op;
}
// Very rarely, test graph expansion by adding then removing many nodes.
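
The migration from ABSL_RAW_LOG's printf-style calls to the streaming absl::log macros, in miniature (the function and values are illustrative):

    #include "absl/log/check.h"
    #include "absl/log/log.h"

    void LogStyles(int a, int b, bool ok) {
      // The streaming form replaces format strings and manual bool-to-string
      // conversion; values are formatted by operator<<.
      LOG(INFO) << a << " " << b;
      // CHECK replaces the predict-false + ABSL_RAW_LOG(FATAL) pattern.
      CHECK(ok) << "CheckInvariants";
    }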
diff --git a/absl/synchronization/internal/kernel_timeout.cc b/absl/synchronization/internal/kernel_timeout.cc
new file mode 100644
index 00000000..48ea6287
--- /dev/null
+++ b/absl/synchronization/internal/kernel_timeout.cc
@@ -0,0 +1,225 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
+#include <algorithm>
+#include <chrono> // NOLINT(build/c++11)
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
+#include "absl/base/config.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr uint64_t KernelTimeout::kNoTimeout;
+constexpr int64_t KernelTimeout::kMaxNanos;
+#endif
+
+int64_t KernelTimeout::SteadyClockNow() {
+ if (!SupportsSteadyClock()) {
+ return absl::GetCurrentTimeNanos();
+ }
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(
+ std::chrono::steady_clock::now().time_since_epoch())
+ .count();
+}
+
+KernelTimeout::KernelTimeout(absl::Time t) {
+ // `absl::InfiniteFuture()` is a common "no timeout" value and cheaper to
+ // compare than convert.
+ if (t == absl::InfiniteFuture()) {
+ rep_ = kNoTimeout;
+ return;
+ }
+
+ int64_t unix_nanos = absl::ToUnixNanos(t);
+
+ // A timeout that lands before the unix epoch is converted to 0.
+ // In theory implementations should expire these timeouts immediately.
+ if (unix_nanos < 0) {
+ unix_nanos = 0;
+ }
+
+ // Values greater than or equal to kMaxNanos are converted to infinite.
+ if (unix_nanos >= kMaxNanos) {
+ rep_ = kNoTimeout;
+ return;
+ }
+
+ rep_ = static_cast<uint64_t>(unix_nanos) << 1;
+}
+
+KernelTimeout::KernelTimeout(absl::Duration d) {
+ // `absl::InfiniteDuration()` is a common "no timeout" value and cheaper to
+ // compare than convert.
+ if (d == absl::InfiniteDuration()) {
+ rep_ = kNoTimeout;
+ return;
+ }
+
+ int64_t nanos = absl::ToInt64Nanoseconds(d);
+
+ // Negative durations are normalized to 0.
+ // In theory implementations should expire these timeouts immediately.
+ if (nanos < 0) {
+ nanos = 0;
+ }
+
+ int64_t now = SteadyClockNow();
+ if (nanos > kMaxNanos - now) {
+ // Durations that would be greater than kMaxNanos are converted to infinite.
+ rep_ = kNoTimeout;
+ return;
+ }
+
+ nanos += now;
+ rep_ = (static_cast<uint64_t>(nanos) << 1) | uint64_t{1};
+}
+
+int64_t KernelTimeout::MakeAbsNanos() const {
+ if (!has_timeout()) {
+ return kMaxNanos;
+ }
+
+ int64_t nanos = RawAbsNanos();
+
+ if (is_relative_timeout()) {
+ // We need to change epochs, because the relative timeout might be
+ // represented by an absolute timestamp from another clock.
+ nanos = std::max<int64_t>(nanos - SteadyClockNow(), 0);
+ int64_t now = absl::GetCurrentTimeNanos();
+ if (nanos > kMaxNanos - now) {
+ // Overflow.
+ nanos = kMaxNanos;
+ } else {
+ nanos += now;
+ }
+ } else if (nanos == 0) {
+ // Some callers have assumed that 0 means no timeout, so instead we return a
+ // time of 1 nanosecond after the epoch.
+ nanos = 1;
+ }
+
+ return nanos;
+}
+
+int64_t KernelTimeout::InNanosecondsFromNow() const {
+ if (!has_timeout()) {
+ return kMaxNanos;
+ }
+
+ int64_t nanos = RawAbsNanos();
+ if (is_absolute_timeout()) {
+ return std::max<int64_t>(nanos - absl::GetCurrentTimeNanos(), 0);
+ }
+ return std::max<int64_t>(nanos - SteadyClockNow(), 0);
+}
+
+struct timespec KernelTimeout::MakeAbsTimespec() const {
+ return absl::ToTimespec(absl::Nanoseconds(MakeAbsNanos()));
+}
+
+struct timespec KernelTimeout::MakeRelativeTimespec() const {
+ return absl::ToTimespec(absl::Nanoseconds(InNanosecondsFromNow()));
+}
+
+#ifndef _WIN32
+struct timespec KernelTimeout::MakeClockAbsoluteTimespec(clockid_t c) const {
+ if (!has_timeout()) {
+ return absl::ToTimespec(absl::Nanoseconds(kMaxNanos));
+ }
+
+ int64_t nanos = RawAbsNanos();
+ if (is_absolute_timeout()) {
+ nanos -= absl::GetCurrentTimeNanos();
+ } else {
+ nanos -= SteadyClockNow();
+ }
+
+ struct timespec now;
+ ABSL_RAW_CHECK(clock_gettime(c, &now) == 0, "clock_gettime() failed");
+ absl::Duration from_clock_epoch =
+ absl::DurationFromTimespec(now) + absl::Nanoseconds(nanos);
+ if (from_clock_epoch <= absl::ZeroDuration()) {
+ // Some callers have assumed that 0 means no timeout, so instead we return a
+ // time of 1 nanosecond after the epoch. For safety we also do not return
+ // negative values.
+ return absl::ToTimespec(absl::Nanoseconds(1));
+ }
+ return absl::ToTimespec(from_clock_epoch);
+}
+#endif
+
+KernelTimeout::DWord KernelTimeout::InMillisecondsFromNow() const {
+ constexpr DWord kInfinite = std::numeric_limits<DWord>::max();
+
+ if (!has_timeout()) {
+ return kInfinite;
+ }
+
+ constexpr uint64_t kNanosInMillis = uint64_t{1'000'000};
+ constexpr uint64_t kMaxValueNanos =
+ std::numeric_limits<int64_t>::max() - kNanosInMillis + 1;
+
+ uint64_t ns_from_now = static_cast<uint64_t>(InNanosecondsFromNow());
+ if (ns_from_now >= kMaxValueNanos) {
+ // Rounding up would overflow.
+ return kInfinite;
+ }
+ // Convert to milliseconds, always rounding up.
+ uint64_t ms_from_now = (ns_from_now + kNanosInMillis - 1) / kNanosInMillis;
+ if (ms_from_now > kInfinite) {
+ return kInfinite;
+ }
+ return static_cast<DWord>(ms_from_now);
+}
+
+std::chrono::time_point<std::chrono::system_clock>
+KernelTimeout::ToChronoTimePoint() const {
+ if (!has_timeout()) {
+ return std::chrono::time_point<std::chrono::system_clock>::max();
+ }
+
+ // The cast to std::microseconds is because (on some platforms) the
+  // std::ratio used by std::chrono::system_clock doesn't convert to
+ // std::nanoseconds, so it doesn't compile.
+ auto micros = std::chrono::duration_cast<std::chrono::microseconds>(
+ std::chrono::nanoseconds(MakeAbsNanos()));
+ return std::chrono::system_clock::from_time_t(0) + micros;
+}
+
+std::chrono::nanoseconds KernelTimeout::ToChronoDuration() const {
+ if (!has_timeout()) {
+ return std::chrono::nanoseconds::max();
+ }
+ return std::chrono::nanoseconds(InNanosecondsFromNow());
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
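
The new representation packs everything into one word: the high 63 bits hold an absolute nanosecond count and the low bit records which clock's epoch it is measured against. A sketch of that tagged encoding (these free functions are illustrative stand-ins, not the class's private API):

    #include <cstdint>

    constexpr uint64_t kNoTimeout = ~uint64_t{0};  // all-ones: infinite

    // Low bit 0: nanoseconds since the unix epoch (absolute timeout).
    uint64_t EncodeAbsolute(int64_t unix_nanos) {
      return static_cast<uint64_t>(unix_nanos) << 1;
    }

    // Low bit 1: nanoseconds since the steady-clock epoch (relative timeout).
    uint64_t EncodeRelative(int64_t steady_nanos) {
      return (static_cast<uint64_t>(steady_nanos) << 1) | uint64_t{1};
    }

    bool HasTimeout(uint64_t rep) { return rep != kNoTimeout; }
    bool IsRelative(uint64_t rep) { return (rep & 1) == 1; }
    int64_t DecodeNanos(uint64_t rep) { return static_cast<int64_t>(rep >> 1); }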
diff --git a/absl/synchronization/internal/kernel_timeout.h b/absl/synchronization/internal/kernel_timeout.h
index f5c2c0ef..06404a75 100644
--- a/absl/synchronization/internal/kernel_timeout.h
+++ b/absl/synchronization/internal/kernel_timeout.h
@@ -11,26 +11,21 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
-
-// An optional absolute timeout, with nanosecond granularity,
-// compatible with absl::Time. Suitable for in-register
-// parameter-passing (e.g. syscalls.)
-// Constructible from a absl::Time (for a timeout to be respected) or {}
-// (for "no timeout".)
-// This is a private low-level API for use by a handful of low-level
-// components. Higher-level components should build APIs based on
-// absl::Time and absl::Duration.
#ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
-#include <time.h>
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
#include <algorithm>
+#include <chrono> // NOLINT(build/c++11)
#include <cstdint>
+#include <ctime>
#include <limits>
+#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
@@ -39,58 +34,73 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
-class Waiter;
-
+// An optional timeout, with nanosecond granularity.
+//
+// This is a private low-level API for use by a handful of low-level
+// components. Higher-level components should build APIs based on
+// absl::Time and absl::Duration.
class KernelTimeout {
public:
- // A timeout that should expire at <t>. Any value, in the full
- // InfinitePast() to InfiniteFuture() range, is valid here and will be
- // respected.
- explicit KernelTimeout(absl::Time t) : ns_(MakeNs(t)) {}
- // No timeout.
- KernelTimeout() : ns_(0) {}
+ // Construct an absolute timeout that should expire at `t`.
+ explicit KernelTimeout(absl::Time t);
- // A more explicit factory for those who prefer it. Equivalent to {}.
- static KernelTimeout Never() { return {}; }
+ // Construct a relative timeout that should expire after `d`.
+ explicit KernelTimeout(absl::Duration d);
- // We explicitly do not support other custom formats: timespec, int64_t nanos.
- // Unify on this and absl::Time, please.
+ // Infinite timeout.
+ constexpr KernelTimeout() : rep_(kNoTimeout) {}
- bool has_timeout() const { return ns_ != 0; }
+ // A more explicit factory for those who prefer it.
+ // Equivalent to `KernelTimeout()`.
+ static constexpr KernelTimeout Never() { return KernelTimeout(); }
- // Convert to parameter for sem_timedwait/futex/similar. Only for approved
- // users. Do not call if !has_timeout.
+ // Returns true if there is a timeout that will eventually expire.
+ // Returns false if the timeout is infinite.
+ bool has_timeout() const { return rep_ != kNoTimeout; }
+
+ // If `has_timeout()` is true, returns true if the timeout was provided as an
+ // `absl::Time`. The return value is undefined if `has_timeout()` is false
+ // because all indefinite timeouts are equivalent.
+ bool is_absolute_timeout() const { return (rep_ & 1) == 0; }
+
+ // If `has_timeout()` is true, returns true if the timeout was provided as an
+ // `absl::Duration`. The return value is undefined if `has_timeout()` is false
+ // because all indefinite timeouts are equivalent.
+ bool is_relative_timeout() const { return (rep_ & 1) == 1; }
+
+ // Convert to `struct timespec` for interfaces that expect an absolute
+ // timeout. If !has_timeout() or is_relative_timeout(), attempts to convert to
+  // a reasonable absolute timeout, but callers should test has_timeout() and
+ // is_relative_timeout() and prefer to use a more appropriate interface.
struct timespec MakeAbsTimespec() const;
- // Convert to unix epoch nanos. Do not call if !has_timeout.
+ // Convert to `struct timespec` for interfaces that expect a relative
+ // timeout. If !has_timeout() or is_absolute_timeout(), attempts to convert to
+  // a reasonable relative timeout, but callers should test has_timeout() and
+ // is_absolute_timeout() and prefer to use a more appropriate interface. Since
+ // the return value is a relative duration, it should be recomputed by calling
+ // this method in the case of a spurious wakeup.
+ struct timespec MakeRelativeTimespec() const;
+
+#ifndef _WIN32
+ // Convert to `struct timespec` for interfaces that expect an absolute timeout
+ // on a specific clock `c`. This is similar to `MakeAbsTimespec()`, but
+ // callers usually want to use this method with `CLOCK_MONOTONIC` when
+ // relative timeouts are requested, and when the appropriate interface expects
+ // an absolute timeout relative to a specific clock (for example,
+ // pthread_cond_clockwait() or sem_clockwait()). If !has_timeout(), attempts
+  // to convert to a reasonable absolute timeout, but callers should test
+  // has_timeout() and prefer to use a more appropriate interface.
+ struct timespec MakeClockAbsoluteTimespec(clockid_t c) const;
+#endif
+
+ // Convert to unix epoch nanos for interfaces that expect an absolute timeout
+ // in nanoseconds. If !has_timeout() or is_relative_timeout(), attempts to
+  // convert to a reasonable absolute timeout, but callers should test
+ // has_timeout() and is_relative_timeout() and prefer to use a more
+ // appropriate interface.
int64_t MakeAbsNanos() const;
- private:
- // internal rep, not user visible: ns after unix epoch.
- // zero = no timeout.
- // Negative we treat as an unlikely (and certainly expired!) but valid
- // timeout.
- int64_t ns_;
-
- static int64_t MakeNs(absl::Time t) {
- // optimization--InfiniteFuture is common "no timeout" value
- // and cheaper to compare than convert.
- if (t == absl::InfiniteFuture()) return 0;
- int64_t x = ToUnixNanos(t);
-
- // A timeout that lands exactly on the epoch (x=0) needs to be respected,
- // so we alter it unnoticably to 1. Negative timeouts are in
- // theory supported, but handled poorly by the kernel (long
- // delays) so push them forward too; since all such times have
- // already passed, it's indistinguishable.
- if (x <= 0) x = 1;
- // A time larger than what can be represented to the kernel is treated
- // as no timeout.
- if (x == (std::numeric_limits<int64_t>::max)()) x = 0;
- return x;
- }
-
-#ifdef _WIN32
// Converts to milliseconds from now, or INFINITE when
// !has_timeout(). For use by SleepConditionVariableSRW on
// Windows. Callers should recognize that the return value is a
@@ -100,68 +110,66 @@ class KernelTimeout {
// so we define our own DWORD and INFINITE instead of getting them from
// <intsafe.h> and <WinBase.h>.
typedef unsigned long DWord; // NOLINT
- DWord InMillisecondsFromNow() const {
- constexpr DWord kInfinite = (std::numeric_limits<DWord>::max)();
- if (!has_timeout()) {
- return kInfinite;
- }
- // The use of absl::Now() to convert from absolute time to
- // relative time means that absl::Now() cannot use anything that
- // depends on KernelTimeout (for example, Mutex) on Windows.
- int64_t now = ToUnixNanos(absl::Now());
- if (ns_ >= now) {
- // Round up so that Now() + ms_from_now >= ns_.
- constexpr uint64_t max_nanos =
- (std::numeric_limits<int64_t>::max)() - 999999u;
- uint64_t ms_from_now =
- ((std::min)(max_nanos, static_cast<uint64_t>(ns_ - now)) + 999999u) /
- 1000000u;
- if (ms_from_now > kInfinite) {
- return kInfinite;
- }
- return static_cast<DWord>(ms_from_now);
- }
- return 0;
- }
-
- friend class Waiter;
-#endif
-};
+ DWord InMillisecondsFromNow() const;
+
+ // Convert to std::chrono::time_point for interfaces that expect an absolute
+ // timeout, like std::condition_variable::wait_until(). If !has_timeout() or
+ // is_relative_timeout(), attempts to convert to a reasonable absolute
+ // timeout, but callers should test has_timeout() and is_relative_timeout()
+ // and prefer to use a more appropriate interface.
+ std::chrono::time_point<std::chrono::system_clock> ToChronoTimePoint() const;
+
+ // Convert to std::chrono::time_point for interfaces that expect a relative
+ // timeout, like std::condition_variable::wait_for(). If !has_timeout() or
+ // is_absolute_timeout(), attempts to convert to a reasonable relative
+ // timeout, but callers should test has_timeout() and is_absolute_timeout()
+ // and prefer to use a more appropriate interface. Since the return value is a
+ // relative duration, it should be recomputed by calling this method in the
+ // case of a spurious wakeup.
+ std::chrono::nanoseconds ToChronoDuration() const;
+
+ // Returns true if steady (aka monotonic) clocks are supported by the system.
+ // This method exists because go/btm requires synchronized clocks, and
+ // thus requires we use the system (aka walltime) clock.
+ static constexpr bool SupportsSteadyClock() { return true; }
-inline struct timespec KernelTimeout::MakeAbsTimespec() const {
- int64_t n = ns_;
- static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
- if (n == 0) {
- ABSL_RAW_LOG(
- ERROR, "Tried to create a timespec from a non-timeout; never do this.");
- // But we'll try to continue sanely. no-timeout ~= saturated timeout.
- n = (std::numeric_limits<int64_t>::max)();
- }
-
- // Kernel APIs validate timespecs as being at or after the epoch,
- // despite the kernel time type being signed. However, no one can
- // tell the difference between a timeout at or before the epoch (since
- // all such timeouts have expired!)
- if (n < 0) n = 0;
-
- struct timespec abstime;
- int64_t seconds = (std::min)(n / kNanosPerSecond,
- int64_t{(std::numeric_limits<time_t>::max)()});
- abstime.tv_sec = static_cast<time_t>(seconds);
- abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
- return abstime;
-}
-
-inline int64_t KernelTimeout::MakeAbsNanos() const {
- if (ns_ == 0) {
- ABSL_RAW_LOG(
- ERROR, "Tried to create a timeout from a non-timeout; never do this.");
- // But we'll try to continue sanely. no-timeout ~= saturated timeout.
- return (std::numeric_limits<int64_t>::max)();
- }
-
- return ns_;
-}
+ private:
+ // Returns the current time, expressed as a count of nanoseconds since the
+ // epoch used by an arbitrary clock. The implementation tries to use a steady
+ // (monotonic) clock if one is available.
+ static int64_t SteadyClockNow();
+
+ // Internal representation.
+ // - If the value is kNoTimeout, then the timeout is infinite, and
+  //   has_timeout() will return false.
+ // - If the low bit is 0, then the high 63 bits is the number of nanoseconds
+ // after the unix epoch.
+ // - If the low bit is 1, then the high 63 bits is the number of nanoseconds
+ // after the epoch used by SteadyClockNow().
+ //
+ // In all cases the time is stored as an absolute time, the only difference is
+ // the clock epoch. The use of absolute times is important since in the case
+ // of a relative timeout with a spurious wakeup, the program would have to
+ // restart the wait, and thus needs a way of recomputing the remaining time.
+ uint64_t rep_;
+
+ // Returns the number of nanoseconds stored in the internal representation.
+ // When combined with the clock epoch indicated by the low bit (which is
+ // accessed through is_absolute_timeout() and is_relative_timeout()), the
+ // return value is used to compute when the timeout should occur.
+ int64_t RawAbsNanos() const { return static_cast<int64_t>(rep_ >> 1); }
+
+ // Converts to nanoseconds from now. Since the return value is a relative
+ // duration, it should be recomputed by calling this method in the case of a
+ // spurious wakeup.
+ int64_t InNanosecondsFromNow() const;
+
+ // A value that represents no timeout (or an infinite timeout).
+ static constexpr uint64_t kNoTimeout = (std::numeric_limits<uint64_t>::max)();
+
+ // The maximum value that can be stored in the high 63 bits.
+ static constexpr int64_t kMaxNanos = (std::numeric_limits<int64_t>::max)();
+};
} // namespace synchronization_internal
ABSL_NAMESPACE_END
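
A short usage sketch of the expanded API under these new semantics (the deadline and budget values are arbitrary; this is internal API shown for illustration only):

    #include <ctime>

    #include "absl/synchronization/internal/kernel_timeout.h"
    #include "absl/time/clock.h"
    #include "absl/time/time.h"

    using absl::synchronization_internal::KernelTimeout;

    void Example() {
      // Built from absl::Time: an absolute timeout against the realtime clock.
      KernelTimeout deadline(absl::Now() + absl::Seconds(5));

      // Built from absl::Duration: a relative timeout tracked against the
      // steady clock where supported, so wall-clock steps don't skew the wait.
      KernelTimeout budget(absl::Milliseconds(50));

      if (budget.has_timeout() && budget.is_relative_timeout()) {
        struct timespec ts = budget.MakeRelativeTimespec();
        // Pass &ts to an interface taking a relative timespec; recompute it
        // after a spurious wakeup, since the remaining time has shrunk.
        (void)ts;
      }
      (void)deadline;
    }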
diff --git a/absl/synchronization/internal/kernel_timeout_test.cc b/absl/synchronization/internal/kernel_timeout_test.cc
new file mode 100644
index 00000000..92ed2691
--- /dev/null
+++ b/absl/synchronization/internal/kernel_timeout_test.cc
@@ -0,0 +1,394 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+#include <ctime>
+#include <chrono> // NOLINT(build/c++11)
+#include <limits>
+
+#include "absl/base/config.h"
+#include "absl/random/random.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+#include "gtest/gtest.h"
+
+// Test go/btm support by randomizing the value of clock_gettime() for
+// CLOCK_MONOTONIC. This works by overriding a weak symbol in glibc.
+// We should be resistant to this randomization when !SupportsSteadyClock().
+#if defined(__GOOGLE_GRTE_VERSION__) && \
+ !defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+ !defined(ABSL_HAVE_MEMORY_SANITIZER) && \
+ !defined(ABSL_HAVE_THREAD_SANITIZER)
+extern "C" int __clock_gettime(clockid_t c, struct timespec* ts);
+
+extern "C" int clock_gettime(clockid_t c, struct timespec* ts) {
+ if (c == CLOCK_MONOTONIC &&
+ !absl::synchronization_internal::KernelTimeout::SupportsSteadyClock()) {
+    thread_local absl::BitGen gen;  // NOLINT
+ ts->tv_sec = absl::Uniform(gen, 0, 1'000'000'000);
+ ts->tv_nsec = absl::Uniform(gen, 0, 1'000'000'000);
+ return 0;
+ }
+ return __clock_gettime(c, ts);
+}
+#endif
+
+namespace {
+
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_MEMORY_SANITIZER) || \
+ defined(ABSL_HAVE_THREAD_SANITIZER) || \
+ defined(__ANDROID__) || \
+ defined(_WIN32) || defined(_WIN64)
+constexpr absl::Duration kTimingBound = absl::Milliseconds(5);
+#else
+constexpr absl::Duration kTimingBound = absl::Microseconds(250);
+#endif
+
+using absl::synchronization_internal::KernelTimeout;
+
+TEST(KernelTimeout, FiniteTimes) {
+ constexpr absl::Duration kDurationsToTest[] = {
+ absl::ZeroDuration(),
+ absl::Nanoseconds(1),
+ absl::Microseconds(1),
+ absl::Milliseconds(1),
+ absl::Seconds(1),
+ absl::Minutes(1),
+ absl::Hours(1),
+ absl::Hours(1000),
+ -absl::Nanoseconds(1),
+ -absl::Microseconds(1),
+ -absl::Milliseconds(1),
+ -absl::Seconds(1),
+ -absl::Minutes(1),
+ -absl::Hours(1),
+ -absl::Hours(1000),
+ };
+
+ for (auto duration : kDurationsToTest) {
+ const absl::Time now = absl::Now();
+ const absl::Time when = now + duration;
+ SCOPED_TRACE(duration);
+ KernelTimeout t(when);
+ EXPECT_TRUE(t.has_timeout());
+ EXPECT_TRUE(t.is_absolute_timeout());
+ EXPECT_FALSE(t.is_relative_timeout());
+ EXPECT_EQ(absl::TimeFromTimespec(t.MakeAbsTimespec()), when);
+#ifndef _WIN32
+ EXPECT_LE(
+ absl::AbsDuration(absl::Now() + duration -
+ absl::TimeFromTimespec(
+ t.MakeClockAbsoluteTimespec(CLOCK_REALTIME))),
+ absl::Milliseconds(10));
+#endif
+ EXPECT_LE(
+ absl::AbsDuration(absl::DurationFromTimespec(t.MakeRelativeTimespec()) -
+ std::max(duration, absl::ZeroDuration())),
+ kTimingBound);
+ EXPECT_EQ(absl::FromUnixNanos(t.MakeAbsNanos()), when);
+ EXPECT_LE(absl::AbsDuration(absl::Milliseconds(t.InMillisecondsFromNow()) -
+ std::max(duration, absl::ZeroDuration())),
+ absl::Milliseconds(5));
+ EXPECT_LE(absl::AbsDuration(absl::FromChrono(t.ToChronoTimePoint()) - when),
+ absl::Microseconds(1));
+ EXPECT_LE(absl::AbsDuration(absl::FromChrono(t.ToChronoDuration()) -
+ std::max(duration, absl::ZeroDuration())),
+ kTimingBound);
+ }
+}
+
+TEST(KernelTimeout, InfiniteFuture) {
+ KernelTimeout t(absl::InfiniteFuture());
+ EXPECT_FALSE(t.has_timeout());
+ // Callers are expected to check has_timeout() instead of using the methods
+ // below, but we do try to do something reasonable if they don't. We may not
+ // be able to round-trip back to absl::InfiniteDuration() or
+ // absl::InfiniteFuture(), but we should return a very large value.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_EQ(t.InMillisecondsFromNow(),
+ std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, DefaultConstructor) {
+ // The default constructor is equivalent to absl::InfiniteFuture().
+ KernelTimeout t;
+ EXPECT_FALSE(t.has_timeout());
+ // Callers are expected to check has_timeout() instead of using the methods
+ // below, but we do try to do something reasonable if they don't. We may not
+ // be able to round-trip back to absl::InfiniteDuration() or
+ // absl::InfiniteFuture(), but we should return a very large value.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_EQ(t.InMillisecondsFromNow(),
+ std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, TimeMaxNanos) {
+ // Time >= kMaxNanos should behave as no timeout.
+ KernelTimeout t(absl::FromUnixNanos(std::numeric_limits<int64_t>::max()));
+ EXPECT_FALSE(t.has_timeout());
+ // Callers are expected to check has_timeout() instead of using the methods
+ // below, but we do try to do something reasonable if they don't. We may not
+ // be able to round-trip back to absl::InfiniteDuration() or
+ // absl::InfiniteFuture(), but we should return a very large value.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_EQ(t.InMillisecondsFromNow(),
+ std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, Never) {
+ // KernelTimeout::Never() is equivalent to absl::InfiniteFuture().
+ KernelTimeout t = KernelTimeout::Never();
+ EXPECT_FALSE(t.has_timeout());
+ // Callers are expected to check has_timeout() instead of using the methods
+ // below, but we do try to do something reasonable if they don't. We may not
+ // be able to round-trip back to absl::InfiniteDuration() or
+ // absl::InfiniteFuture(), but we should return a very large value.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_EQ(t.InMillisecondsFromNow(),
+ std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, InfinitePast) {
+ KernelTimeout t(absl::InfinitePast());
+ EXPECT_TRUE(t.has_timeout());
+ EXPECT_TRUE(t.is_absolute_timeout());
+ EXPECT_FALSE(t.is_relative_timeout());
+ EXPECT_LE(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::FromUnixNanos(1));
+#ifndef _WIN32
+ EXPECT_LE(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::FromUnixSeconds(1));
+#endif
+ EXPECT_EQ(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::ZeroDuration());
+ EXPECT_LE(absl::FromUnixNanos(t.MakeAbsNanos()), absl::FromUnixNanos(1));
+ EXPECT_EQ(t.InMillisecondsFromNow(), KernelTimeout::DWord{0});
+ EXPECT_LT(t.ToChronoTimePoint(), std::chrono::system_clock::from_time_t(0) +
+ std::chrono::seconds(1));
+ EXPECT_EQ(t.ToChronoDuration(), std::chrono::nanoseconds(0));
+}
+
+TEST(KernelTimeout, FiniteDurations) {
+ constexpr absl::Duration kDurationsToTest[] = {
+ absl::ZeroDuration(),
+ absl::Nanoseconds(1),
+ absl::Microseconds(1),
+ absl::Milliseconds(1),
+ absl::Seconds(1),
+ absl::Minutes(1),
+ absl::Hours(1),
+ absl::Hours(1000),
+ };
+
+ for (auto duration : kDurationsToTest) {
+ SCOPED_TRACE(duration);
+ KernelTimeout t(duration);
+ EXPECT_TRUE(t.has_timeout());
+ EXPECT_FALSE(t.is_absolute_timeout());
+ EXPECT_TRUE(t.is_relative_timeout());
+ EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
+ absl::TimeFromTimespec(t.MakeAbsTimespec())),
+ absl::Milliseconds(5));
+#ifndef _WIN32
+ EXPECT_LE(
+ absl::AbsDuration(absl::Now() + duration -
+ absl::TimeFromTimespec(
+ t.MakeClockAbsoluteTimespec(CLOCK_REALTIME))),
+ absl::Milliseconds(5));
+#endif
+ EXPECT_LE(
+ absl::AbsDuration(absl::DurationFromTimespec(t.MakeRelativeTimespec()) -
+ duration),
+ kTimingBound);
+ EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
+ absl::FromUnixNanos(t.MakeAbsNanos())),
+ absl::Milliseconds(5));
+ EXPECT_LE(absl::Milliseconds(t.InMillisecondsFromNow()) - duration,
+ absl::Milliseconds(5));
+ EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
+ absl::FromChrono(t.ToChronoTimePoint())),
+ kTimingBound);
+ EXPECT_LE(
+ absl::AbsDuration(absl::FromChrono(t.ToChronoDuration()) - duration),
+ kTimingBound);
+ }
+}
+
+TEST(KernelTimeout, NegativeDurations) {
+ constexpr absl::Duration kDurationsToTest[] = {
+ -absl::ZeroDuration(),
+ -absl::Nanoseconds(1),
+ -absl::Microseconds(1),
+ -absl::Milliseconds(1),
+ -absl::Seconds(1),
+ -absl::Minutes(1),
+ -absl::Hours(1),
+ -absl::Hours(1000),
+ -absl::InfiniteDuration(),
+ };
+
+ for (auto duration : kDurationsToTest) {
+ // Negative durations should all be converted to zero durations or "now".
+ SCOPED_TRACE(duration);
+ KernelTimeout t(duration);
+ EXPECT_TRUE(t.has_timeout());
+ EXPECT_FALSE(t.is_absolute_timeout());
+ EXPECT_TRUE(t.is_relative_timeout());
+ EXPECT_LE(absl::AbsDuration(absl::Now() -
+ absl::TimeFromTimespec(t.MakeAbsTimespec())),
+ absl::Milliseconds(5));
+#ifndef _WIN32
+ EXPECT_LE(absl::AbsDuration(absl::Now() - absl::TimeFromTimespec(
+ t.MakeClockAbsoluteTimespec(
+ CLOCK_REALTIME))),
+ absl::Milliseconds(5));
+#endif
+ EXPECT_EQ(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::ZeroDuration());
+ EXPECT_LE(
+ absl::AbsDuration(absl::Now() - absl::FromUnixNanos(t.MakeAbsNanos())),
+ absl::Milliseconds(5));
+ EXPECT_EQ(t.InMillisecondsFromNow(), KernelTimeout::DWord{0});
+ EXPECT_LE(absl::AbsDuration(absl::Now() -
+ absl::FromChrono(t.ToChronoTimePoint())),
+ absl::Milliseconds(5));
+ EXPECT_EQ(t.ToChronoDuration(), std::chrono::nanoseconds(0));
+ }
+}
+
+TEST(KernelTimeout, InfiniteDuration) {
+ KernelTimeout t(absl::InfiniteDuration());
+ EXPECT_FALSE(t.has_timeout());
+ // Callers are expected to check has_timeout() instead of using the methods
+ // below, but we do try to do something reasonable if they don't. We may not
+ // be able to round-trip back to absl::InfiniteDuration() or
+ // absl::InfiniteFuture(), but we should return a very large value.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_EQ(t.InMillisecondsFromNow(),
+ std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, DurationMaxNanos) {
+ // Duration >= kMaxNanos should behave as no timeout.
+ KernelTimeout t(absl::Nanoseconds(std::numeric_limits<int64_t>::max()));
+ EXPECT_FALSE(t.has_timeout());
+ // Callers are expected to check has_timeout() instead of using the methods
+ // below, but we do try to do something reasonable if they don't. We may not
+ // be able to round-trip back to absl::InfiniteDuration() or
+ // absl::InfiniteFuture(), but we should return a very large value.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_EQ(t.InMillisecondsFromNow(),
+ std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, OverflowNanos) {
+ // Test what happens when KernelTimeout is constructed with an absl::Duration
+ // that would overflow now_nanos + duration.
+ int64_t now_nanos = absl::ToUnixNanos(absl::Now());
+ int64_t limit = std::numeric_limits<int64_t>::max() - now_nanos;
+ absl::Duration duration = absl::Nanoseconds(limit) + absl::Seconds(1);
+ KernelTimeout t(duration);
+ // Timeouts should still be far in the future.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_LE(absl::Milliseconds(t.InMillisecondsFromNow()) - duration,
+ absl::Milliseconds(5));
+ EXPECT_GT(t.ToChronoTimePoint(),
+ std::chrono::system_clock::now() + std::chrono::hours(100000));
+ EXPECT_GT(t.ToChronoDuration(), std::chrono::hours(100000));
+}
+
+} // namespace
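[Editor's note] The tests above pin down the three states a KernelTimeout can be in. A minimal sketch of the distinction they exercise (internal API, shown only to make absolute vs. relative concrete):

    #include "absl/synchronization/internal/kernel_timeout.h"
    #include "absl/time/clock.h"
    #include "absl/time/time.h"

    using absl::synchronization_internal::KernelTimeout;

    void Sketch() {
      // Relative: "wake me 500ms from now". Kept as a duration so that,
      // where supported, it can be waited against a steady clock.
      KernelTimeout rel(absl::Milliseconds(500));
      // rel.has_timeout() && rel.is_relative_timeout()

      // Absolute: "wake me at this wall-clock instant"; pinned to the
      // realtime clock.
      KernelTimeout abs(absl::Now() + absl::Milliseconds(500));
      // abs.has_timeout() && abs.is_absolute_timeout()

      // Infinite inputs collapse to "no timeout"; conversions then return
      // saturated, very large values, as the tests verify.
      KernelTimeout never = KernelTimeout::Never();
      // !never.has_timeout()
    }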
diff --git a/absl/synchronization/internal/per_thread_sem.cc b/absl/synchronization/internal/per_thread_sem.cc
index 469e8f32..c9b8dc1e 100644
--- a/absl/synchronization/internal/per_thread_sem.cc
+++ b/absl/synchronization/internal/per_thread_sem.cc
@@ -40,13 +40,6 @@ std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
return identity->blocked_count_ptr;
}
-void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
- new (Waiter::GetWaiter(identity)) Waiter();
- identity->ticker.store(0, std::memory_order_relaxed);
- identity->wait_start.store(0, std::memory_order_relaxed);
- identity->is_idle.store(false, std::memory_order_relaxed);
-}
-
void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
const int ticker =
identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
@@ -54,7 +47,7 @@ void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
// Wakeup the waiting thread since it is time for it to become idle.
- Waiter::GetWaiter(identity)->Poke();
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(identity);
}
}
@@ -64,11 +57,22 @@ ABSL_NAMESPACE_END
extern "C" {
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
+ absl::base_internal::ThreadIdentity *identity) {
+ new (absl::synchronization_internal::Waiter::GetWaiter(identity))
+ absl::synchronization_internal::Waiter();
+}
+
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
absl::base_internal::ThreadIdentity *identity) {
absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
}
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
+ absl::base_internal::ThreadIdentity *identity) {
+ absl::synchronization_internal::Waiter::GetWaiter(identity)->Poke();
+}
+
ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
absl::synchronization_internal::KernelTimeout t) {
bool timeout = false;
diff --git a/absl/synchronization/internal/per_thread_sem.h b/absl/synchronization/internal/per_thread_sem.h
index 90a88809..144ab3cd 100644
--- a/absl/synchronization/internal/per_thread_sem.h
+++ b/absl/synchronization/internal/per_thread_sem.h
@@ -64,7 +64,7 @@ class PerThreadSem {
private:
// Create the PerThreadSem associated with "identity". Initializes count=0.
// REQUIRES: May only be called by ThreadIdentity.
- static void Init(base_internal::ThreadIdentity* identity);
+ static inline void Init(base_internal::ThreadIdentity* identity);
// Increments "identity"'s count.
static inline void Post(base_internal::ThreadIdentity* identity);
@@ -91,12 +91,21 @@ ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
+ absl::base_internal::ThreadIdentity* identity);
void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
absl::base_internal::ThreadIdentity* identity);
bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
absl::synchronization_internal::KernelTimeout t);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
+ absl::base_internal::ThreadIdentity* identity);
} // extern "C"
+void absl::synchronization_internal::PerThreadSem::Init(
+ absl::base_internal::ThreadIdentity* identity) {
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(identity);
+}
+
void absl::synchronization_internal::PerThreadSem::Post(
absl::base_internal::ThreadIdentity* identity) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
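[Editor's note] The weak extern "C" definitions are extension points: linking a strong definition of the same symbol into a binary replaces the default. A hypothetical override of the new Poke hook (sketch only; the body mirrors the weak default in per_thread_sem.cc):

    #include "absl/synchronization/internal/per_thread_sem.h"

    extern "C" void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
        absl::base_internal::ThreadIdentity* identity) {
      // Custom instrumentation could go here, then perform the default
      // work exactly as the weak definition does.
      absl::synchronization_internal::Waiter::GetWaiter(identity)->Poke();
    }

Because the override is extern "C", it also sidesteps the check referenced in the header comment above.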
diff --git a/absl/synchronization/internal/pthread_waiter.cc b/absl/synchronization/internal/pthread_waiter.cc
new file mode 100644
index 00000000..bf700e95
--- /dev/null
+++ b/absl/synchronization/internal/pthread_waiter.cc
@@ -0,0 +1,167 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/pthread_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_PTHREAD_WAITER
+
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include <cassert>
+#include <cerrno>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+namespace {
+class PthreadMutexHolder {
+ public:
+ explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
+ const int err = pthread_mutex_lock(mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
+ }
+ }
+
+ PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
+ PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
+
+ ~PthreadMutexHolder() {
+ const int err = pthread_mutex_unlock(mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
+ }
+ }
+
+ private:
+ pthread_mutex_t *mu_;
+};
+} // namespace
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char PthreadWaiter::kName[];
+#endif
+
+PthreadWaiter::PthreadWaiter() : waiter_count_(0), wakeup_count_(0) {
+ const int err = pthread_mutex_init(&mu_, 0);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
+ }
+
+ const int err2 = pthread_cond_init(&cv_, 0);
+ if (err2 != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
+ }
+}
+
+#ifdef __APPLE__
+#define ABSL_INTERNAL_HAS_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP 1
+#endif
+
+#if defined(__GLIBC__) && \
+ (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30))
+#define ABSL_INTERNAL_HAVE_PTHREAD_COND_CLOCKWAIT 1
+#elif defined(__ANDROID_API__) && __ANDROID_API__ >= 30
+#define ABSL_INTERNAL_HAVE_PTHREAD_COND_CLOCKWAIT 1
+#endif
+
+// Calls pthread_cond_timedwait() or possibly something else like
+// pthread_cond_timedwait_relative_np() depending on the platform and
+// KernelTimeout requested. The return value is the same as the return
+// value of pthread_cond_timedwait().
+int PthreadWaiter::TimedWait(KernelTimeout t) {
+ assert(t.has_timeout());
+ if (KernelTimeout::SupportsSteadyClock() && t.is_relative_timeout()) {
+#ifdef ABSL_INTERNAL_HAS_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
+ const auto rel_timeout = t.MakeRelativeTimespec();
+ return pthread_cond_timedwait_relative_np(&cv_, &mu_, &rel_timeout);
+#elif defined(ABSL_INTERNAL_HAVE_PTHREAD_COND_CLOCKWAIT) && \
+ defined(CLOCK_MONOTONIC)
+ const auto abs_clock_timeout = t.MakeClockAbsoluteTimespec(CLOCK_MONOTONIC);
+ return pthread_cond_clockwait(&cv_, &mu_, CLOCK_MONOTONIC,
+ &abs_clock_timeout);
+#endif
+ }
+
+ const auto abs_timeout = t.MakeAbsTimespec();
+ return pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
+}
+
+bool PthreadWaiter::Wait(KernelTimeout t) {
+ PthreadMutexHolder h(&mu_);
+ ++waiter_count_;
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!t.has_timeout()) {
+ const int err = pthread_cond_wait(&cv_, &mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
+ }
+ } else {
+ const int err = TimedWait(t);
+ if (err == ETIMEDOUT) {
+ --waiter_count_;
+ return false;
+ }
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "PthreadWaiter::TimedWait() failed: %d", err);
+ }
+ }
+ first_pass = false;
+ }
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void PthreadWaiter::Post() {
+ PthreadMutexHolder h(&mu_);
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void PthreadWaiter::Poke() {
+ PthreadMutexHolder h(&mu_);
+ InternalCondVarPoke();
+}
+
+void PthreadWaiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ const int err = pthread_cond_signal(&cv_);
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
+ }
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_PTHREAD_WAITER
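[Editor's note] TimedWait() prefers a monotonic wait for relative timeouts because the portable fallback must pin the deadline to CLOCK_REALTIME; if the wall clock is stepped mid-wait, the effective sleep length changes with it. A sketch of what the fallback conversion amounts to (illustrative, with manual timespec normalization):

    #include <time.h>

    // now(CLOCK_REALTIME) + rel, normalized. This is the deadline that
    // pthread_cond_timedwait() sleeps until; a wall-clock step moves it.
    struct timespec RealtimeDeadline(struct timespec rel) {
      struct timespec now;
      clock_gettime(CLOCK_REALTIME, &now);
      struct timespec abs_deadline;
      abs_deadline.tv_sec = now.tv_sec + rel.tv_sec;
      abs_deadline.tv_nsec = now.tv_nsec + rel.tv_nsec;
      if (abs_deadline.tv_nsec >= 1000000000L) {
        abs_deadline.tv_sec += 1;
        abs_deadline.tv_nsec -= 1000000000L;
      }
      return abs_deadline;
    }

pthread_cond_timedwait_relative_np() and pthread_cond_clockwait(CLOCK_MONOTONIC) avoid this by measuring against a clock that never steps.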
diff --git a/absl/synchronization/internal/pthread_waiter.h b/absl/synchronization/internal/pthread_waiter.h
new file mode 100644
index 00000000..206aefa4
--- /dev/null
+++ b/absl/synchronization/internal/pthread_waiter.h
@@ -0,0 +1,60 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+
+#include "absl/base/config.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_PTHREAD_WAITER 1
+
+class PthreadWaiter : public WaiterCrtp<PthreadWaiter> {
+ public:
+ PthreadWaiter();
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "PthreadWaiter";
+
+ private:
+ int TimedWait(KernelTimeout t);
+
+ // REQUIRES: mu_ must be held.
+ void InternalCondVarPoke();
+
+ pthread_mutex_t mu_;
+ pthread_cond_t cv_;
+ int waiter_count_;
+ int wakeup_count_; // Unclaimed wakeups.
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ndef _WIN32
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_
diff --git a/absl/synchronization/internal/sem_waiter.cc b/absl/synchronization/internal/sem_waiter.cc
new file mode 100644
index 00000000..d62dbdc7
--- /dev/null
+++ b/absl/synchronization/internal/sem_waiter.cc
@@ -0,0 +1,122 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/sem_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_SEM_WAITER
+
+#include <semaphore.h>
+
+#include <atomic>
+#include <cassert>
+#include <cerrno>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char SemWaiter::kName[];
+#endif
+
+SemWaiter::SemWaiter() : wakeups_(0) {
+ if (sem_init(&sem_, 0, 0) != 0) {
+ ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
+ }
+}
+
+#if defined(__GLIBC__) && \
+ (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30))
+#define ABSL_INTERNAL_HAVE_SEM_CLOCKWAIT 1
+#elif defined(__ANDROID_API__) && __ANDROID_API__ >= 30
+#define ABSL_INTERNAL_HAVE_SEM_CLOCKWAIT 1
+#endif
+
+// Calls sem_timedwait() or possibly something else like
+// sem_clockwait() depending on the platform and
+// KernelTimeout requested. The return value is the same as the return
+// value of sem_timedwait().
+int SemWaiter::TimedWait(KernelTimeout t) {
+ if (KernelTimeout::SupportsSteadyClock() && t.is_relative_timeout()) {
+#if defined(ABSL_INTERNAL_HAVE_SEM_CLOCKWAIT) && defined(CLOCK_MONOTONIC)
+ const auto abs_clock_timeout = t.MakeClockAbsoluteTimespec(CLOCK_MONOTONIC);
+ return sem_clockwait(&sem_, CLOCK_MONOTONIC, &abs_clock_timeout);
+#endif
+ }
+
+ const auto abs_timeout = t.MakeAbsTimespec();
+ return sem_timedwait(&sem_, &abs_timeout);
+}
+
+bool SemWaiter::Wait(KernelTimeout t) {
+ // Loop until we timeout or consume a wakeup.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (true) {
+ int x = wakeups_.load(std::memory_order_relaxed);
+ while (x != 0) {
+ if (!wakeups_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ // Successfully consumed a wakeup, we're done.
+ return true;
+ }
+
+ if (!first_pass) MaybeBecomeIdle();
+ // Nothing to consume, wait (looping on EINTR).
+ while (true) {
+ if (!t.has_timeout()) {
+ if (sem_wait(&sem_) == 0) break;
+ if (errno == EINTR) continue;
+ ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
+ } else {
+ if (TimedWait(t) == 0) break;
+ if (errno == EINTR) continue;
+ if (errno == ETIMEDOUT) return false;
+ ABSL_RAW_LOG(FATAL, "SemWaiter::TimedWait() failed: %d", errno);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+void SemWaiter::Post() {
+ // Post a wakeup.
+ if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waiter.
+ Poke();
+ }
+}
+
+void SemWaiter::Poke() {
+ if (sem_post(&sem_) != 0) { // Wake any semaphore waiter.
+ ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_SEM_WAITER
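[Editor's note] The split between the semaphore and the wakeups_ counter is the crux: Post() publishes a claimable wakeup, while Poke() wakes a sleeper without publishing one, producing a deliberate spurious wakeup so the thread can run MaybeBecomeIdle(). A stripped-down model of the protocol (sketch only, not the real API):

    #include <semaphore.h>
    #include <atomic>

    std::atomic<int> wakeups{0};
    sem_t sem;  // assume sem_init(&sem, 0, 0) succeeded

    void ModelPost() {
      // A 0 -> 1 transition means a sleeper may exist; wake one.
      if (wakeups.fetch_add(1, std::memory_order_release) == 0) sem_post(&sem);
    }

    void ModelPoke() { sem_post(&sem); }  // spurious: wakeups unchanged

    bool ModelTryConsume() {  // what Wait() loops on around sem_wait()
      int x = wakeups.load(std::memory_order_relaxed);
      while (x != 0) {
        if (wakeups.compare_exchange_weak(x, x - 1, std::memory_order_acquire,
                                          std::memory_order_relaxed)) {
          return true;  // claimed a real wakeup
        }
      }
      return false;  // woken by Poke(); caller may become idle and re-wait
    }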
diff --git a/absl/synchronization/internal/sem_waiter.h b/absl/synchronization/internal/sem_waiter.h
new file mode 100644
index 00000000..c22746f9
--- /dev/null
+++ b/absl/synchronization/internal/sem_waiter.h
@@ -0,0 +1,65 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_
+
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#include <semaphore.h>
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/futex.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_SEM_WAITER 1
+
+class SemWaiter : public WaiterCrtp<SemWaiter> {
+ public:
+ SemWaiter();
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "SemWaiter";
+
+ private:
+ int TimedWait(KernelTimeout t);
+
+ sem_t sem_;
+
+ // This seems superfluous, but for Poke() we need to cause spurious
+ // wakeups on the semaphore. Hence we can't actually use the
+ // semaphore's count.
+ std::atomic<int> wakeups_;
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_HAVE_SEMAPHORE_H
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_
diff --git a/absl/synchronization/internal/stdcpp_waiter.cc b/absl/synchronization/internal/stdcpp_waiter.cc
new file mode 100644
index 00000000..355718a7
--- /dev/null
+++ b/absl/synchronization/internal/stdcpp_waiter.cc
@@ -0,0 +1,91 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/stdcpp_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_STDCPP_WAITER
+
+#include <chrono> // NOLINT(build/c++11)
+#include <condition_variable> // NOLINT(build/c++11)
+#include <mutex> // NOLINT(build/c++11)
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char StdcppWaiter::kName[];
+#endif
+
+StdcppWaiter::StdcppWaiter() : waiter_count_(0), wakeup_count_(0) {}
+
+bool StdcppWaiter::Wait(KernelTimeout t) {
+ std::unique_lock<std::mutex> lock(mu_);
+ ++waiter_count_;
+
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!t.has_timeout()) {
+ cv_.wait(lock);
+ } else {
+ auto wait_result = t.SupportsSteadyClock() && t.is_relative_timeout()
+ ? cv_.wait_for(lock, t.ToChronoDuration())
+ : cv_.wait_until(lock, t.ToChronoTimePoint());
+ if (wait_result == std::cv_status::timeout) {
+ --waiter_count_;
+ return false;
+ }
+ }
+ first_pass = false;
+ }
+
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void StdcppWaiter::Post() {
+ std::lock_guard<std::mutex> lock(mu_);
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void StdcppWaiter::Poke() {
+ std::lock_guard<std::mutex> lock(mu_);
+ InternalCondVarPoke();
+}
+
+void StdcppWaiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ cv_.notify_one();
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_STDCPP_WAITER
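[Editor's note] The timeout dispatch in Wait() is the portable analogue of the clock selection in the pthread and semaphore waiters: a relative timeout becomes a wait_for(), which the standard library measures against a steady clock, while an absolute timeout becomes a wait_until() on the system clock. Restated in isolation (same logic as above, shown only for emphasis):

    #include <condition_variable>
    #include <mutex>
    #include "absl/synchronization/internal/kernel_timeout.h"

    std::cv_status DispatchWait(
        std::condition_variable& cv, std::unique_lock<std::mutex>& lock,
        absl::synchronization_internal::KernelTimeout t) {
      // Relative waits are immune to wall-clock steps; absolute waits are
      // pinned to them by design.
      return t.SupportsSteadyClock() && t.is_relative_timeout()
                 ? cv.wait_for(lock, t.ToChronoDuration())
                 : cv.wait_until(lock, t.ToChronoTimePoint());
    }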
diff --git a/absl/synchronization/internal/stdcpp_waiter.h b/absl/synchronization/internal/stdcpp_waiter.h
new file mode 100644
index 00000000..e592a27b
--- /dev/null
+++ b/absl/synchronization/internal/stdcpp_waiter.h
@@ -0,0 +1,56 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_
+
+#include <condition_variable> // NOLINT(build/c++11)
+#include <mutex> // NOLINT(build/c++11)
+
+#include "absl/base/config.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_STDCPP_WAITER 1
+
+class StdcppWaiter : public WaiterCrtp<StdcppWaiter> {
+ public:
+ StdcppWaiter();
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "StdcppWaiter";
+
+ private:
+ // REQUIRES: mu_ must be held.
+ void InternalCondVarPoke();
+
+ std::mutex mu_;
+ std::condition_variable cv_;
+ int waiter_count_;
+ int wakeup_count_; // Unclaimed wakeups.
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_
diff --git a/absl/synchronization/internal/waiter.cc b/absl/synchronization/internal/waiter.cc
deleted file mode 100644
index f2051d67..00000000
--- a/absl/synchronization/internal/waiter.cc
+++ /dev/null
@@ -1,403 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/internal/waiter.h"
-
-#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <pthread.h>
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#endif
-
-#ifdef ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <errno.h>
-#include <stdio.h>
-#include <time.h>
-
-#include <atomic>
-#include <cassert>
-#include <cstdint>
-#include <new>
-#include <type_traits>
-
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/base/optimization.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-static void MaybeBecomeIdle() {
- base_internal::ThreadIdentity *identity =
- base_internal::CurrentThreadIdentityIfPresent();
- assert(identity != nullptr);
- const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
- const int ticker = identity->ticker.load(std::memory_order_relaxed);
- const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
- if (!is_idle && ticker - wait_start > Waiter::kIdlePeriods) {
- identity->is_idle.store(true, std::memory_order_relaxed);
- }
-}
-
-#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
-
-Waiter::Waiter() {
- futex_.store(0, std::memory_order_relaxed);
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- // Loop until we can atomically decrement futex from a positive
- // value, waiting on a futex while we believe it is zero.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
-
- while (true) {
- int32_t x = futex_.load(std::memory_order_relaxed);
- while (x != 0) {
- if (!futex_.compare_exchange_weak(x, x - 1,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- continue; // Raced with someone, retry.
- }
- return true; // Consumed a wakeup, we are done.
- }
-
- if (!first_pass) MaybeBecomeIdle();
- const int err = Futex::WaitUntil(&futex_, 0, t);
- if (err != 0) {
- if (err == -EINTR || err == -EWOULDBLOCK) {
- // Do nothing, the loop will retry.
- } else if (err == -ETIMEDOUT) {
- return false;
- } else {
- ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
- }
- }
- first_pass = false;
- }
-}
-
-void Waiter::Post() {
- if (futex_.fetch_add(1, std::memory_order_release) == 0) {
- // We incremented from 0, need to wake a potential waiter.
- Poke();
- }
-}
-
-void Waiter::Poke() {
- // Wake one thread waiting on the futex.
- const int err = Futex::Wake(&futex_, 1);
- if (ABSL_PREDICT_FALSE(err < 0)) {
- ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
- }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
-
-class PthreadMutexHolder {
- public:
- explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
- const int err = pthread_mutex_lock(mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
- }
- }
-
- PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
- PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
-
- ~PthreadMutexHolder() {
- const int err = pthread_mutex_unlock(mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
- }
- }
-
- private:
- pthread_mutex_t *mu_;
-};
-
-Waiter::Waiter() {
- const int err = pthread_mutex_init(&mu_, 0);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
- }
-
- const int err2 = pthread_cond_init(&cv_, 0);
- if (err2 != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
- }
-
- waiter_count_ = 0;
- wakeup_count_ = 0;
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- struct timespec abs_timeout;
- if (t.has_timeout()) {
- abs_timeout = t.MakeAbsTimespec();
- }
-
- PthreadMutexHolder h(&mu_);
- ++waiter_count_;
- // Loop until we find a wakeup to consume or timeout.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (wakeup_count_ == 0) {
- if (!first_pass) MaybeBecomeIdle();
- // No wakeups available, time to wait.
- if (!t.has_timeout()) {
- const int err = pthread_cond_wait(&cv_, &mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
- }
- } else {
- const int err = pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
- if (err == ETIMEDOUT) {
- --waiter_count_;
- return false;
- }
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_timedwait failed: %d", err);
- }
- }
- first_pass = false;
- }
- // Consume a wakeup and we're done.
- --wakeup_count_;
- --waiter_count_;
- return true;
-}
-
-void Waiter::Post() {
- PthreadMutexHolder h(&mu_);
- ++wakeup_count_;
- InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
- PthreadMutexHolder h(&mu_);
- InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
- if (waiter_count_ != 0) {
- const int err = pthread_cond_signal(&cv_);
- if (ABSL_PREDICT_FALSE(err != 0)) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
- }
- }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
-
-Waiter::Waiter() {
- if (sem_init(&sem_, 0, 0) != 0) {
- ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
- }
- wakeups_.store(0, std::memory_order_relaxed);
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- struct timespec abs_timeout;
- if (t.has_timeout()) {
- abs_timeout = t.MakeAbsTimespec();
- }
-
- // Loop until we timeout or consume a wakeup.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (true) {
- int x = wakeups_.load(std::memory_order_relaxed);
- while (x != 0) {
- if (!wakeups_.compare_exchange_weak(x, x - 1,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- continue; // Raced with someone, retry.
- }
- // Successfully consumed a wakeup, we're done.
- return true;
- }
-
- if (!first_pass) MaybeBecomeIdle();
- // Nothing to consume, wait (looping on EINTR).
- while (true) {
- if (!t.has_timeout()) {
- if (sem_wait(&sem_) == 0) break;
- if (errno == EINTR) continue;
- ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
- } else {
- if (sem_timedwait(&sem_, &abs_timeout) == 0) break;
- if (errno == EINTR) continue;
- if (errno == ETIMEDOUT) return false;
- ABSL_RAW_LOG(FATAL, "sem_timedwait failed: %d", errno);
- }
- }
- first_pass = false;
- }
-}
-
-void Waiter::Post() {
- // Post a wakeup.
- if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
- // We incremented from 0, need to wake a potential waiter.
- Poke();
- }
-}
-
-void Waiter::Poke() {
- if (sem_post(&sem_) != 0) { // Wake any semaphore waiter.
- ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
- }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
-
-class Waiter::WinHelper {
- public:
- static SRWLOCK *GetLock(Waiter *w) {
- return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
- }
-
- static CONDITION_VARIABLE *GetCond(Waiter *w) {
- return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
- }
-
- static_assert(sizeof(SRWLOCK) == sizeof(void *),
- "`mu_storage_` does not have the same size as SRWLOCK");
- static_assert(alignof(SRWLOCK) == alignof(void *),
- "`mu_storage_` does not have the same alignment as SRWLOCK");
-
- static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
- "`ABSL_CONDITION_VARIABLE_STORAGE` does not have the same size "
- "as `CONDITION_VARIABLE`");
- static_assert(
- alignof(CONDITION_VARIABLE) == alignof(void *),
- "`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
-
- // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
- // and destructible because we never call their constructors or destructors.
- static_assert(std::is_trivially_constructible<SRWLOCK>::value,
- "The `SRWLOCK` type must be trivially constructible");
- static_assert(
- std::is_trivially_constructible<CONDITION_VARIABLE>::value,
- "The `CONDITION_VARIABLE` type must be trivially constructible");
- static_assert(std::is_trivially_destructible<SRWLOCK>::value,
- "The `SRWLOCK` type must be trivially destructible");
- static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
- "The `CONDITION_VARIABLE` type must be trivially destructible");
-};
-
-class LockHolder {
- public:
- explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
- AcquireSRWLockExclusive(mu_);
- }
-
- LockHolder(const LockHolder&) = delete;
- LockHolder& operator=(const LockHolder&) = delete;
-
- ~LockHolder() {
- ReleaseSRWLockExclusive(mu_);
- }
-
- private:
- SRWLOCK* mu_;
-};
-
-Waiter::Waiter() {
- auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
- auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
- InitializeSRWLock(mu);
- InitializeConditionVariable(cv);
- waiter_count_ = 0;
- wakeup_count_ = 0;
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- SRWLOCK *mu = WinHelper::GetLock(this);
- CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
-
- LockHolder h(mu);
- ++waiter_count_;
-
- // Loop until we find a wakeup to consume or timeout.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (wakeup_count_ == 0) {
- if (!first_pass) MaybeBecomeIdle();
- // No wakeups available, time to wait.
- if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
- // GetLastError() returns a Win32 DWORD, but we assign to
- // unsigned long to simplify the ABSL_RAW_LOG case below. The uniform
- // initialization guarantees this is not a narrowing conversion.
- const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
- if (err == ERROR_TIMEOUT) {
- --waiter_count_;
- return false;
- } else {
- ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
- }
- }
- first_pass = false;
- }
- // Consume a wakeup and we're done.
- --wakeup_count_;
- --waiter_count_;
- return true;
-}
-
-void Waiter::Post() {
- LockHolder h(WinHelper::GetLock(this));
- ++wakeup_count_;
- InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
- LockHolder h(WinHelper::GetLock(this));
- InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
- if (waiter_count_ != 0) {
- WakeConditionVariable(WinHelper::GetCond(this));
- }
-}
-
-#else
-#error Unknown ABSL_WAITER_MODE
-#endif
-
-} // namespace synchronization_internal
-ABSL_NAMESPACE_END
-} // namespace absl
diff --git a/absl/synchronization/internal/waiter.h b/absl/synchronization/internal/waiter.h
index b8adfeb5..1a8b0b83 100644
--- a/absl/synchronization/internal/waiter.h
+++ b/absl/synchronization/internal/waiter.h
@@ -17,142 +17,48 @@
#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <sdkddkver.h>
-#else
-#include <pthread.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#endif
-
-#ifdef ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <atomic>
-#include <cstdint>
-
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/futex.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/futex_waiter.h"
+#include "absl/synchronization/internal/pthread_waiter.h"
+#include "absl/synchronization/internal/sem_waiter.h"
+#include "absl/synchronization/internal/stdcpp_waiter.h"
+#include "absl/synchronization/internal/win32_waiter.h"
// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
#define ABSL_WAITER_MODE_FUTEX 0
#define ABSL_WAITER_MODE_SEM 1
#define ABSL_WAITER_MODE_CONDVAR 2
#define ABSL_WAITER_MODE_WIN32 3
+#define ABSL_WAITER_MODE_STDCPP 4
#if defined(ABSL_FORCE_WAITER_MODE)
#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
-#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+#elif defined(ABSL_INTERNAL_HAVE_WIN32_WAITER)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
-#elif defined(ABSL_INTERNAL_HAVE_FUTEX)
+#elif defined(ABSL_INTERNAL_HAVE_FUTEX_WAITER)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
-#elif defined(ABSL_HAVE_SEMAPHORE_H)
+#elif defined(ABSL_INTERNAL_HAVE_SEM_WAITER)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
-#else
+#elif defined(ABSL_INTERNAL_HAVE_PTHREAD_WAITER)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_CONDVAR
+#else
+#error ABSL_WAITER_MODE is undefined
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
-// Waiter is an OS-specific semaphore.
-class Waiter {
- public:
- // Prepare any data to track waits.
- Waiter();
-
- // Not copyable or movable
- Waiter(const Waiter&) = delete;
- Waiter& operator=(const Waiter&) = delete;
-
- // Blocks the calling thread until a matching call to `Post()` or
- // `t` has passed. Returns `true` if woken (`Post()` called),
- // `false` on timeout.
- bool Wait(KernelTimeout t);
-
- // Restart the caller of `Wait()` as with a normal semaphore.
- void Post();
-
- // If anyone is waiting, wake them up temporarily and cause them to
- // call `MaybeBecomeIdle()`. They will then return to waiting for a
- // `Post()` or timeout.
- void Poke();
-
- // Returns the Waiter associated with the identity.
- static Waiter* GetWaiter(base_internal::ThreadIdentity* identity) {
- static_assert(
- sizeof(Waiter) <= sizeof(base_internal::ThreadIdentity::WaiterState),
- "Insufficient space for Waiter");
- return reinterpret_cast<Waiter*>(identity->waiter_state.data);
- }
-
- // How many periods to remain idle before releasing resources
-#ifndef ABSL_HAVE_THREAD_SANITIZER
- static constexpr int kIdlePeriods = 60;
-#else
- // Memory consumption under ThreadSanitizer is a serious concern,
- // so we release resources sooner. The value of 1 leads to 1 to 2 second
- // delay before marking a thread as idle.
- static const int kIdlePeriods = 1;
-#endif
-
- private:
- // The destructor must not be called since Mutex/CondVar
- // can use PerThreadSem/Waiter after the thread exits.
- // Waiter objects are embedded in ThreadIdentity objects,
- // which are reused via a freelist and are never destroyed.
- ~Waiter() = delete;
-
#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
- // Futexes are defined by specification to be 32-bits.
- // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
- std::atomic<int32_t> futex_;
- static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
- // REQUIRES: mu_ must be held.
- void InternalCondVarPoke();
-
- pthread_mutex_t mu_;
- pthread_cond_t cv_;
- int waiter_count_;
- int wakeup_count_; // Unclaimed wakeups.
-
+using Waiter = FutexWaiter;
#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
- sem_t sem_;
- // This seems superfluous, but for Poke() we need to cause spurious
- // wakeups on the semaphore. Hence we can't actually use the
- // semaphore's count.
- std::atomic<int> wakeups_;
-
+using Waiter = SemWaiter;
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
+using Waiter = PthreadWaiter;
#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
- // WinHelper - Used to define utilities for accessing the lock and
- // condition variable storage once the types are complete.
- class WinHelper;
-
- // REQUIRES: WinHelper::GetLock(this) must be held.
- void InternalCondVarPoke();
-
- // We can't include Windows.h in our headers, so we use aligned character
- // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
- // SRW locks and condition variables do not need to be explicitly destroyed.
- // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
- // https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
- alignas(void*) unsigned char mu_storage_[sizeof(void*)];
- alignas(void*) unsigned char cv_storage_[sizeof(void*)];
- int waiter_count_;
- int wakeup_count_;
-
-#else
- #error Unknown ABSL_WAITER_MODE
+using Waiter = Win32Waiter;
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_STDCPP
+using Waiter = StdcppWaiter;
#endif
-};
} // namespace synchronization_internal
ABSL_NAMESPACE_END
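[Editor's note] With the implementations split into their own headers, Waiter is now just a type alias chosen by the preprocessor chain above, and forcing a backend is a compile-time switch: building with -DABSL_FORCE_WAITER_MODE=4 selects the portable StdcppWaiter on any platform. A sketch of verifying the selection:

    #include <type_traits>
    #include "absl/synchronization/internal/waiter.h"

    namespace si = absl::synchronization_internal;

    // Built with -DABSL_FORCE_WAITER_MODE=4, the alias resolves to the
    // std::mutex/std::condition_variable implementation.
    #if ABSL_WAITER_MODE == ABSL_WAITER_MODE_STDCPP
    static_assert(std::is_same<si::Waiter, si::StdcppWaiter>::value,
                  "forced mode selects StdcppWaiter");
    #endif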
diff --git a/absl/synchronization/internal/waiter_base.cc b/absl/synchronization/internal/waiter_base.cc
new file mode 100644
index 00000000..46928b40
--- /dev/null
+++ b/absl/synchronization/internal/waiter_base.cc
@@ -0,0 +1,42 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/waiter_base.h"
+
+#include "absl/base/config.h"
+#include "absl/base/internal/thread_identity.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr int WaiterBase::kIdlePeriods;
+#endif
+
+void WaiterBase::MaybeBecomeIdle() {
+ base_internal::ThreadIdentity *identity =
+ base_internal::CurrentThreadIdentityIfPresent();
+ assert(identity != nullptr);
+ const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
+ const int ticker = identity->ticker.load(std::memory_order_relaxed);
+ const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
+ if (!is_idle && ticker - wait_start > kIdlePeriods) {
+ identity->is_idle.store(true, std::memory_order_relaxed);
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
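[Editor's note] MaybeBecomeIdle() is the consumer side of the ticker protocol driven by PerThreadSem::Tick(): Tick() advances identity->ticker once per period, and wait_start records the ticker value when the current wait began. The idle test in isolation (illustrative helper, not part of the API):

    #include "absl/synchronization/internal/waiter_base.h"

    // After kIdlePeriods ticks with no wakeup (60 normally, 1 under
    // ThreadSanitizer), the thread flags itself idle so per-thread
    // resources can be reclaimed.
    bool ShouldMarkIdle(int ticker, int wait_start, bool is_idle) {
      return !is_idle &&
             ticker - wait_start >
                 absl::synchronization_internal::WaiterBase::kIdlePeriods;
    }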
diff --git a/absl/synchronization/internal/waiter_base.h b/absl/synchronization/internal/waiter_base.h
new file mode 100644
index 00000000..cf175481
--- /dev/null
+++ b/absl/synchronization/internal/waiter_base.h
@@ -0,0 +1,90 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_
+
+#include "absl/base/config.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// `Waiter` is a platform-specific semaphore implementation that `PerThreadSem`
+// waits on to implement blocking in `absl::Mutex`. Implementations should
+// inherit from `WaiterCrtp` and must implement `Wait()`, `Post()`, and `Poke()`
+// as described in `WaiterBase`. `waiter.h` selects the implementation and uses
+// static dispatch for performance.
+class WaiterBase {
+ public:
+ WaiterBase() = default;
+
+ // Not copyable or movable
+ WaiterBase(const WaiterBase&) = delete;
+ WaiterBase& operator=(const WaiterBase&) = delete;
+
+ // Blocks the calling thread until a matching call to `Post()` or
+ // `t` has passed. Returns `true` if woken (`Post()` called),
+ // `false` on timeout.
+ //
+ // bool Wait(KernelTimeout t);
+
+ // Restart the caller of `Wait()` as with a normal semaphore.
+ //
+ // void Post();
+
+ // If anyone is waiting, wake them up temporarily and cause them to
+ // call `MaybeBecomeIdle()`. They will then return to waiting for a
+ // `Post()` or timeout.
+ //
+ // void Poke();
+
+ // Returns the name of this implementation. Used only for debugging.
+ //
+ // static constexpr char kName[];
+
+ // How many periods to remain idle before releasing resources
+#ifndef ABSL_HAVE_THREAD_SANITIZER
+ static constexpr int kIdlePeriods = 60;
+#else
+ // Memory consumption under ThreadSanitizer is a serious concern,
+ // so we release resources sooner. The value of 1 leads to 1 to 2 second
+ // delay before marking a thread as idle.
+ static constexpr int kIdlePeriods = 1;
+#endif
+
+ protected:
+ static void MaybeBecomeIdle();
+};
+
+template <typename T>
+class WaiterCrtp : public WaiterBase {
+ public:
+ // Returns the Waiter associated with the identity.
+ static T* GetWaiter(base_internal::ThreadIdentity* identity) {
+ static_assert(
+ sizeof(T) <= sizeof(base_internal::ThreadIdentity::WaiterState),
+ "Insufficient space for Waiter");
+ return reinterpret_cast<T*>(identity->waiter_state.data);
+ }
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_
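[Editor's note] The contract is documented as commented-out signatures because dispatch is static: there are no virtuals to override, and waiter.h aliases exactly one concrete type. A hypothetical toy implementation, only to show the shape of the contract — real waiters must fit in ThreadIdentity::WaiterState and are never destroyed:

    #include <atomic>
    #include "absl/synchronization/internal/kernel_timeout.h"
    #include "absl/synchronization/internal/waiter_base.h"

    class ToyWaiter
        : public absl::synchronization_internal::WaiterCrtp<ToyWaiter> {
     public:
      ToyWaiter() : posted_(false) {}

      // Spin-based stand-in for a blocking wait; a real implementation
      // parks the thread in the kernel instead of spinning.
      bool Wait(absl::synchronization_internal::KernelTimeout t) {
        while (!posted_.exchange(false, std::memory_order_acquire)) {
          if (t.has_timeout() && t.InMillisecondsFromNow() == 0) return false;
        }
        return true;
      }
      void Post() { posted_.store(true, std::memory_order_release); }
      void Poke() {}  // nothing blocks in the kernel here, so a no-op

      static constexpr char kName[] = "ToyWaiter";

     private:
      std::atomic<bool> posted_;
    };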
diff --git a/absl/synchronization/internal/waiter_test.cc b/absl/synchronization/internal/waiter_test.cc
new file mode 100644
index 00000000..992db29b
--- /dev/null
+++ b/absl/synchronization/internal/waiter_test.cc
@@ -0,0 +1,180 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/waiter.h"
+
+#include <ctime>
+#include <iostream>
+#include <ostream>
+
+#include "absl/base/config.h"
+#include "absl/random/random.h"
+#include "absl/synchronization/internal/create_thread_identity.h"
+#include "absl/synchronization/internal/futex_waiter.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/pthread_waiter.h"
+#include "absl/synchronization/internal/sem_waiter.h"
+#include "absl/synchronization/internal/stdcpp_waiter.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/synchronization/internal/win32_waiter.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+#include "gtest/gtest.h"
+
+// Test go/btm support by randomizing the value of clock_gettime() for
+// CLOCK_MONOTONIC. This works by overriding a weak symbol in glibc.
+// We should be resistant to this randomization when !SupportsSteadyClock().
+#if defined(__GOOGLE_GRTE_VERSION__) && \
+ !defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+ !defined(ABSL_HAVE_MEMORY_SANITIZER) && \
+ !defined(ABSL_HAVE_THREAD_SANITIZER)
+extern "C" int __clock_gettime(clockid_t c, struct timespec* ts);
+
+extern "C" int clock_gettime(clockid_t c, struct timespec* ts) {
+ if (c == CLOCK_MONOTONIC &&
+ !absl::synchronization_internal::KernelTimeout::SupportsSteadyClock()) {
+ absl::SharedBitGen gen;
+ ts->tv_sec = absl::Uniform(gen, 0, 1'000'000'000);
+ ts->tv_nsec = absl::Uniform(gen, 0, 1'000'000'000);
+ return 0;
+ }
+ return __clock_gettime(c, ts);
+}
+#endif
+
+namespace {
+
+TEST(Waiter, PrintPlatformImplementation) {
+ // Allows us to verify that the platform is using the expected implementation.
+ std::cout << absl::synchronization_internal::Waiter::kName << std::endl;
+}
+
+template <typename T>
+class WaiterTest : public ::testing::Test {
+ public:
+ // Waiter implementations assume that a ThreadIdentity has already been
+ // created.
+ WaiterTest() {
+ absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
+ }
+};
+
+TYPED_TEST_SUITE_P(WaiterTest);
+
+absl::Duration WithTolerance(absl::Duration d) { return d * 0.95; }
+
+TYPED_TEST_P(WaiterTest, WaitNoTimeout) {
+ absl::synchronization_internal::ThreadPool tp(1);
+ TypeParam waiter;
+ tp.Schedule([&]() {
+ // Include some `Poke()` calls to ensure they don't cause `waiter` to return
+ // from `Wait()`.
+ waiter.Poke();
+ absl::SleepFor(absl::Seconds(1));
+ waiter.Poke();
+ absl::SleepFor(absl::Seconds(1));
+ waiter.Post();
+ });
+ absl::Time start = absl::Now();
+ EXPECT_TRUE(
+ waiter.Wait(absl::synchronization_internal::KernelTimeout::Never()));
+ absl::Duration waited = absl::Now() - start;
+ EXPECT_GE(waited, WithTolerance(absl::Seconds(2)));
+}
+
+TYPED_TEST_P(WaiterTest, WaitDurationWoken) {
+ absl::synchronization_internal::ThreadPool tp(1);
+ TypeParam waiter;
+ tp.Schedule([&]() {
+ // Include some `Poke()` calls to ensure they don't cause `waiter` to return
+ // from `Wait()`.
+ waiter.Poke();
+ absl::SleepFor(absl::Milliseconds(500));
+ waiter.Post();
+ });
+ absl::Time start = absl::Now();
+ EXPECT_TRUE(waiter.Wait(
+ absl::synchronization_internal::KernelTimeout(absl::Seconds(10))));
+ absl::Duration waited = absl::Now() - start;
+ EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500)));
+ EXPECT_LT(waited, absl::Seconds(2));
+}
+
+TYPED_TEST_P(WaiterTest, WaitTimeWoken) {
+ absl::synchronization_internal::ThreadPool tp(1);
+ TypeParam waiter;
+ tp.Schedule([&]() {
+ // Include some `Poke()` calls to ensure they don't cause `waiter` to return
+ // from `Wait()`.
+ waiter.Poke();
+ absl::SleepFor(absl::Milliseconds(500));
+ waiter.Post();
+ });
+ absl::Time start = absl::Now();
+ EXPECT_TRUE(waiter.Wait(absl::synchronization_internal::KernelTimeout(
+ start + absl::Seconds(10))));
+ absl::Duration waited = absl::Now() - start;
+ EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500)));
+ EXPECT_LT(waited, absl::Seconds(2));
+}
+
+TYPED_TEST_P(WaiterTest, WaitDurationReached) {
+ TypeParam waiter;
+ absl::Time start = absl::Now();
+ EXPECT_FALSE(waiter.Wait(
+ absl::synchronization_internal::KernelTimeout(absl::Milliseconds(500))));
+ absl::Duration waited = absl::Now() - start;
+ EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500)));
+ EXPECT_LT(waited, absl::Seconds(1));
+}
+
+TYPED_TEST_P(WaiterTest, WaitTimeReached) {
+ TypeParam waiter;
+ absl::Time start = absl::Now();
+ EXPECT_FALSE(waiter.Wait(absl::synchronization_internal::KernelTimeout(
+ start + absl::Milliseconds(500))));
+ absl::Duration waited = absl::Now() - start;
+ EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500)));
+ EXPECT_LT(waited, absl::Seconds(1));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(WaiterTest,
+ WaitNoTimeout,
+ WaitDurationWoken,
+ WaitTimeWoken,
+ WaitDurationReached,
+ WaitTimeReached);
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Futex, WaiterTest,
+ absl::synchronization_internal::FutexWaiter);
+#endif
+#ifdef ABSL_INTERNAL_HAVE_PTHREAD_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Pthread, WaiterTest,
+ absl::synchronization_internal::PthreadWaiter);
+#endif
+#ifdef ABSL_INTERNAL_HAVE_SEM_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Sem, WaiterTest,
+ absl::synchronization_internal::SemWaiter);
+#endif
+#ifdef ABSL_INTERNAL_HAVE_WIN32_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Win32, WaiterTest,
+ absl::synchronization_internal::Win32Waiter);
+#endif
+#ifdef ABSL_INTERNAL_HAVE_STDCPP_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Stdcpp, WaiterTest,
+ absl::synchronization_internal::StdcppWaiter);
+#endif
+
+} // namespace
diff --git a/absl/synchronization/internal/win32_waiter.cc b/absl/synchronization/internal/win32_waiter.cc
new file mode 100644
index 00000000..bd95ff08
--- /dev/null
+++ b/absl/synchronization/internal/win32_waiter.cc
@@ -0,0 +1,151 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/win32_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_WIN32_WAITER
+
+#include <windows.h>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char Win32Waiter::kName[];
+#endif
+
+class Win32Waiter::WinHelper {
+ public:
+ static SRWLOCK *GetLock(Win32Waiter *w) {
+ return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
+ }
+
+ static CONDITION_VARIABLE *GetCond(Win32Waiter *w) {
+ return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
+ }
+
+ static_assert(sizeof(SRWLOCK) == sizeof(void *),
+ "`mu_storage_` does not have the same size as SRWLOCK");
+ static_assert(alignof(SRWLOCK) == alignof(void *),
+ "`mu_storage_` does not have the same alignment as SRWLOCK");
+
+ static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
+ "`ABSL_CONDITION_VARIABLE_STORAGE` does not have the same size "
+ "as `CONDITION_VARIABLE`");
+ static_assert(
+ alignof(CONDITION_VARIABLE) == alignof(void *),
+ "`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
+
+ // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
+ // and destructible because we never call their constructors or destructors.
+ static_assert(std::is_trivially_constructible<SRWLOCK>::value,
+ "The `SRWLOCK` type must be trivially constructible");
+ static_assert(
+ std::is_trivially_constructible<CONDITION_VARIABLE>::value,
+ "The `CONDITION_VARIABLE` type must be trivially constructible");
+ static_assert(std::is_trivially_destructible<SRWLOCK>::value,
+ "The `SRWLOCK` type must be trivially destructible");
+ static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
+ "The `CONDITION_VARIABLE` type must be trivially destructible");
+};
+
+class LockHolder {
+ public:
+ explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
+ AcquireSRWLockExclusive(mu_);
+ }
+
+ LockHolder(const LockHolder&) = delete;
+ LockHolder& operator=(const LockHolder&) = delete;
+
+ ~LockHolder() {
+ ReleaseSRWLockExclusive(mu_);
+ }
+
+ private:
+ SRWLOCK* mu_;
+};
+
+Win32Waiter::Win32Waiter() {
+ auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
+ auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
+ InitializeSRWLock(mu);
+ InitializeConditionVariable(cv);
+ waiter_count_ = 0;
+ wakeup_count_ = 0;
+}
+
+bool Win32Waiter::Wait(KernelTimeout t) {
+ SRWLOCK *mu = WinHelper::GetLock(this);
+ CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
+
+ LockHolder h(mu);
+ ++waiter_count_;
+
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
+ // GetLastError() returns a Win32 DWORD, but we assign to
+ // unsigned long to simplify the ABSL_RAW_LOG case below. The uniform
+ // initialization guarantees this is not a narrowing conversion.
+ const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
+ if (err == ERROR_TIMEOUT) {
+ --waiter_count_;
+ return false;
+ } else {
+ ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
+ }
+ }
+ first_pass = false;
+ }
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void Win32Waiter::Post() {
+ LockHolder h(WinHelper::GetLock(this));
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void Win32Waiter::Poke() {
+ LockHolder h(WinHelper::GetLock(this));
+ InternalCondVarPoke();
+}
+
+void Win32Waiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ WakeConditionVariable(WinHelper::GetCond(this));
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_WIN32_WAITER
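
Wait()/Post() above implement a counting semaphore: Post() increments wakeup_count_ under the lock and pokes the condition variable, while Wait() loops until it can decrement the count or the deadline expires. A portable sketch of the same counting protocol (untimed, with std::mutex/std::condition_variable standing in for SRWLOCK/CONDITION_VARIABLE; the class name is hypothetical):

#include <condition_variable>
#include <mutex>

class CountingWaiterSketch {
 public:
  void Post() {
    std::lock_guard<std::mutex> lock(mu_);
    ++wakeup_count_;  // deposit a wakeup even if nobody is waiting yet
    cv_.notify_one();
  }

  void Wait() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return wakeup_count_ > 0; });
    --wakeup_count_;  // consume exactly one wakeup
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int wakeup_count_ = 0;
};
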
diff --git a/absl/synchronization/internal/win32_waiter.h b/absl/synchronization/internal/win32_waiter.h
new file mode 100644
index 00000000..87eb617c
--- /dev/null
+++ b/absl/synchronization/internal/win32_waiter.h
@@ -0,0 +1,70 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_
+
+#ifdef _WIN32
+#include <sdkddkver.h>
+#endif
+
+#if defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+
+#include "absl/base/config.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_WIN32_WAITER 1
+
+class Win32Waiter : public WaiterCrtp<Win32Waiter> {
+ public:
+ Win32Waiter();
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "Win32Waiter";
+
+ private:
+ // WinHelper - Used to define utilities for accessing the lock and
+ // condition variable storage once the types are complete.
+ class WinHelper;
+
+ // REQUIRES: WinHelper::GetLock(this) must be held.
+ void InternalCondVarPoke();
+
+ // We can't include Windows.h in our headers, so we use aligned character
+ // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
+ // SRW locks and condition variables do not need to be explicitly destroyed.
+ // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
+ // https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
+ alignas(void*) unsigned char mu_storage_[sizeof(void*)];
+ alignas(void*) unsigned char cv_storage_[sizeof(void*)];
+ int waiter_count_;
+ int wakeup_count_;
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_
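
Since <windows.h> cannot be included here, mu_storage_ and cv_storage_ reserve correctly sized and aligned bytes, and the .cc file placement-news the real SRWLOCK and CONDITION_VARIABLE into them; the static_asserts in WinHelper keep the two layouts in sync. A stripped-down sketch of the idiom, using a hypothetical stand-in type rather than a real Windows type:

#include <new>
#include <type_traits>

struct Impl { void* handle; };  // stand-in for a type this header cannot name

class OpaqueHolder {
 public:
  OpaqueHolder() { ::new (static_cast<void*>(&storage_)) Impl{}; }
  Impl* get() { return reinterpret_cast<Impl*>(&storage_); }

 private:
  alignas(void*) unsigned char storage_[sizeof(void*)];
};

static_assert(sizeof(Impl) == sizeof(void*), "storage size must match Impl");
static_assert(alignof(Impl) == alignof(void*), "alignment must match Impl");
static_assert(std::is_trivially_destructible<Impl>::value,
              "no destructor ever runs for the bytes in storage_");
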
diff --git a/absl/synchronization/lifetime_test.cc b/absl/synchronization/lifetime_test.cc
index e6274232..d5ce35a1 100644
--- a/absl/synchronization/lifetime_test.cc
+++ b/absl/synchronization/lifetime_test.cc
@@ -18,8 +18,8 @@
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/thread_annotations.h"
+#include "absl/log/check.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
@@ -35,20 +35,20 @@ namespace {
// Thread two waits on 'notification', then sets 'state' inside the 'mutex',
// signalling the change via 'condvar'.
//
-// These tests use ABSL_RAW_CHECK to validate invariants, rather than EXPECT or
-// ASSERT from gUnit, because we need to invoke them during global destructors,
-// when gUnit teardown would have already begun.
+// These tests use CHECK to validate invariants, rather than EXPECT or ASSERT
+// from gUnit, because we need to invoke them during global destructors, when
+// gUnit teardown would have already begun.
void ThreadOne(absl::Mutex* mutex, absl::CondVar* condvar,
absl::Notification* notification, bool* state) {
// Test that the notification is in a valid initial state.
- ABSL_RAW_CHECK(!notification->HasBeenNotified(), "invalid Notification");
- ABSL_RAW_CHECK(*state == false, "*state not initialized");
+ CHECK(!notification->HasBeenNotified()) << "invalid Notification";
+ CHECK(!*state) << "*state not initialized";
{
absl::MutexLock lock(mutex);
notification->Notify();
- ABSL_RAW_CHECK(notification->HasBeenNotified(), "invalid Notification");
+ CHECK(notification->HasBeenNotified()) << "invalid Notification";
while (*state == false) {
condvar->Wait(mutex);
@@ -58,11 +58,11 @@ void ThreadOne(absl::Mutex* mutex, absl::CondVar* condvar,
void ThreadTwo(absl::Mutex* mutex, absl::CondVar* condvar,
absl::Notification* notification, bool* state) {
- ABSL_RAW_CHECK(*state == false, "*state not initialized");
+ CHECK(!*state) << "*state not initialized";
// Wake thread one
notification->WaitForNotification();
- ABSL_RAW_CHECK(notification->HasBeenNotified(), "invalid Notification");
+ CHECK(notification->HasBeenNotified()) << "invalid Notification";
{
absl::MutexLock lock(mutex);
*state = true;
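
A minimal sketch of the constraint described above: a global whose destructor runs during static teardown, after gUnit has shut down, and which therefore must validate with CHECK rather than EXPECT/ASSERT (hypothetical example, not taken from the test):

#include "absl/log/check.h"
#include "absl/synchronization/mutex.h"

struct ShutdownChecked {
  absl::Mutex mu;
  ~ShutdownChecked() {
    // gUnit teardown has already begun here, so gUnit assertions are unsafe.
    CHECK(mu.TryLock()) << "mutex unexpectedly held at shutdown";
    mu.Unlock();
  }
};
ShutdownChecked global_checked;  // destroyed after main() returns
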
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 064ccb74..3aa5560a 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -35,10 +35,9 @@
#include <algorithm>
#include <atomic>
-#include <cinttypes>
#include <cstddef>
+#include <cstdlib>
#include <cstring>
-#include <iterator>
#include <thread> // NOLINT(build/c++11)
#include "absl/base/attributes.h"
@@ -55,7 +54,6 @@
#include "absl/base/internal/thread_identity.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/optimization.h"
-#include "absl/base/port.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"
#include "absl/synchronization/internal/graphcycles.h"
@@ -63,6 +61,7 @@
#include "absl/time/time.h"
using absl::base_internal::CurrentThreadIdentityIfPresent;
+using absl::base_internal::CycleClock;
using absl::base_internal::PerThreadSynch;
using absl::base_internal::SchedulingGuard;
using absl::base_internal::ThreadIdentity;
@@ -98,18 +97,15 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
submit_profile_data;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
- const char *msg, const void *obj, int64_t wait_cycles)>
+ const char* msg, const void* obj, int64_t wait_cycles)>
mutex_tracer;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
- cond_var_tracer;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
- bool (*)(const void *pc, char *out, int out_size)>
- symbolizer(absl::Symbolize);
+absl::base_internal::AtomicHook<void (*)(const char* msg, const void* cv)>
+ cond_var_tracer;
} // namespace
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
bool locking, bool trylock,
bool read_lock);
@@ -117,19 +113,15 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
submit_profile_data.Store(fn);
}
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
int64_t wait_cycles)) {
mutex_tracer.Store(fn);
}
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
+void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)) {
cond_var_tracer.Store(fn);
}
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
- symbolizer.Store(fn);
-}
-
namespace {
// Represents the strategy for spin and yield.
// See the comment in GetMutexGlobals() for more information.
@@ -148,25 +140,24 @@ absl::Duration MeasureTimeToYield() {
return absl::Now() - before;
}
-const MutexGlobals &GetMutexGlobals() {
+const MutexGlobals& GetMutexGlobals() {
ABSL_CONST_INIT static MutexGlobals data;
absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
- const int num_cpus = absl::base_internal::NumCPUs();
- data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
- // If this a uniprocessor, only yield/sleep.
- // Real-time threads are often unable to yield, so the sleep time needs
- // to be long enough to keep the calling thread asleep until scheduling
- // happens.
- // If this is multiprocessor, allow spinning. If the mode is
- // aggressive then spin many times before yielding. If the mode is
- // gentle then spin only a few times before yielding. Aggressive spinning
- // is used to ensure that an Unlock() call, which must get the spin lock
- // for any thread to make progress gets it without undue delay.
- if (num_cpus > 1) {
+ if (absl::base_internal::NumCPUs() > 1) {
+ // If this is multiprocessor, allow spinning. If the mode is
+ // aggressive then spin many times before yielding. If the mode is
+ // gentle then spin only a few times before yielding. Aggressive spinning
+      // is used to ensure that an Unlock() call, which must get the spin lock
+      // for any thread to make progress, gets it without undue delay.
+ data.spinloop_iterations = 1500;
data.mutex_sleep_spins[AGGRESSIVE] = 5000;
data.mutex_sleep_spins[GENTLE] = 250;
data.mutex_sleep_time = absl::Microseconds(10);
} else {
+      // If this is a uniprocessor, only yield/sleep. Real-time threads are
+      // often unable to yield, so the sleep time needs to be long enough to
+      // keep the calling thread asleep until scheduling happens.
+ data.spinloop_iterations = 0;
data.mutex_sleep_spins[AGGRESSIVE] = 0;
data.mutex_sleep_spins[GENTLE] = 0;
data.mutex_sleep_time = MeasureTimeToYield() * 5;
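
A sketch of the acquisition pattern these globals tune, assuming a simple test-and-set lock for illustration (the real fast path is the CAS loops later in this file): spin up to spinloop_iterations times, then fall back to the yield/sleep path governed by the per-mode spin counts above.

#include <atomic>

// Returns true if the lock was taken while spinning; on false the caller
// yields or sleeps for mutex_sleep_time before trying again.
bool TryAcquireSpinning(std::atomic<bool>& locked, int spinloop_iterations) {
  for (int i = 0; i < spinloop_iterations; ++i) {
    bool expected = false;
    if (locked.compare_exchange_weak(expected, true,
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
      return true;
    }
  }
  return false;
}
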
@@ -219,8 +210,7 @@ static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
v = pv->load(std::memory_order_relaxed);
} while ((v & bits) != bits &&
((v & wait_until_clear) != 0 ||
- !pv->compare_exchange_weak(v, v | bits,
- std::memory_order_release,
+ !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
std::memory_order_relaxed)));
}
@@ -235,8 +225,7 @@ static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
v = pv->load(std::memory_order_relaxed);
} while ((v & bits) != 0 &&
((v & wait_until_clear) != 0 ||
- !pv->compare_exchange_weak(v, v & ~bits,
- std::memory_order_release,
+ !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release,
std::memory_order_relaxed)));
}
@@ -247,7 +236,7 @@ ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
// Graph used to detect deadlocks.
-ABSL_CONST_INIT static GraphCycles *deadlock_graph
+ABSL_CONST_INIT static GraphCycles* deadlock_graph
ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
//------------------------------------------------------------------
@@ -291,7 +280,7 @@ enum { // Event flags
// Properties of the events.
static const struct {
int flags;
- const char *msg;
+ const char* msg;
} event_properties[] = {
{SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
{0, "TryLock failed "},
@@ -316,12 +305,12 @@ ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
// Can't be too small, as it's used for deadlock detection information.
static constexpr uint32_t kNSynchEvent = 1031;
-static struct SynchEvent { // this is a trivial hash table for the events
+static struct SynchEvent { // this is a trivial hash table for the events
// struct is freed when refcount reaches 0
int refcount ABSL_GUARDED_BY(synch_event_mu);
// buckets have linear, 0-terminated chains
- SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
+ SynchEvent* next ABSL_GUARDED_BY(synch_event_mu);
// Constant after initialization
uintptr_t masked_addr; // object at this address is called "name"
@@ -329,13 +318,13 @@ static struct SynchEvent { // this is a trivial hash table for the events
// No explicit synchronization used. Instead we assume that the
// client who enables/disables invariants/logging on a Mutex does so
// while the Mutex is not being concurrently accessed by others.
- void (*invariant)(void *arg); // called on each event
- void *arg; // first arg to (*invariant)()
- bool log; // logging turned on
+ void (*invariant)(void* arg); // called on each event
+ void* arg; // first arg to (*invariant)()
+ bool log; // logging turned on
// Constant after initialization
- char name[1]; // actually longer---NUL-terminated string
-} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
+ char name[1]; // actually longer---NUL-terminated string
+}* synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
// Ensure that the object at "addr" has a SynchEvent struct associated with it,
// set "bits" in the word there (waiting until lockbit is clear before doing
@@ -344,11 +333,11 @@ static struct SynchEvent { // this is a trivial hash table for the events
// the string name is copied into it.
// When used with a mutex, the caller should also ensure that kMuEvent
// is set in the mutex word, and similarly for condition variables and kCVEvent.
-static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
- const char *name, intptr_t bits,
+static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
+ const char* name, intptr_t bits,
intptr_t lockbit) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent *e;
+ SynchEvent* e;
   // first look for existing SynchEvent struct...
synch_event_mu.Lock();
for (e = synch_event[h];
@@ -360,9 +349,9 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
name = "";
}
size_t l = strlen(name);
- e = reinterpret_cast<SynchEvent *>(
+ e = reinterpret_cast<SynchEvent*>(
base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
- e->refcount = 2; // one for return value, one for linked list
+ e->refcount = 2; // one for return value, one for linked list
e->masked_addr = base_internal::HidePtr(addr);
e->invariant = nullptr;
e->arg = nullptr;
@@ -372,19 +361,19 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
AtomicSetBits(addr, bits, lockbit);
synch_event[h] = e;
} else {
- e->refcount++; // for return value
+ e->refcount++; // for return value
}
synch_event_mu.Unlock();
return e;
}
// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
-static void DeleteSynchEvent(SynchEvent *e) {
+static void DeleteSynchEvent(SynchEvent* e) {
base_internal::LowLevelAlloc::Free(e);
}
// Decrement the reference count of *e, or do nothing if e==null.
-static void UnrefSynchEvent(SynchEvent *e) {
+static void UnrefSynchEvent(SynchEvent* e) {
if (e != nullptr) {
synch_event_mu.Lock();
bool del = (--(e->refcount) == 0);
@@ -398,11 +387,11 @@ static void UnrefSynchEvent(SynchEvent *e) {
// Forget the mapping from the object (Mutex or CondVar) at address addr
// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
// is clear before doing so).
-static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
+static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits,
intptr_t lockbit) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent **pe;
- SynchEvent *e;
+ SynchEvent** pe;
+ SynchEvent* e;
synch_event_mu.Lock();
for (pe = &synch_event[h];
(e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
@@ -423,9 +412,9 @@ static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
// Return a refcounted reference to the SynchEvent of the object at address
// "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
// called.
-static SynchEvent *GetSynchEvent(const void *addr) {
+static SynchEvent* GetSynchEvent(const void* addr) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent *e;
+ SynchEvent* e;
synch_event_mu.Lock();
for (e = synch_event[h];
e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
@@ -440,17 +429,17 @@ static SynchEvent *GetSynchEvent(const void *addr) {
 // Called when an event "ev" occurs on a Mutex or CondVar "obj"
// if event recording is on
-static void PostSynchEvent(void *obj, int ev) {
- SynchEvent *e = GetSynchEvent(obj);
+static void PostSynchEvent(void* obj, int ev) {
+ SynchEvent* e = GetSynchEvent(obj);
// logging is on if event recording is on and either there's no event struct,
// or it explicitly says to log
if (e == nullptr || e->log) {
- void *pcs[40];
+ void* pcs[40];
int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
// A buffer with enough space for the ASCII for all the PCs, even on a
// 64-bit machine.
char buffer[ABSL_ARRAYSIZE(pcs) * 24];
- int pos = snprintf(buffer, sizeof (buffer), " @");
+ int pos = snprintf(buffer, sizeof(buffer), " @");
for (int i = 0; i != n; i++) {
int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
" %p", pcs[i]);
@@ -472,13 +461,13 @@ static void PostSynchEvent(void *obj, int ev) {
// get false positive race reports later.
// Reuse EvalConditionAnnotated to properly call into user code.
struct local {
- static bool pred(SynchEvent *ev) {
+ static bool pred(SynchEvent* ev) {
(*ev->invariant)(ev->arg);
return false;
}
};
Condition cond(&local::pred, e);
- Mutex *mu = static_cast<Mutex *>(obj);
+ Mutex* mu = static_cast<Mutex*>(obj);
const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
const bool trylock = (flags & SYNCH_F_TRY) != 0;
const bool read_lock = (flags & SYNCH_F_R) != 0;
@@ -504,32 +493,32 @@ static void PostSynchEvent(void *obj, int ev) {
// PerThreadSynch struct points at the most recent SynchWaitParams struct when
// the thread is on a Mutex's waiter queue.
struct SynchWaitParams {
- SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
- KernelTimeout timeout_arg, Mutex *cvmu_arg,
- PerThreadSynch *thread_arg,
- std::atomic<intptr_t> *cv_word_arg)
+ SynchWaitParams(Mutex::MuHow how_arg, const Condition* cond_arg,
+ KernelTimeout timeout_arg, Mutex* cvmu_arg,
+ PerThreadSynch* thread_arg,
+ std::atomic<intptr_t>* cv_word_arg)
: how(how_arg),
cond(cond_arg),
timeout(timeout_arg),
cvmu(cvmu_arg),
thread(thread_arg),
cv_word(cv_word_arg),
- contention_start_cycles(base_internal::CycleClock::Now()),
+ contention_start_cycles(CycleClock::Now()),
should_submit_contention_data(false) {}
const Mutex::MuHow how; // How this thread needs to wait.
- const Condition *cond; // The condition that this thread is waiting for.
- // In Mutex, this field is set to zero if a timeout
- // expires.
+ const Condition* cond; // The condition that this thread is waiting for.
+ // In Mutex, this field is set to zero if a timeout
+ // expires.
KernelTimeout timeout; // timeout expiry---absolute time
// In Mutex, this field is set to zero if a timeout
// expires.
- Mutex *const cvmu; // used for transfer from cond var to mutex
- PerThreadSynch *const thread; // thread that is waiting
+ Mutex* const cvmu; // used for transfer from cond var to mutex
+ PerThreadSynch* const thread; // thread that is waiting
// If not null, thread should be enqueued on the CondVar whose state
// word is cv_word instead of queueing normally on the Mutex.
- std::atomic<intptr_t> *cv_word;
+ std::atomic<intptr_t>* cv_word;
int64_t contention_start_cycles; // Time (in cycles) when this thread started
// to contend for the mutex.
@@ -537,12 +526,12 @@ struct SynchWaitParams {
};
struct SynchLocksHeld {
- int n; // number of valid entries in locks[]
- bool overflow; // true iff we overflowed the array at some point
+ int n; // number of valid entries in locks[]
+ bool overflow; // true iff we overflowed the array at some point
struct {
- Mutex *mu; // lock acquired
- int32_t count; // times acquired
- GraphId id; // deadlock_graph id of acquired lock
+ Mutex* mu; // lock acquired
+ int32_t count; // times acquired
+ GraphId id; // deadlock_graph id of acquired lock
} locks[40];
// If a thread overfills the array during deadlock detection, we
// continue, discarding information as needed. If no overflow has
@@ -552,11 +541,11 @@ struct SynchLocksHeld {
// A sentinel value in lists that is not 0.
// A 0 value is used to mean "not on a list".
-static PerThreadSynch *const kPerThreadSynchNull =
- reinterpret_cast<PerThreadSynch *>(1);
+static PerThreadSynch* const kPerThreadSynchNull =
+ reinterpret_cast<PerThreadSynch*>(1);
-static SynchLocksHeld *LocksHeldAlloc() {
- SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
+static SynchLocksHeld* LocksHeldAlloc() {
+ SynchLocksHeld* ret = reinterpret_cast<SynchLocksHeld*>(
base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
ret->n = 0;
ret->overflow = false;
@@ -564,24 +553,24 @@ static SynchLocksHeld *LocksHeldAlloc() {
}
// Return the PerThreadSynch-struct for this thread.
-static PerThreadSynch *Synch_GetPerThread() {
- ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
+static PerThreadSynch* Synch_GetPerThread() {
+ ThreadIdentity* identity = GetOrCreateCurrentThreadIdentity();
return &identity->per_thread_synch;
}
-static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
+static PerThreadSynch* Synch_GetPerThreadAnnotated(Mutex* mu) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
- PerThreadSynch *w = Synch_GetPerThread();
+ PerThreadSynch* w = Synch_GetPerThread();
if (mu) {
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
}
return w;
}
-static SynchLocksHeld *Synch_GetAllLocks() {
- PerThreadSynch *s = Synch_GetPerThread();
+static SynchLocksHeld* Synch_GetAllLocks() {
+ PerThreadSynch* s = Synch_GetPerThread();
if (s->all_locks == nullptr) {
s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
}
@@ -589,7 +578,7 @@ static SynchLocksHeld *Synch_GetAllLocks() {
}
// Post on "w"'s associated PerThreadSem.
-void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+void Mutex::IncrementSynchSem(Mutex* mu, PerThreadSynch* w) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
// We miss synchronization around passing PerThreadSynch between threads
@@ -605,7 +594,7 @@ void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
}
// Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
-bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
+bool Mutex::DecrementSynchSem(Mutex* mu, PerThreadSynch* w, KernelTimeout t) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
@@ -626,7 +615,7 @@ bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
// Mutex code checking that the "waitp" field has not been reused.
void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
// Fix the per-thread state only if it exists.
- ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
+ ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
if (identity != nullptr) {
identity->per_thread_synch.suppress_fatal_errors = true;
}
@@ -635,21 +624,6 @@ void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
std::memory_order_release);
}
-// --------------------------time support
-
-// Return the current time plus the timeout. Use the same clock as
-// PerThreadSem::Wait() for consistency. Unfortunately, we don't have
-// such a choice when a deadline is given directly.
-static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
-#ifndef _WIN32
- struct timeval tv;
- gettimeofday(&tv, nullptr);
- return absl::TimeFromTimeval(tv) + timeout;
-#else
- return absl::Now() + timeout;
-#endif
-}
-
// --------------------------Mutexes
// In the layout below, the msb of the bottom byte is currently unused. Also,
@@ -660,24 +634,29 @@ static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
// bit-twiddling trick in Mutex::Unlock().
// o kMuWriter / kMuReader == kMuWrWait / kMuWait,
// to enable the bit-twiddling trick in CheckForMutexCorruption().
-static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
-static const intptr_t kMuDesig = 0x0002L; // there's a designated waker
-static const intptr_t kMuWait = 0x0004L; // threads are waiting
-static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
-static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
+static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
+// There's a designated waker.
// INVARIANT1: there's a thread that was blocked on the mutex, is
// no longer, yet has not yet acquired the mutex. If there's a
// designated waker, all threads can avoid taking the slow path in
// unlock because the designated waker will subsequently acquire
// the lock and wake someone. To maintain INVARIANT1 the bit is
 // set when a thread is unblocked (INV1a), and threads that were
-// unblocked reset the bit when they either acquire or re-block
-// (INV1b).
-static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting
- // for a reader
-static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
-static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
-static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
+// unblocked reset the bit when they either acquire or re-block (INV1b).
+static const intptr_t kMuDesig = 0x0002L;
+static const intptr_t kMuWait = 0x0004L; // threads are waiting
+static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
+static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
+// Runnable writer is waiting for a reader.
+// If set, new readers will not lock the mutex to avoid writer starvation.
+// Note: if a reader has higher priority than the writer, it will still lock
+// the mutex ahead of the waiting writer, but in a very inefficient manner:
+// the reader will first queue itself and block, but then the last unlocking
+// reader will wake it.
+static const intptr_t kMuWrWait = 0x0020L;
+static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
+static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
+static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
// Hack to make constant values available to gdb pretty printer
enum {
@@ -773,8 +752,8 @@ Mutex::~Mutex() {
ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
}
-void Mutex::EnableDebugLog(const char *name) {
- SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
+void Mutex::EnableDebugLog(const char* name) {
+ SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
e->log = true;
UnrefSynchEvent(e);
}
@@ -783,11 +762,10 @@ void EnableMutexInvariantDebugging(bool enabled) {
synch_check_invariants.store(enabled, std::memory_order_release);
}
-void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
- void *arg) {
+void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) {
if (synch_check_invariants.load(std::memory_order_acquire) &&
invariant != nullptr) {
- SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
+ SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
e->invariant = invariant;
e->arg = arg;
UnrefSynchEvent(e);
@@ -803,15 +781,15 @@ void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
// waiters with the same condition, type of lock, and thread priority.
//
// Requires that x and y be waiting on the same Mutex queue.
-static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
+static bool MuEquivalentWaiter(PerThreadSynch* x, PerThreadSynch* y) {
return x->waitp->how == y->waitp->how && x->priority == y->priority &&
Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
}
// Given the contents of a mutex word containing a PerThreadSynch pointer,
// return the pointer.
-static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
- return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
+static inline PerThreadSynch* GetPerThreadSynch(intptr_t v) {
+ return reinterpret_cast<PerThreadSynch*>(v & kMuHigh);
}
// The next several routines maintain the per-thread next and skip fields
@@ -869,17 +847,17 @@ static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
// except those in the added node and the former "head" node. This implies
// that the new node is added after head, and so must be the new head or the
// new front of the queue.
-static PerThreadSynch *Skip(PerThreadSynch *x) {
- PerThreadSynch *x0 = nullptr;
- PerThreadSynch *x1 = x;
- PerThreadSynch *x2 = x->skip;
+static PerThreadSynch* Skip(PerThreadSynch* x) {
+ PerThreadSynch* x0 = nullptr;
+ PerThreadSynch* x1 = x;
+ PerThreadSynch* x2 = x->skip;
if (x2 != nullptr) {
// Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
// such that x1 == x0->skip && x2 == x1->skip
while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
- x0->skip = x2; // short-circuit skip from x0 to x2
+ x0->skip = x2; // short-circuit skip from x0 to x2
}
- x->skip = x1; // short-circuit skip from x to result
+ x->skip = x1; // short-circuit skip from x to result
}
return x1;
}
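
Skip() above is path compression over the skip chain: each traversal re-points the nodes it visits toward the end of the chain, so repeated scans over a long run of equivalent waiters stay cheap. The same compression on a detached, hypothetical node type:

struct Node {
  Node* skip = nullptr;  // jumps over equivalent entries; nullptr at chain end
};

Node* SkipToEnd(Node* x) {
  Node* x1 = x;
  Node* x2 = x->skip;
  while (x2 != nullptr) {
    Node* x0 = x1;
    x1 = x2;
    x2 = x2->skip;
    if (x2 != nullptr) x0->skip = x2;  // short-circuit x0 past x1
  }
  if (x1 != x) x->skip = x1;  // x now jumps straight to the chain's last node
  return x1;
}
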
@@ -888,7 +866,7 @@ static PerThreadSynch *Skip(PerThreadSynch *x) {
// The latter is going to be removed out of order, because of a timeout.
// Check whether "ancestor" has a skip field pointing to "to_be_removed",
// and fix it if it does.
-static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
+static void FixSkip(PerThreadSynch* ancestor, PerThreadSynch* to_be_removed) {
if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
if (to_be_removed->skip != nullptr) {
ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
@@ -900,7 +878,7 @@ static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
}
}
-static void CondVarEnqueue(SynchWaitParams *waitp);
+static void CondVarEnqueue(SynchWaitParams* waitp);
// Enqueue thread "waitp->thread" on a waiter queue.
// Called with mutex spinlock held if head != nullptr
@@ -921,8 +899,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp);
// returned. This mechanism is used by CondVar to queue a thread on the
// condition variable queue instead of the mutex queue in implementing Wait().
// In this case, Enqueue() can return nullptr (if head==nullptr).
-static PerThreadSynch *Enqueue(PerThreadSynch *head,
- SynchWaitParams *waitp, intptr_t mu, int flags) {
+static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,
+ intptr_t mu, int flags) {
// If we have been given a cv_word, call CondVarEnqueue() and return
// the previous head of the Mutex waiter queue.
if (waitp->cv_word != nullptr) {
@@ -930,42 +908,43 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
return head;
}
- PerThreadSynch *s = waitp->thread;
+ PerThreadSynch* s = waitp->thread;
ABSL_RAW_CHECK(
s->waitp == nullptr || // normal case
s->waitp == waitp || // Fer()---transfer from condition variable
s->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
s->waitp = waitp;
- s->skip = nullptr; // maintain skip invariant (see above)
- s->may_skip = true; // always true on entering queue
- s->wake = false; // not being woken
+ s->skip = nullptr; // maintain skip invariant (see above)
+ s->may_skip = true; // always true on entering queue
+ s->wake = false; // not being woken
s->cond_waiter = ((flags & kMuIsCond) != 0);
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+ int64_t now_cycles = CycleClock::Now();
+ if (s->next_priority_read_cycles < now_cycles) {
+ // Every so often, update our idea of the thread's priority.
+ // pthread_getschedparam() is 5% of the block/wakeup time;
+ // CycleClock::Now() is 0.5%.
+ int policy;
+ struct sched_param param;
+ const int err = pthread_getschedparam(pthread_self(), &policy, &param);
+ if (err != 0) {
+ ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
+ } else {
+ s->priority = param.sched_priority;
+ s->next_priority_read_cycles =
+ now_cycles + static_cast<int64_t>(CycleClock::Frequency());
+ }
+ }
+#endif
if (head == nullptr) { // s is the only waiter
s->next = s; // it's the only entry in the cycle
s->readers = mu; // reader count is from mu word
s->maybe_unlocking = false; // no one is searching an empty list
head = s; // s is new head
} else {
- PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element
+ PerThreadSynch* enqueue_after = nullptr; // we'll put s after this element
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
- int64_t now_cycles = base_internal::CycleClock::Now();
- if (s->next_priority_read_cycles < now_cycles) {
- // Every so often, update our idea of the thread's priority.
- // pthread_getschedparam() is 5% of the block/wakeup time;
- // base_internal::CycleClock::Now() is 0.5%.
- int policy;
- struct sched_param param;
- const int err = pthread_getschedparam(pthread_self(), &policy, &param);
- if (err != 0) {
- ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
- } else {
- s->priority = param.sched_priority;
- s->next_priority_read_cycles =
- now_cycles +
- static_cast<int64_t>(base_internal::CycleClock::Frequency());
- }
- }
if (s->priority > head->priority) { // s's priority is above head's
// try to put s in priority-fifo order, or failing that at the front.
if (!head->maybe_unlocking) {
@@ -975,20 +954,20 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
// Within a skip chain, all waiters have the same priority, so we can
// skip forward through the chains until we find one with a lower
// priority than the waiter to be enqueued.
- PerThreadSynch *advance_to = head; // next value of enqueue_after
+ PerThreadSynch* advance_to = head; // next value of enqueue_after
do {
enqueue_after = advance_to;
// (side-effect: optimizes skip chain)
advance_to = Skip(enqueue_after->next);
} while (s->priority <= advance_to->priority);
- // termination guaranteed because s->priority > head->priority
- // and head is the end of a skip chain
+ // termination guaranteed because s->priority > head->priority
+ // and head is the end of a skip chain
} else if (waitp->how == kExclusive &&
Condition::GuaranteedEqual(waitp->cond, nullptr)) {
// An unlocker could be scanning the queue, but we know it will recheck
// the queue front for writers that have no condition, which is what s
// is, so an insert at front is safe.
- enqueue_after = head; // add after head, at front
+ enqueue_after = head; // add after head, at front
}
}
#endif
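
The block hoisted above rate-limits an expensive call: pthread_getschedparam() costs about 5% of the block/wakeup time, so the priority is cached and refreshed at most roughly once per second of cycle time; moving it before the empty-queue check also means the very first waiter gets a current priority. A standalone sketch of the caching idiom (names hypothetical):

#include <pthread.h>

#include <cstdint>

struct PriorityCache {
  int priority = 0;
  int64_t next_read_cycles = 0;
};

// Refresh the cached priority at most once per cycles_per_second ticks.
void MaybeRefreshPriority(PriorityCache& cache, int64_t now_cycles,
                          int64_t cycles_per_second) {
  if (cache.next_read_cycles >= now_cycles) return;  // cache is still fresh
  int policy;
  sched_param param;
  if (pthread_getschedparam(pthread_self(), &policy, &param) == 0) {
    cache.priority = param.sched_priority;
    cache.next_read_cycles = now_cycles + cycles_per_second;
  }
}
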
@@ -1013,12 +992,12 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
enqueue_after->skip = enqueue_after->next;
}
if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
- s->skip = s->next; // s may skip to its successor
+ s->skip = s->next; // s may skip to its successor
}
- } else { // enqueue not done any other way, so
- // we're inserting s at the back
+ } else { // enqueue not done any other way, so
+ // we're inserting s at the back
// s will become new head; copy data from head into it
- s->next = head->next; // add s after head
+ s->next = head->next; // add s after head
head->next = s;
s->readers = head->readers; // reader count is from previous head
s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
@@ -1037,17 +1016,17 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
// whose last element is head. The new head element is returned, or null
// if the list is made empty.
// Dequeue is called with both spinlock and Mutex held.
-static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
- PerThreadSynch *w = pw->next;
- pw->next = w->next; // snip w out of list
- if (head == w) { // we removed the head
+static PerThreadSynch* Dequeue(PerThreadSynch* head, PerThreadSynch* pw) {
+ PerThreadSynch* w = pw->next;
+ pw->next = w->next; // snip w out of list
+ if (head == w) { // we removed the head
head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
} else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
// pw can skip to its new successor
if (pw->next->skip !=
nullptr) { // either skip to its successors skip target
pw->skip = pw->next->skip;
- } else { // or to pw's successor
+ } else { // or to pw's successor
pw->skip = pw->next;
}
}
@@ -1060,27 +1039,27 @@ static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
// singly-linked list wake_list in the order found. Assumes that
// there is only one such element if the element has how == kExclusive.
// Return the new head.
-static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
- PerThreadSynch *pw,
- PerThreadSynch **wake_tail) {
- PerThreadSynch *orig_h = head;
- PerThreadSynch *w = pw->next;
+static PerThreadSynch* DequeueAllWakeable(PerThreadSynch* head,
+ PerThreadSynch* pw,
+ PerThreadSynch** wake_tail) {
+ PerThreadSynch* orig_h = head;
+ PerThreadSynch* w = pw->next;
bool skipped = false;
do {
- if (w->wake) { // remove this element
+ if (w->wake) { // remove this element
ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
// we're removing pw's successor so either pw->skip is zero or we should
// already have removed pw since if pw->skip!=null, pw has the same
// condition as w.
head = Dequeue(head, pw);
- w->next = *wake_tail; // keep list terminated
- *wake_tail = w; // add w to wake_list;
- wake_tail = &w->next; // next addition to end
+ w->next = *wake_tail; // keep list terminated
+ *wake_tail = w; // add w to wake_list;
+ wake_tail = &w->next; // next addition to end
if (w->waitp->how == kExclusive) { // wake at most 1 writer
break;
}
- } else { // not waking this one; skip
- pw = Skip(w); // skip as much as possible
+ } else { // not waking this one; skip
+ pw = Skip(w); // skip as much as possible
skipped = true;
}
w = pw->next;
@@ -1098,7 +1077,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
// Try to remove thread s from the list of waiters on this mutex.
// Does nothing if s is not on the waiter list.
-void Mutex::TryRemove(PerThreadSynch *s) {
+void Mutex::TryRemove(PerThreadSynch* s) {
SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
// acquire spinlock & lock
@@ -1106,16 +1085,16 @@ void Mutex::TryRemove(PerThreadSynch *s) {
mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
std::memory_order_acquire,
std::memory_order_relaxed)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
+ PerThreadSynch* h = GetPerThreadSynch(v);
if (h != nullptr) {
- PerThreadSynch *pw = h; // pw is w's predecessor
- PerThreadSynch *w;
+ PerThreadSynch* pw = h; // pw is w's predecessor
+ PerThreadSynch* w;
if ((w = pw->next) != s) { // search for thread,
do { // processing at least one element
// If the current element isn't equivalent to the waiter to be
// removed, we can skip the entire chain.
if (!MuEquivalentWaiter(s, w)) {
- pw = Skip(w); // so skip all that won't match
+ pw = Skip(w); // so skip all that won't match
// we don't have to worry about dangling skip fields
// in the threads we skipped; none can point to s
// because they are in a different equivalence class.
@@ -1127,7 +1106,7 @@ void Mutex::TryRemove(PerThreadSynch *s) {
// process the first thread again.
} while ((w = pw->next) != s && pw != h);
}
- if (w == s) { // found thread; remove it
+ if (w == s) { // found thread; remove it
// pw->skip may be non-zero here; the loop above ensured that
// no ancestor of s can skip to s, so removal is safe anyway.
h = Dequeue(h, pw);
@@ -1136,16 +1115,15 @@ void Mutex::TryRemove(PerThreadSynch *s) {
}
}
intptr_t nv;
- do { // release spinlock and lock
+ do { // release spinlock and lock
v = mu_.load(std::memory_order_relaxed);
nv = v & (kMuDesig | kMuEvent);
if (h != nullptr) {
nv |= kMuWait | reinterpret_cast<intptr_t>(h);
- h->readers = 0; // we hold writer lock
+ h->readers = 0; // we hold writer lock
h->maybe_unlocking = false; // finished unlocking
}
- } while (!mu_.compare_exchange_weak(v, nv,
- std::memory_order_release,
+ } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
std::memory_order_relaxed));
}
}
@@ -1155,7 +1133,7 @@ void Mutex::TryRemove(PerThreadSynch *s) {
// if the wait extends past the absolute time specified, even if "s" is still
// on the mutex queue. In this case, remove "s" from the queue and return
// true, otherwise return false.
-void Mutex::Block(PerThreadSynch *s) {
+void Mutex::Block(PerThreadSynch* s) {
while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
// After a timeout, we go into a spin loop until we remove ourselves
@@ -1174,7 +1152,7 @@ void Mutex::Block(PerThreadSynch *s) {
// is not on the queue.
this->TryRemove(s);
}
- s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
+ s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
s->waitp->cond = nullptr; // condition no longer relevant for wakeups
}
}
@@ -1184,8 +1162,8 @@ void Mutex::Block(PerThreadSynch *s) {
}
// Wake thread w, and return the next thread in the list.
-PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
- PerThreadSynch *next = w->next;
+PerThreadSynch* Mutex::Wakeup(PerThreadSynch* w) {
+ PerThreadSynch* next = w->next;
w->next = nullptr;
w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
IncrementSynchSem(this, w);
@@ -1193,7 +1171,7 @@ PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
return next;
}
-static GraphId GetGraphIdLocked(Mutex *mu)
+static GraphId GetGraphIdLocked(Mutex* mu)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
if (!deadlock_graph) { // (re)create the deadlock graph.
deadlock_graph =
@@ -1203,7 +1181,7 @@ static GraphId GetGraphIdLocked(Mutex *mu)
return deadlock_graph->GetId(mu);
}
-static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
+static GraphId GetGraphId(Mutex* mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
deadlock_graph_mu.Lock();
GraphId id = GetGraphIdLocked(mu);
deadlock_graph_mu.Unlock();
@@ -1213,7 +1191,7 @@ static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
// Record a lock acquisition. This is used in debug mode for deadlock
// detection. The held_locks pointer points to the relevant data
// structure for each case.
-static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
int n = held_locks->n;
int i = 0;
while (i != n && held_locks->locks[i].id != id) {
@@ -1237,7 +1215,7 @@ static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
// eventually followed by a call to LockLeave(mu, id, x) by the same thread.
// It does not process the event if is not needed when deadlock detection is
// disabled.
-static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
int n = held_locks->n;
int i = 0;
while (i != n && held_locks->locks[i].id != id) {
@@ -1252,11 +1230,11 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
i++;
}
if (i == n) { // mu missing means releasing unheld lock
- SynchEvent *mu_events = GetSynchEvent(mu);
+ SynchEvent* mu_events = GetSynchEvent(mu);
     ABSL_RAW_LOG(FATAL,
                  "thread releasing lock it does not hold: %p %s; ",
- static_cast<void *>(mu),
+ static_cast<void*>(mu),
mu_events == nullptr ? "" : mu_events->name);
}
}
@@ -1273,7 +1251,7 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
}
// Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu) {
+static inline void DebugOnlyLockEnter(Mutex* mu) {
if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
@@ -1283,7 +1261,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu) {
}
// Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
+static inline void DebugOnlyLockEnter(Mutex* mu, GraphId id) {
if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
@@ -1293,7 +1271,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
}
// Call LockLeave() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockLeave(Mutex *mu) {
+static inline void DebugOnlyLockLeave(Mutex* mu) {
if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
@@ -1302,9 +1280,9 @@ static inline void DebugOnlyLockLeave(Mutex *mu) {
}
}
-static char *StackString(void **pcs, int n, char *buf, int maxlen,
+static char* StackString(void** pcs, int n, char* buf, int maxlen,
bool symbolize) {
- static const int kSymLen = 200;
+ static constexpr int kSymLen = 200;
char sym[kSymLen];
int len = 0;
for (int i = 0; i != n; i++) {
@@ -1312,7 +1290,7 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen,
return buf;
size_t count = static_cast<size_t>(maxlen - len);
if (symbolize) {
- if (!symbolizer(pcs[i], sym, kSymLen)) {
+ if (!absl::Symbolize(pcs[i], sym, kSymLen)) {
sym[0] = '\0';
}
snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
@@ -1325,15 +1303,17 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen,
return buf;
}
-static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
- void *pcs[40];
+static char* CurrentStackString(char* buf, int maxlen, bool symbolize) {
+ void* pcs[40];
return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
maxlen, symbolize);
}
namespace {
-enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle;
- // a path this long would be remarkable
+enum {
+ kMaxDeadlockPathLen = 10
+}; // maximum length of a deadlock cycle;
+ // a path this long would be remarkable
// Buffers required to report a deadlock.
// We do not allocate them on stack to avoid large stack frame.
struct DeadlockReportBuffers {
@@ -1343,11 +1323,11 @@ struct DeadlockReportBuffers {
struct ScopedDeadlockReportBuffers {
ScopedDeadlockReportBuffers() {
- b = reinterpret_cast<DeadlockReportBuffers *>(
+ b = reinterpret_cast<DeadlockReportBuffers*>(
base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
}
~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
- DeadlockReportBuffers *b;
+ DeadlockReportBuffers* b;
};
// Helper to pass to GraphCycles::UpdateStackTrace.
@@ -1358,13 +1338,13 @@ int GetStack(void** stack, int max_depth) {
// Called in debug mode when a thread is about to acquire a lock in a way that
// may block.
-static GraphId DeadlockCheck(Mutex *mu) {
+static GraphId DeadlockCheck(Mutex* mu) {
if (synch_deadlock_detection.load(std::memory_order_acquire) ==
OnDeadlockCycle::kIgnore) {
return InvalidGraphId();
}
- SynchLocksHeld *all_locks = Synch_GetAllLocks();
+ SynchLocksHeld* all_locks = Synch_GetAllLocks();
absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
const GraphId mu_id = GetGraphIdLocked(mu);
@@ -1386,8 +1366,8 @@ static GraphId DeadlockCheck(Mutex *mu) {
// For each other mutex already held by this thread:
for (int i = 0; i != all_locks->n; i++) {
const GraphId other_node_id = all_locks->locks[i].id;
- const Mutex *other =
- static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
+ const Mutex* other =
+ static_cast<const Mutex*>(deadlock_graph->Ptr(other_node_id));
if (other == nullptr) {
// Ignore stale lock
continue;
@@ -1396,7 +1376,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
// Add the acquired-before edge to the graph.
if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
ScopedDeadlockReportBuffers scoped_buffers;
- DeadlockReportBuffers *b = scoped_buffers.b;
+ DeadlockReportBuffers* b = scoped_buffers.b;
static int number_of_reported_deadlocks = 0;
number_of_reported_deadlocks++;
// Symbolize only 2 first deadlock report to avoid huge slowdowns.
@@ -1407,37 +1387,40 @@ static GraphId DeadlockCheck(Mutex *mu) {
for (int j = 0; j != all_locks->n; j++) {
void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
if (pr != nullptr) {
- snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
+ snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr);
len += strlen(&b->buf[len]);
}
}
ABSL_RAW_LOG(ERROR,
"Acquiring absl::Mutex %p while holding %s; a cycle in the "
"historical lock ordering graph has been observed",
- static_cast<void *>(mu), b->buf);
+ static_cast<void*>(mu), b->buf);
ABSL_RAW_LOG(ERROR, "Cycle: ");
- int path_len = deadlock_graph->FindPath(
- mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
- for (int j = 0; j != path_len; j++) {
+ int path_len = deadlock_graph->FindPath(mu_id, other_node_id,
+ ABSL_ARRAYSIZE(b->path), b->path);
+ for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) {
GraphId id = b->path[j];
- Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
+ Mutex* path_mu = static_cast<Mutex*>(deadlock_graph->Ptr(id));
if (path_mu == nullptr) continue;
void** stack;
int depth = deadlock_graph->GetStackTrace(id, &stack);
snprintf(b->buf, sizeof(b->buf),
- "mutex@%p stack: ", static_cast<void *>(path_mu));
+ "mutex@%p stack: ", static_cast<void*>(path_mu));
StackString(stack, depth, b->buf + strlen(b->buf),
static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
symbolize);
ABSL_RAW_LOG(ERROR, "%s", b->buf);
}
+ if (path_len > static_cast<int>(ABSL_ARRAYSIZE(b->path))) {
+ ABSL_RAW_LOG(ERROR, "(long cycle; list truncated)");
+ }
if (synch_deadlock_detection.load(std::memory_order_acquire) ==
OnDeadlockCycle::kAbort) {
deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
return mu_id;
}
- break; // report at most one potential deadlock per acquisition
+ break; // report at most one potential deadlock per acquisition
}
}
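
For reference, the detector being walked here is controlled through a public knob; a short caller-side sketch (SetMutexDeadlockDetectionMode and OnDeadlockCycle are the existing absl API, while the wrapper function is hypothetical):

#include "absl/synchronization/mutex.h"

// Report historical lock-ordering cycles like the one logged above; use
// OnDeadlockCycle::kAbort to die instead of just logging.
void EnableDeadlockReports() {
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
}
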
@@ -1446,7 +1429,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
// Invoke DeadlockCheck() iff we're in debug mode and
// deadlock checking has been enabled.
-static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
+static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) {
if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
return DeadlockCheck(mu);
@@ -1473,13 +1456,13 @@ void Mutex::AssertNotHeld() const {
(mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
- GraphId id = GetGraphId(const_cast<Mutex *>(this));
- SynchLocksHeld *locks = Synch_GetAllLocks();
+ GraphId id = GetGraphId(const_cast<Mutex*>(this));
+ SynchLocksHeld* locks = Synch_GetAllLocks();
for (int i = 0; i != locks->n; i++) {
if (locks->locks[i].id == id) {
- SynchEvent *mu_events = GetSynchEvent(this);
+ SynchEvent* mu_events = GetSynchEvent(this);
ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
- static_cast<const void *>(this),
+ static_cast<const void*>(this),
(mu_events == nullptr ? "" : mu_events->name));
}
}
@@ -1492,8 +1475,8 @@ static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
int c = GetMutexGlobals().spinloop_iterations;
do { // do/while somewhat faster on AMD
intptr_t v = mu->load(std::memory_order_relaxed);
- if ((v & (kMuReader|kMuEvent)) != 0) {
- return false; // a reader or tracing -> give up
+ if ((v & (kMuReader | kMuEvent)) != 0) {
+ return false; // a reader or tracing -> give up
} else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
mu->compare_exchange_strong(v, kMuWriter | v,
std::memory_order_acquire,
@@ -1510,8 +1493,7 @@ void Mutex::Lock() {
intptr_t v = mu_.load(std::memory_order_relaxed);
// try fast acquire, then spin loop
if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
- !mu_.compare_exchange_strong(v, kMuWriter | v,
- std::memory_order_acquire,
+ !mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
std::memory_order_relaxed)) {
// try spin acquire, then slow loop
if (!TryAcquireWithSpinning(&this->mu_)) {
@@ -1537,7 +1519,7 @@ void Mutex::ReaderLock() {
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
}
-void Mutex::LockWhen(const Condition &cond) {
+void Mutex::LockWhen(const Condition& cond) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this);
this->LockSlow(kExclusive, &cond, 0);
@@ -1545,21 +1527,26 @@ void Mutex::LockWhen(const Condition &cond) {
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
}
-bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
- return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+ return res;
}
-bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
+bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this);
- bool res = LockSlowWithDeadline(kExclusive, &cond,
- KernelTimeout(deadline), 0);
+ bool res =
+ LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(deadline), 0);
DebugOnlyLockEnter(this, id);
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
return res;
}
-void Mutex::ReaderLockWhen(const Condition &cond) {
+void Mutex::ReaderLockWhen(const Condition& cond) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this);
this->LockSlow(kShared, &cond, 0);
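
With this change the Duration overloads feed a relative KernelTimeout straight to LockSlowWithDeadline instead of first converting to an absolute deadline. A caller-side sketch of the timeout path (the helper function is hypothetical):

#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"

// Blocks until *has_work is true or two seconds pass. The Mutex is held on
// return either way, so both outcomes must unlock.
bool WaitForWork(absl::Mutex* mu, bool* has_work) {
  bool ok = mu->LockWhenWithTimeout(absl::Condition(has_work),
                                    absl::Seconds(2));
  mu->Unlock();
  return ok;  // true iff the condition became true within the timeout
}
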
@@ -1567,12 +1554,17 @@ void Mutex::ReaderLockWhen(const Condition &cond) {
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
}
-bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
+bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
absl::Duration timeout) {
- return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+ return res;
}
-bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
+bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond,
absl::Time deadline) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this);
@@ -1582,23 +1574,34 @@ bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
return res;
}
-void Mutex::Await(const Condition &cond) {
- if (cond.Eval()) { // condition already true; nothing to do
+void Mutex::Await(const Condition& cond) {
+ if (cond.Eval()) { // condition already true; nothing to do
if (kDebugMode) {
this->AssertReaderHeld();
}
- } else { // normal case
+ } else { // normal case
ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
"condition untrue on return from Await");
}
}
-bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
- return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
+bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ return true;
+ }
+
+ KernelTimeout t{timeout};
+ bool res = this->AwaitCommon(cond, t);
+ ABSL_RAW_CHECK(res || t.has_timeout(),
+ "condition untrue on return from Await");
+ return res;
}
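Note: `AwaitWithTimeout()` likewise now evaluates the condition eagerly and calls `AwaitCommon()` with a duration-based `KernelTimeout`; the `ABSL_RAW_CHECK` asserts that a false return can only happen on a genuine timeout. A usage sketch with the same hypothetical `ready` flag:

    mu.Lock();
    // Releases mu while blocked, reacquires it before returning.
    bool ok = mu.AwaitWithTimeout(absl::Condition(&ready),
                                  absl::Milliseconds(50));
    // ok == false means the 50ms timeout expired with `ready` still false;
    // mu is held again either way.
    mu.Unlock();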
-bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
- if (cond.Eval()) { // condition already true; nothing to do
+bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
+ if (cond.Eval()) { // condition already true; nothing to do
if (kDebugMode) {
this->AssertReaderHeld();
}
@@ -1612,14 +1615,14 @@ bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
return res;
}
-bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
+bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) {
this->AssertReaderHeld();
MuHow how =
(mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
- SynchWaitParams waitp(
- how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
- nullptr /*no cv_word*/);
+ SynchWaitParams waitp(how, &cond, t, nullptr /*no cvmu*/,
+ Synch_GetPerThreadAnnotated(this),
+ nullptr /*no cv_word*/);
int flags = kMuHasBlocked;
if (!Condition::GuaranteedEqual(&cond, nullptr)) {
flags |= kMuIsCond;
@@ -1639,14 +1642,13 @@ bool Mutex::TryLock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
- mu_.compare_exchange_strong(v, kMuWriter | v,
- std::memory_order_acquire,
+ mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
std::memory_order_relaxed)) {
DebugOnlyLockEnter(this);
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
return true;
}
- if ((v & kMuEvent) != 0) { // we're recording events
+ if ((v & kMuEvent) != 0) { // we're recording events
if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire
mu_.compare_exchange_strong(
v, (kExclusive->fast_or | v) + kExclusive->fast_add,
@@ -1672,7 +1674,7 @@ bool Mutex::ReaderTryLock() {
// changing (typically because the reader count changes) under the CAS. We
// limit the number of attempts to avoid having to think about livelock.
int loop_limit = 5;
- while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
+ while ((v & (kMuWriter | kMuWait | kMuEvent)) == 0 && loop_limit != 0) {
if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
std::memory_order_acquire,
std::memory_order_relaxed)) {
@@ -1684,7 +1686,7 @@ bool Mutex::ReaderTryLock() {
loop_limit--;
v = mu_.load(std::memory_order_relaxed);
}
- if ((v & kMuEvent) != 0) { // we're recording events
+ if ((v & kMuEvent) != 0) { // we're recording events
loop_limit = 5;
while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
@@ -1723,7 +1725,7 @@ void Mutex::Unlock() {
// should_try_cas is whether we'll try a compare-and-swap immediately.
// NOTE: optimized out when kDebugMode is false.
bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
- (v & (kMuWait | kMuDesig)) != kMuWait);
+ (v & (kMuWait | kMuDesig)) != kMuWait);
    // But we can use an alternate computation of it that compilers
// currently don't find on their own. When that changes, this function
// can be simplified.
@@ -1740,10 +1742,9 @@ void Mutex::Unlock() {
static_cast<long long>(v), static_cast<long long>(x),
static_cast<long long>(y));
}
- if (x < y &&
- mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
- std::memory_order_release,
- std::memory_order_relaxed)) {
+ if (x < y && mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
// fast writer release (writer with no waiters or with designated waker)
} else {
this->UnlockSlow(nullptr /*no waitp*/); // take slow path
@@ -1753,7 +1754,7 @@ void Mutex::Unlock() {
// Requires v to represent a reader-locked state.
static bool ExactlyOneReader(intptr_t v) {
- assert((v & (kMuWriter|kMuReader)) == kMuReader);
+ assert((v & (kMuWriter | kMuReader)) == kMuReader);
assert((v & kMuHigh) != 0);
// The more straightforward "(v & kMuHigh) == kMuOne" also works, but
// on some architectures the following generates slightly smaller code.
@@ -1766,12 +1767,11 @@ void Mutex::ReaderUnlock() {
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
DebugOnlyLockLeave(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
- assert((v & (kMuWriter|kMuReader)) == kMuReader);
- if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
+ assert((v & (kMuWriter | kMuReader)) == kMuReader);
+ if ((v & (kMuReader | kMuWait | kMuEvent)) == kMuReader) {
// fast reader release (reader with no waiters)
- intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
- if (mu_.compare_exchange_strong(v, v - clear,
- std::memory_order_release,
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
+ if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
std::memory_order_relaxed)) {
ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
return;
@@ -1810,7 +1810,7 @@ static intptr_t IgnoreWaitingWritersMask(int flag) {
}
// Internal version of LockWhen(). See LockSlowWithDeadline()
-ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
+ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition* cond,
int flags) {
ABSL_RAW_CHECK(
this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
@@ -1818,7 +1818,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
}
// Compute cond->Eval() and tell race detectors that we do it under mutex mu.
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
bool locking, bool trylock,
bool read_lock) {
// Delicate annotation dance.
@@ -1868,7 +1868,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
// tsan). As the result there is no tsan-visible synchronization between the
// addition and this thread. So if we would enable race detection here,
// it would race with the predicate initialization.
-static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
+static inline bool EvalConditionIgnored(Mutex* mu, const Condition* cond) {
// Memory accesses are already ignored inside of lock/unlock operations,
// but synchronization operations are also ignored. When we evaluate the
// predicate we must ignore only memory accesses but not synchronization,
@@ -1893,7 +1893,7 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
// obstruct this call
// - kMuIsCond indicates that this is a conditional acquire (condition variable,
// Await, LockWhen) so contention profiling should be suppressed.
-bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
+bool Mutex::LockSlowWithDeadline(MuHow how, const Condition* cond,
KernelTimeout t, int flags) {
intptr_t v = mu_.load(std::memory_order_relaxed);
bool unlock = false;
@@ -1910,9 +1910,9 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
}
unlock = true;
}
- SynchWaitParams waitp(
- how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
- nullptr /*no cv_word*/);
+ SynchWaitParams waitp(how, cond, t, nullptr /*no cvmu*/,
+ Synch_GetPerThreadAnnotated(this),
+ nullptr /*no cv_word*/);
if (!Condition::GuaranteedEqual(cond, nullptr)) {
flags |= kMuIsCond;
}
@@ -1953,20 +1953,20 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
"%s: Mutex corrupt: both reader and writer lock held: %p",
- label, reinterpret_cast<void *>(v));
+ label, reinterpret_cast<void*>(v));
RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
- "%s: Mutex corrupt: waiting writer with no waiters: %p",
- label, reinterpret_cast<void *>(v));
+ "%s: Mutex corrupt: waiting writer with no waiters: %p", label,
+ reinterpret_cast<void*>(v));
assert(false);
}
-void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
+void Mutex::LockSlowLoop(SynchWaitParams* waitp, int flags) {
SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0) {
- PostSynchEvent(this,
- waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
+ PostSynchEvent(
+ this, waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);
}
ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
@@ -1991,11 +1991,11 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
flags |= kMuHasBlocked;
c = 0;
}
- } else { // need to access waiter list
+ } else { // need to access waiter list
bool dowait = false;
- if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters
// This thread tries to become the one and only waiter.
- PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
+ PerThreadSynch* new_h = Enqueue(nullptr, waitp, v, flags);
intptr_t nv =
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
kMuWait;
@@ -2007,7 +2007,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
v, reinterpret_cast<intptr_t>(new_h) | nv,
std::memory_order_release, std::memory_order_relaxed)) {
dowait = true;
- } else { // attempted Enqueue() failed
+ } else { // attempted Enqueue() failed
// zero out the waitp field set by Enqueue()
waitp->thread->waitp = nullptr;
}
@@ -2020,9 +2020,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
kMuSpin | kMuReader,
std::memory_order_acquire, std::memory_order_relaxed)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- h->readers += kMuOne; // inc reader count in waiter
- do { // release spinlock
+ PerThreadSynch* h = GetPerThreadSynch(v);
+ h->readers += kMuOne; // inc reader count in waiter
+ do { // release spinlock
v = mu_.load(std::memory_order_relaxed);
} while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
std::memory_order_release,
@@ -2032,7 +2032,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
waitp->how == kShared)) {
break; // we timed out, or condition true, so return
}
- this->UnlockSlow(waitp); // got lock but condition false
+ this->UnlockSlow(waitp); // got lock but condition false
this->Block(waitp->thread);
flags |= kMuHasBlocked;
c = 0;
@@ -2043,18 +2043,19 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
kMuSpin | kMuWait,
std::memory_order_acquire, std::memory_order_relaxed)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
+ PerThreadSynch* h = GetPerThreadSynch(v);
+ PerThreadSynch* new_h = Enqueue(h, waitp, v, flags);
intptr_t wr_wait = 0;
ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
if (waitp->how == kExclusive && (v & kMuReader) != 0) {
- wr_wait = kMuWrWait; // give priority to a waiting writer
+ wr_wait = kMuWrWait; // give priority to a waiting writer
}
- do { // release spinlock
+ do { // release spinlock
v = mu_.load(std::memory_order_relaxed);
} while (!mu_.compare_exchange_weak(
- v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
- reinterpret_cast<intptr_t>(new_h),
+ v,
+ (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
+ reinterpret_cast<intptr_t>(new_h),
std::memory_order_release, std::memory_order_relaxed));
dowait = true;
}
@@ -2074,9 +2075,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
if ((v & kMuEvent) != 0) {
- PostSynchEvent(this,
- waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
- SYNCH_EV_READERLOCK_RETURNING);
+ PostSynchEvent(this, waitp->how == kExclusive
+ ? SYNCH_EV_LOCK_RETURNING
+ : SYNCH_EV_READERLOCK_RETURNING);
}
}
@@ -2085,28 +2086,28 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
// which holds the lock but is not runnable because its condition is false
// or it is in the process of blocking on a condition variable; it must requeue
// itself on the mutex/condvar to wait for its condition to become true.
-ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) {
SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
this->AssertReaderHeld();
CheckForMutexCorruption(v, "Unlock");
if ((v & kMuEvent) != 0) {
- PostSynchEvent(this,
- (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
+ PostSynchEvent(
+ this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);
}
int c = 0;
// the waiter under consideration to wake, or zero
- PerThreadSynch *w = nullptr;
+ PerThreadSynch* w = nullptr;
// the predecessor to w or zero
- PerThreadSynch *pw = nullptr;
+ PerThreadSynch* pw = nullptr;
// head of the list searched previously, or zero
- PerThreadSynch *old_h = nullptr;
+ PerThreadSynch* old_h = nullptr;
// a condition that's known to be false.
- const Condition *known_false = nullptr;
- PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake
- intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
- // later writer could have acquired the lock
- // (starvation avoidance)
+ const Condition* known_false = nullptr;
+ PerThreadSynch* wake_list = kPerThreadSynchNull; // list of threads to wake
+ intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
+ // later writer could have acquired the lock
+ // (starvation avoidance)
ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
@@ -2126,8 +2127,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
} else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
// fast reader release (reader with no waiters)
intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
- if (mu_.compare_exchange_strong(v, v - clear,
- std::memory_order_release,
+ if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
std::memory_order_relaxed)) {
return;
}
@@ -2135,16 +2135,16 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
mu_.compare_exchange_strong(v, v | kMuSpin,
std::memory_order_acquire,
std::memory_order_relaxed)) {
- if ((v & kMuWait) == 0) { // no one to wake
+ if ((v & kMuWait) == 0) { // no one to wake
intptr_t nv;
bool do_enqueue = true; // always Enqueue() the first time
ABSL_RAW_CHECK(waitp != nullptr,
"UnlockSlow is confused"); // about to sleep
- do { // must loop to release spinlock as reader count may change
+ do { // must loop to release spinlock as reader count may change
v = mu_.load(std::memory_order_relaxed);
// decrement reader count if there are readers
- intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
- PerThreadSynch *new_h = nullptr;
+ intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v;
+ PerThreadSynch* new_h = nullptr;
if (do_enqueue) {
// If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
// we must not retry here. The initial attempt will always have
@@ -2168,21 +2168,20 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
}
// release spinlock & our lock; retry if reader-count changed
// (writer count cannot change since we hold lock)
- } while (!mu_.compare_exchange_weak(v, nv,
- std::memory_order_release,
+ } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
std::memory_order_relaxed));
break;
}
// There are waiters.
// Set h to the head of the circular waiter list.
- PerThreadSynch *h = GetPerThreadSynch(v);
+ PerThreadSynch* h = GetPerThreadSynch(v);
if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
// a reader but not the last
- h->readers -= kMuOne; // release our lock
- intptr_t nv = v; // normally just release spinlock
+ h->readers -= kMuOne; // release our lock
+ intptr_t nv = v; // normally just release spinlock
if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
- PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+ PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
ABSL_RAW_CHECK(new_h != nullptr,
"waiters disappeared during Enqueue()!");
nv &= kMuLow;
@@ -2200,8 +2199,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// The lock is becoming free, and there's a waiter
if (old_h != nullptr &&
- !old_h->may_skip) { // we used old_h as a terminator
- old_h->may_skip = true; // allow old_h to skip once more
+ !old_h->may_skip) { // we used old_h as a terminator
+ old_h->may_skip = true; // allow old_h to skip once more
ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
old_h->skip = old_h->next; // old_h not head & can skip to successor
@@ -2210,7 +2209,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
if (h->next->waitp->how == kExclusive &&
Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
// easy case: writer with no condition; no need to search
- pw = h; // wake w, the successor of h (=pw)
+ pw = h; // wake w, the successor of h (=pw)
w = h->next;
w->wake = true;
// We are waking up a writer. This writer may be racing against
@@ -2233,13 +2232,13 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// waiter has a condition or is a reader. We avoid searching over
// waiters we've searched on previous iterations by starting at
// old_h if it's set. If old_h==h, there's no one to wakeup at all.
- if (old_h == h) { // we've searched before, and nothing's new
- // so there's no one to wake.
- intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
+ if (old_h == h) { // we've searched before, and nothing's new
+ // so there's no one to wake.
+ intptr_t nv = (v & ~(kMuReader | kMuWriter | kMuWrWait));
h->readers = 0;
- h->maybe_unlocking = false; // finished unlocking
- if (waitp != nullptr) { // we must queue ourselves and sleep
- PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+ h->maybe_unlocking = false; // finished unlocking
+ if (waitp != nullptr) { // we must queue ourselves and sleep
+ PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
nv &= kMuLow;
if (new_h != nullptr) {
nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
@@ -2253,12 +2252,12 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
}
// set up to walk the list
- PerThreadSynch *w_walk; // current waiter during list walk
- PerThreadSynch *pw_walk; // previous waiter during list walk
+ PerThreadSynch* w_walk; // current waiter during list walk
+ PerThreadSynch* pw_walk; // previous waiter during list walk
if (old_h != nullptr) { // we've searched up to old_h before
pw_walk = old_h;
w_walk = old_h->next;
- } else { // no prior search, start at beginning
+ } else { // no prior search, start at beginning
pw_walk =
nullptr; // h->next's predecessor may change; don't record it
w_walk = h->next;
@@ -2284,7 +2283,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// to walk the path from w_walk to h inclusive. (TryRemove() can remove
// a waiter anywhere, but it acquires both the spinlock and the Mutex)
- old_h = h; // remember we searched to here
+ old_h = h; // remember we searched to here
      // Walk the path up to and including h looking for waiters we can wake.
while (pw_walk != h) {
@@ -2296,24 +2295,24 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// is in fact true
EvalConditionIgnored(this, w_walk->waitp->cond))) {
if (w == nullptr) {
- w_walk->wake = true; // can wake this waiter
+ w_walk->wake = true; // can wake this waiter
w = w_walk;
pw = pw_walk;
if (w_walk->waitp->how == kExclusive) {
wr_wait = kMuWrWait;
- break; // bail if waking this writer
+ break; // bail if waking this writer
}
} else if (w_walk->waitp->how == kShared) { // wake if a reader
w_walk->wake = true;
- } else { // writer with true condition
+ } else { // writer with true condition
wr_wait = kMuWrWait;
}
- } else { // can't wake; condition false
+ } else { // can't wake; condition false
known_false = w_walk->waitp->cond; // remember last false condition
}
- if (w_walk->wake) { // we're waking reader w_walk
- pw_walk = w_walk; // don't skip similar waiters
- } else { // not waking; skip as much as possible
+ if (w_walk->wake) { // we're waking reader w_walk
+ pw_walk = w_walk; // don't skip similar waiters
+ } else { // not waking; skip as much as possible
pw_walk = Skip(w_walk);
}
// If pw_walk == h, then load of pw_walk->next can race with
@@ -2340,8 +2339,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
h = DequeueAllWakeable(h, pw, &wake_list);
intptr_t nv = (v & kMuEvent) | kMuDesig;
- // assume no waiters left,
- // set kMuDesig for INV1a
+ // assume no waiters left,
+ // set kMuDesig for INV1a
if (waitp != nullptr) { // we must queue ourselves and sleep
h = Enqueue(h, waitp, v, kMuIsCond);
@@ -2354,7 +2353,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
if (h != nullptr) { // there are waiters left
h->readers = 0;
- h->maybe_unlocking = false; // finished unlocking
+ h->maybe_unlocking = false; // finished unlocking
nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
}
@@ -2365,12 +2364,12 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
}
// aggressive here; no one can proceed till we do
c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
- } // end of for(;;)-loop
+ } // end of for(;;)-loop
if (wake_list != kPerThreadSynchNull) {
int64_t total_wait_cycles = 0;
int64_t max_wait_cycles = 0;
- int64_t now = base_internal::CycleClock::Now();
+ int64_t now = CycleClock::Now();
do {
// Profile lock contention events only if the waiter was trying to acquire
// the lock, not waiting on a condition variable or Condition.
@@ -2382,7 +2381,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
wake_list->waitp->contention_start_cycles = now;
wake_list->waitp->should_submit_contention_data = true;
}
- wake_list = Wakeup(wake_list); // wake waiters
+ wake_list = Wakeup(wake_list); // wake waiters
} while (wake_list != kPerThreadSynchNull);
if (total_wait_cycles > 0) {
mutex_tracer("slow release", this, total_wait_cycles);
@@ -2410,7 +2409,7 @@ void Mutex::Trans(MuHow how) {
// condition variable. If this mutex is free, we simply wake the thread.
// It will later acquire the mutex with high probability. Otherwise, we
// enqueue thread w on this mutex.
-void Mutex::Fer(PerThreadSynch *w) {
+void Mutex::Fer(PerThreadSynch* w) {
SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
ABSL_RAW_CHECK(w->waitp->cond == nullptr,
@@ -2435,9 +2434,9 @@ void Mutex::Fer(PerThreadSynch *w) {
IncrementSynchSem(this, w);
return;
} else {
- if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters
// This thread tries to become the one and only waiter.
- PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
+ PerThreadSynch* new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
ABSL_RAW_CHECK(new_h != nullptr,
"Enqueue failed"); // we must queue ourselves
if (mu_.compare_exchange_strong(
@@ -2447,8 +2446,8 @@ void Mutex::Fer(PerThreadSynch *w) {
}
} else if ((v & kMuSpin) == 0 &&
mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
+ PerThreadSynch* h = GetPerThreadSynch(v);
+ PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond);
ABSL_RAW_CHECK(new_h != nullptr,
"Enqueue failed"); // we must queue ourselves
do {
@@ -2467,19 +2466,18 @@ void Mutex::Fer(PerThreadSynch *w) {
void Mutex::AssertHeld() const {
if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
- SynchEvent *e = GetSynchEvent(this);
+ SynchEvent* e = GetSynchEvent(this);
ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
- static_cast<const void *>(this),
- (e == nullptr ? "" : e->name));
+ static_cast<const void*>(this), (e == nullptr ? "" : e->name));
}
}
void Mutex::AssertReaderHeld() const {
if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
- SynchEvent *e = GetSynchEvent(this);
- ABSL_RAW_LOG(
- FATAL, "thread should hold at least a read lock on Mutex %p %s",
- static_cast<const void *>(this), (e == nullptr ? "" : e->name));
+ SynchEvent* e = GetSynchEvent(this);
+ ABSL_RAW_LOG(FATAL,
+ "thread should hold at least a read lock on Mutex %p %s",
+ static_cast<const void*>(this), (e == nullptr ? "" : e->name));
}
}
@@ -2490,13 +2488,17 @@ static const intptr_t kCvEvent = 0x0002L; // record events
static const intptr_t kCvLow = 0x0003L; // low order bits of CV
// Hack to make constant values available to gdb pretty printer
-enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
+enum {
+ kGdbCvSpin = kCvSpin,
+ kGdbCvEvent = kCvEvent,
+ kGdbCvLow = kCvLow,
+};
static_assert(PerThreadSynch::kAlignment > kCvLow,
"PerThreadSynch::kAlignment must be greater than kCvLow");
-void CondVar::EnableDebugLog(const char *name) {
- SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
+void CondVar::EnableDebugLog(const char* name) {
+ SynchEvent* e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
e->log = true;
UnrefSynchEvent(e);
}
@@ -2507,25 +2509,23 @@ CondVar::~CondVar() {
}
}
-
// Remove thread s from the list of waiters on this condition variable.
-void CondVar::Remove(PerThreadSynch *s) {
+void CondVar::Remove(PerThreadSynch* s) {
SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v;
int c = 0;
for (v = cv_.load(std::memory_order_relaxed);;
v = cv_.load(std::memory_order_relaxed)) {
if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
- cv_.compare_exchange_strong(v, v | kCvSpin,
- std::memory_order_acquire,
+ cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h != nullptr) {
- PerThreadSynch *w = h;
+ PerThreadSynch* w = h;
while (w->next != s && w->next != h) { // search for thread
w = w->next;
}
- if (w->next == s) { // found thread; remove it
+ if (w->next == s) { // found thread; remove it
w->next = s->next;
if (h == s) {
h = (w == s) ? nullptr : w;
@@ -2534,7 +2534,7 @@ void CondVar::Remove(PerThreadSynch *s) {
s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
}
}
- // release spinlock
+ // release spinlock
cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
std::memory_order_release);
return;
@@ -2557,14 +2557,14 @@ void CondVar::Remove(PerThreadSynch *s) {
// variable queue just before the mutex is to be unlocked, and (most
// importantly) after any call to an external routine that might re-enter the
// mutex code.
-static void CondVarEnqueue(SynchWaitParams *waitp) {
+static void CondVarEnqueue(SynchWaitParams* waitp) {
// This thread might be transferred to the Mutex queue by Fer() when
// we are woken. To make sure that is what happens, Enqueue() doesn't
// call CondVarEnqueue() again but instead uses its normal code. We
// must do this before we queue ourselves so that cv_word will be null
// when seen by the dequeuer, who may wish immediately to requeue
// this thread on another queue.
- std::atomic<intptr_t> *cv_word = waitp->cv_word;
+ std::atomic<intptr_t>* cv_word = waitp->cv_word;
waitp->cv_word = nullptr;
intptr_t v = cv_word->load(std::memory_order_relaxed);
@@ -2577,8 +2577,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
v = cv_word->load(std::memory_order_relaxed);
}
ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
- waitp->thread->waitp = waitp; // prepare ourselves for waiting
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ waitp->thread->waitp = waitp; // prepare ourselves for waiting
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h == nullptr) { // add this thread to waiter list
waitp->thread->next = waitp->thread;
} else {
@@ -2591,8 +2591,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
std::memory_order_release);
}
-bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
- bool rc = false; // return value; true iff we timed-out
+bool CondVar::WaitCommon(Mutex* mutex, KernelTimeout t) {
+  bool rc = false;  // return value; true iff we timed out
intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
@@ -2659,27 +2659,25 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
return rc;
}
-bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
- return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
+ return WaitCommon(mu, KernelTimeout(timeout));
}
-bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
+bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) {
return WaitCommon(mu, KernelTimeout(deadline));
}
-void CondVar::Wait(Mutex *mu) {
- WaitCommon(mu, KernelTimeout::Never());
-}
+void CondVar::Wait(Mutex* mu) { WaitCommon(mu, KernelTimeout::Never()); }
// Wake thread w
// If it was a timed wait, w will be waiting on w->cv
// Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
// Otherwise, w is transferred to the Mutex mutex via Mutex::Fer().
-void CondVar::Wakeup(PerThreadSynch *w) {
+void CondVar::Wakeup(PerThreadSynch* w) {
if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
// The waiting thread only needs to observe "w->state == kAvailable" to be
// released, we must cache "cvmu" before clearing "next".
- Mutex *mu = w->waitp->cvmu;
+ Mutex* mu = w->waitp->cvmu;
w->next = nullptr;
w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
Mutex::IncrementSynchSem(mu, w);
@@ -2696,11 +2694,10 @@ void CondVar::Signal() {
for (v = cv_.load(std::memory_order_relaxed); v != 0;
v = cv_.load(std::memory_order_relaxed)) {
if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
- cv_.compare_exchange_strong(v, v | kCvSpin,
- std::memory_order_acquire,
+ cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
- PerThreadSynch *w = nullptr;
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
+ PerThreadSynch* w = nullptr;
if (h != nullptr) { // remove first waiter
w = h->next;
if (w == h) {
@@ -2709,11 +2706,11 @@ void CondVar::Signal() {
h->next = w->next;
}
}
- // release spinlock
+ // release spinlock
cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
std::memory_order_release);
if (w != nullptr) {
- CondVar::Wakeup(w); // wake waiter, if there was one
+ CondVar::Wakeup(w); // wake waiter, if there was one
cond_var_tracer("Signal wakeup", this);
}
if ((v & kCvEvent) != 0) {
@@ -2728,7 +2725,7 @@ void CondVar::Signal() {
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
}
-void CondVar::SignalAll () {
+void CondVar::SignalAll() {
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;
@@ -2742,11 +2739,11 @@ void CondVar::SignalAll () {
if ((v & kCvSpin) == 0 &&
cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h != nullptr) {
- PerThreadSynch *w;
- PerThreadSynch *n = h->next;
- do { // for every thread, wake it up
+ PerThreadSynch* w;
+ PerThreadSynch* n = h->next;
+ do { // for every thread, wake it up
w = n;
n = n->next;
CondVar::Wakeup(w);
@@ -2774,42 +2771,41 @@ void ReleasableMutexLock::Release() {
}
#ifdef ABSL_HAVE_THREAD_SANITIZER
-extern "C" void __tsan_read1(void *addr);
+extern "C" void __tsan_read1(void* addr);
#else
#define __tsan_read1(addr) // do nothing if TSan not enabled
#endif
// A function that just returns its argument, dereferenced
-static bool Dereference(void *arg) {
+static bool Dereference(void* arg) {
// ThreadSanitizer does not instrument this file for memory accesses.
// This function dereferences a user variable that can participate
// in a data race, so we need to manually tell TSan about this memory access.
__tsan_read1(arg);
- return *(static_cast<bool *>(arg));
+ return *(static_cast<bool*>(arg));
}
ABSL_CONST_INIT const Condition Condition::kTrue;
-Condition::Condition(bool (*func)(void *), void *arg)
- : eval_(&CallVoidPtrFunction),
- arg_(arg) {
+Condition::Condition(bool (*func)(void*), void* arg)
+ : eval_(&CallVoidPtrFunction), arg_(arg) {
static_assert(sizeof(&func) <= sizeof(callback_),
"An overlarge function pointer passed to Condition.");
StoreCallback(func);
}
-bool Condition::CallVoidPtrFunction(const Condition *c) {
- using FunctionPointer = bool (*)(void *);
+bool Condition::CallVoidPtrFunction(const Condition* c) {
+ using FunctionPointer = bool (*)(void*);
FunctionPointer function_pointer;
std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
return (*function_pointer)(c->arg_);
}
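Note: `CallVoidPtrFunction()` recovers the stored pointer with `std::memcpy` because `callback_` is an opaque byte buffer; memcpy is the portable way to round-trip a function pointer through raw storage. A standalone sketch of the same pattern (all names here are hypothetical, not part of the Abseil API):

    #include <cstring>

    using Fn = bool (*)(void*);

    struct CallbackCell {
      char buf[sizeof(Fn)];
      void Store(Fn f) { std::memcpy(buf, &f, sizeof(f)); }
      Fn Load() const {
        Fn f;
        std::memcpy(&f, buf, sizeof(f));  // recover the pointer byte-for-byte
        return f;
      }
    };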
-Condition::Condition(const bool *cond)
+Condition::Condition(const bool* cond)
: eval_(CallVoidPtrFunction),
// const_cast is safe since Dereference does not modify arg
- arg_(const_cast<bool *>(cond)) {
- using FunctionPointer = bool (*)(void *);
+ arg_(const_cast<bool*>(cond)) {
+ using FunctionPointer = bool (*)(void*);
const FunctionPointer dereference = Dereference;
StoreCallback(dereference);
}
@@ -2819,7 +2815,7 @@ bool Condition::Eval() const {
return (this->eval_ == nullptr) || (*this->eval_)(this);
}
-bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
+bool Condition::GuaranteedEqual(const Condition* a, const Condition* b) {
// kTrue logic.
if (a == nullptr || a->eval_ == nullptr) {
return b == nullptr || b->eval_ == nullptr;
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index f793cc0e..645c26d9 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -92,26 +92,42 @@ struct SynchWaitParams;
//
// A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
// The `Lock()` operation *acquires* a `Mutex` (in a state known as an
-// *exclusive* -- or write -- lock), while the `Unlock()` operation *releases* a
+// *exclusive* -- or *write* -- lock), and the `Unlock()` operation *releases* a
// Mutex. During the span of time between the Lock() and Unlock() operations,
-// a mutex is said to be *held*. By design all mutexes support exclusive/write
+// a mutex is said to be *held*. By design, all mutexes support exclusive/write
// locks, as this is the most common way to use a mutex.
//
+// Mutex operations are only allowed under certain conditions; otherwise an
+// operation is "invalid" and disallowed by the API. The conditions concern
+// both the current state of the mutex and the identity of the threads that
+// are performing the operations.
+//
// The `Mutex` state machine for basic lock/unlock operations is quite simple:
//
-// | | Lock() | Unlock() |
-// |----------------+------------+----------|
-// | Free | Exclusive | invalid |
-// | Exclusive | blocks | Free |
+// | | Lock() | Unlock() |
+// |----------------+------------------------+----------|
+// | Free | Exclusive | invalid |
+// | Exclusive | blocks, then exclusive | Free |
+//
+// The full conditions are as follows.
+//
+// * Calls to `Unlock()` require that the mutex be held, and must be made in the
+// same thread that performed the corresponding `Lock()` operation which
+// acquired the mutex; otherwise the call is invalid.
+//
+// * The mutex being non-reentrant (or non-recursive) means that a call to
+// `Lock()` or `TryLock()` must not be made in a thread that already holds the
+// mutex; such a call is invalid.
//
-// Attempts to `Unlock()` must originate from the thread that performed the
-// corresponding `Lock()` operation.
+// * In other words, the state of being "held" has both a temporal component
+//   (from `Lock()` until `Unlock()`) and a thread identity component:
+// the mutex is held *by a particular thread*.
//
-// An "invalid" operation is disallowed by the API. The `Mutex` implementation
-// is allowed to do anything on an invalid call, including but not limited to
+// An "invalid" operation has undefined behavior. The `Mutex` implementation
+// is allowed to do anything on an invalid call, including, but not limited to,
// crashing with a useful error message, silently succeeding, or corrupting
-// data structures. In debug mode, the implementation attempts to crash with a
-// useful error message.
+// data structures. In debug mode, the implementation may crash with a useful
+// error message.
//
// `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
// is, however, approximately fair over long periods, and starvation-free for
@@ -125,8 +141,9 @@ struct SynchWaitParams;
// issues that could potentially result in race conditions and deadlocks.
//
// For more information about the lock annotations, please see
-// [Thread Safety Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
-// in the Clang documentation.
+// [Thread Safety
+// Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
+// documentation.
//
// See also `MutexLock`, below, for scoped `Mutex` acquisition.
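To make the state table concrete, a minimal sketch of a legal and an invalid sequence (assuming a default-constructed `absl::Mutex`):

    absl::Mutex mu;

    void Legal() {
      mu.Lock();    // Free -> Exclusive
      mu.Unlock();  // Exclusive -> Free; must run on the locking thread
    }

    void Invalid() {
      mu.Lock();
      mu.Lock();    // invalid: Mutex is non-reentrant, behavior is undefined
    }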
@@ -257,7 +274,7 @@ class ABSL_LOCKABLE Mutex {
// Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
//
// These methods may be used (along with the complementary `Reader*()`
- // methods) to distingish simple exclusive `Mutex` usage (`Lock()`,
+ // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
// etc.) from reader/writer lock usage.
void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
@@ -307,7 +324,7 @@ class ABSL_LOCKABLE Mutex {
// `true`, `Await()` *may* skip the release/re-acquire step.
//
// `Await()` requires that this thread holds this `Mutex` in some mode.
- void Await(const Condition &cond);
+ void Await(const Condition& cond);
// Mutex::LockWhen()
// Mutex::ReaderLockWhen()
@@ -317,11 +334,11 @@ class ABSL_LOCKABLE Mutex {
// be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
// logically equivalent to `*Lock(); Await();` though they may have different
// performance characteristics.
- void LockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
+ void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
- void ReaderLockWhen(const Condition &cond) ABSL_SHARED_LOCK_FUNCTION();
+ void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION();
- void WriterLockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+ void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
this->LockWhen(cond);
}
@@ -346,9 +363,9 @@ class ABSL_LOCKABLE Mutex {
// Negative timeouts are equivalent to a zero timeout.
//
// This method requires that this thread holds this `Mutex` in some mode.
- bool AwaitWithTimeout(const Condition &cond, absl::Duration timeout);
+ bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout);
- bool AwaitWithDeadline(const Condition &cond, absl::Time deadline);
+ bool AwaitWithDeadline(const Condition& cond, absl::Time deadline);
// Mutex::LockWhenWithTimeout()
// Mutex::ReaderLockWhenWithTimeout()
@@ -361,11 +378,11 @@ class ABSL_LOCKABLE Mutex {
// `true` on return.
//
// Negative timeouts are equivalent to a zero timeout.
- bool LockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+ bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
ABSL_EXCLUSIVE_LOCK_FUNCTION();
- bool ReaderLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+ bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
ABSL_SHARED_LOCK_FUNCTION();
- bool WriterLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+ bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
ABSL_EXCLUSIVE_LOCK_FUNCTION() {
return this->LockWhenWithTimeout(cond, timeout);
}
@@ -381,11 +398,11 @@ class ABSL_LOCKABLE Mutex {
// on return.
//
// Deadlines in the past are equivalent to an immediate deadline.
- bool LockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+ bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline)
ABSL_EXCLUSIVE_LOCK_FUNCTION();
- bool ReaderLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+ bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
ABSL_SHARED_LOCK_FUNCTION();
- bool WriterLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+ bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
ABSL_EXCLUSIVE_LOCK_FUNCTION() {
return this->LockWhenWithDeadline(cond, deadline);
}
@@ -407,7 +424,7 @@ class ABSL_LOCKABLE Mutex {
// substantially reduce `Mutex` performance; it should be set only for
// non-production runs. Optimization options may also disable invariant
// checks.
- void EnableInvariantDebugging(void (*invariant)(void *), void *arg);
+ void EnableInvariantDebugging(void (*invariant)(void*), void* arg);
// Mutex::EnableDebugLog()
//
@@ -416,7 +433,7 @@ class ABSL_LOCKABLE Mutex {
// call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
//
// Note: This method substantially reduces `Mutex` performance.
- void EnableDebugLog(const char *name);
+ void EnableDebugLog(const char* name);
// Deadlock detection
@@ -444,7 +461,7 @@ class ABSL_LOCKABLE Mutex {
// A `MuHow` is a constant that indicates how a lock should be acquired.
// Internal implementation detail. Clients should ignore.
- typedef const struct MuHowS *MuHow;
+ typedef const struct MuHowS* MuHow;
// Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
//
@@ -466,37 +483,37 @@ class ABSL_LOCKABLE Mutex {
// Post()/Wait() versus associated PerThreadSem; in class for required
// friendship with PerThreadSem.
- static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w);
- static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w,
+ static void IncrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w);
+ static bool DecrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w,
synchronization_internal::KernelTimeout t);
// slow path acquire
- void LockSlowLoop(SynchWaitParams *waitp, int flags);
+ void LockSlowLoop(SynchWaitParams* waitp, int flags);
// wrappers around LockSlowLoop()
- bool LockSlowWithDeadline(MuHow how, const Condition *cond,
+ bool LockSlowWithDeadline(MuHow how, const Condition* cond,
synchronization_internal::KernelTimeout t,
int flags);
- void LockSlow(MuHow how, const Condition *cond,
+ void LockSlow(MuHow how, const Condition* cond,
int flags) ABSL_ATTRIBUTE_COLD;
// slow path release
- void UnlockSlow(SynchWaitParams *waitp) ABSL_ATTRIBUTE_COLD;
+ void UnlockSlow(SynchWaitParams* waitp) ABSL_ATTRIBUTE_COLD;
// Common code between Await() and AwaitWithTimeout/Deadline()
- bool AwaitCommon(const Condition &cond,
+ bool AwaitCommon(const Condition& cond,
synchronization_internal::KernelTimeout t);
// Attempt to remove thread s from queue.
- void TryRemove(base_internal::PerThreadSynch *s);
+ void TryRemove(base_internal::PerThreadSynch* s);
// Block a thread on mutex.
- void Block(base_internal::PerThreadSynch *s);
+ void Block(base_internal::PerThreadSynch* s);
// Wake a thread; return successor.
- base_internal::PerThreadSynch *Wakeup(base_internal::PerThreadSynch *w);
+ base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
friend class CondVar; // for access to Trans()/Fer().
void Trans(MuHow how); // used for CondVar->Mutex transfer
void Fer(
- base_internal::PerThreadSynch *w); // used for CondVar->Mutex transfer
+ base_internal::PerThreadSynch* w); // used for CondVar->Mutex transfer
// Catch the error of writing Mutex when intending MutexLock.
- Mutex(const volatile Mutex * /*ignored*/) {} // NOLINT(runtime/explicit)
+ explicit Mutex(const volatile Mutex* /*ignored*/) {}
Mutex(const Mutex&) = delete;
Mutex& operator=(const Mutex&) = delete;
@@ -531,28 +548,28 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
// Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
// guaranteed to be locked when this object is constructed. Requires that
// `mu` be dereferenceable.
- explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
+ explicit MutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
this->mu_->Lock();
}
// Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
// the above, the condition given by `cond` is also guaranteed to hold when
// this object is constructed.
- explicit MutexLock(Mutex *mu, const Condition &cond)
+ explicit MutexLock(Mutex* mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->LockWhen(cond);
}
- MutexLock(const MutexLock &) = delete; // NOLINT(runtime/mutex)
- MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
+ MutexLock(const MutexLock&) = delete; // NOLINT(runtime/mutex)
+ MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
MutexLock& operator=(const MutexLock&) = delete;
MutexLock& operator=(MutexLock&&) = delete;
~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
private:
- Mutex *const mu_;
+ Mutex* const mu_;
};
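A usage sketch of the RAII pattern documented above (the `counter` variable is hypothetical):

    absl::Mutex mu;
    int counter = 0;  // hypothetical; guarded by mu

    void Increment() {
      absl::MutexLock lock(&mu);  // mu.Lock() in the constructor
      ++counter;
    }                             // mu.Unlock() when `lock` is destroyed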
// ReaderMutexLock
@@ -561,11 +578,11 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
// releases a shared lock on a `Mutex` via RAII.
class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
public:
- explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
+ explicit ReaderMutexLock(Mutex* mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
mu->ReaderLock();
}
- explicit ReaderMutexLock(Mutex *mu, const Condition &cond)
+ explicit ReaderMutexLock(Mutex* mu, const Condition& cond)
ABSL_SHARED_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->ReaderLockWhen(cond);
@@ -579,7 +596,7 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
private:
- Mutex *const mu_;
+ Mutex* const mu_;
};
// WriterMutexLock
@@ -588,12 +605,12 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
// releases a write (exclusive) lock on a `Mutex` via RAII.
class ABSL_SCOPED_LOCKABLE WriterMutexLock {
public:
- explicit WriterMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ explicit WriterMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->WriterLock();
}
- explicit WriterMutexLock(Mutex *mu, const Condition &cond)
+ explicit WriterMutexLock(Mutex* mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->WriterLockWhen(cond);
@@ -607,7 +624,7 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
private:
- Mutex *const mu_;
+ Mutex* const mu_;
};
// -----------------------------------------------------------------------------
@@ -665,7 +682,7 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
class Condition {
public:
// A Condition that returns the result of "(*func)(arg)"
- Condition(bool (*func)(void *), void *arg);
+ Condition(bool (*func)(void*), void* arg);
// Templated version for people who are averse to casts.
//
@@ -676,8 +693,22 @@ class Condition {
// Note: lambdas in this case must contain no bound variables.
//
// See class comment for performance advice.
- template<typename T>
- Condition(bool (*func)(T *), T *arg);
+ template <typename T>
+ Condition(bool (*func)(T*), T* arg);
+
+ // Same as above, but allows for cases where `arg` comes from a pointer that
+ // is convertible to the function parameter type `T*` but not an exact match.
+ //
+ // For example, the argument might be `X*` but the function takes `const X*`,
+ // or the argument might be `Derived*` while the function takes `Base*`, and
+ // so on for cases where the argument pointer can be implicitly converted.
+ //
+ // Implementation notes: This constructor overload is required in addition to
+  // the one above to allow deduction of `T` from `arg` in cases where
+  // a function template is passed as `func`. Also, the dummy `typename = void`
+  // template parameter exists just to work around an MSVC mangling bug.
+ template <typename T, typename = void>
+ Condition(bool (*func)(T*), typename absl::internal::identity<T>::type* arg);
// Templated version for invoking a method that returns a `bool`.
//
@@ -687,16 +718,16 @@ class Condition {
// Implementation Note: `absl::internal::identity` is used to allow methods to
// come from base classes. A simpler signature like
// `Condition(T*, bool (T::*)())` does not suffice.
- template<typename T>
- Condition(T *object, bool (absl::internal::identity<T>::type::* method)());
+ template <typename T>
+ Condition(T* object, bool (absl::internal::identity<T>::type::*method)());
// Same as above, for const members
- template<typename T>
- Condition(const T *object,
- bool (absl::internal::identity<T>::type::* method)() const);
+ template <typename T>
+ Condition(const T* object,
+ bool (absl::internal::identity<T>::type::*method)() const);
// A Condition that returns the value of `*cond`
- explicit Condition(const bool *cond);
+ explicit Condition(const bool* cond);
// Templated version for invoking a functor that returns a `bool`.
// This approach accepts pointers to non-mutable lambdas, `std::function`,
@@ -723,12 +754,22 @@ class Condition {
// Implementation note: The second template parameter ensures that this
// constructor doesn't participate in overload resolution if T doesn't have
// `bool operator() const`.
- template <typename T, typename E = decltype(
- static_cast<bool (T::*)() const>(&T::operator()))>
- explicit Condition(const T *obj)
+ template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
+ &T::operator()))>
+ explicit Condition(const T* obj)
: Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
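A sketch of the constructor forms above (the `Work` type is hypothetical; the capture-free lambda decays to a plain function pointer, as the note above requires, and in real code `w` would be guarded by a `Mutex`):

    struct Work {
      bool done = false;
      bool Done() const { return done; }
    };
    Work w;  // hypothetical object

    absl::Condition c1(+[](Work* x) { return x->done; }, &w);  // function + arg
    absl::Condition c2(&w, &Work::Done);                       // const method
    absl::Condition c3(&w.done);                               // bool pointer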
// A Condition that always returns `true`.
+ // kTrue is only useful in a narrow set of circumstances, mostly when
+ // it's passed conditionally. For example:
+ //
+ // mu.LockWhen(some_flag ? kTrue : SomeOtherCondition);
+ //
+ // Note: {LockWhen,Await}With{Deadline,Timeout} methods with kTrue condition
+  // don't return immediately when the timeout happens; they still block until
+ // the Mutex becomes available. The return value of these methods does
+ // not indicate if the timeout was reached; rather it indicates whether or
+ // not the condition is true.
ABSL_CONST_INIT static const Condition kTrue;
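A sketch of the conditional-passing idiom described in the comment (`data_ready`, `flag`, and `wait_for_data` are hypothetical):

    absl::Mutex mu;
    bool flag = false;  // hypothetical; guarded by mu
    absl::Condition data_ready(&flag);

    void Acquire(bool wait_for_data) {
      mu.LockWhen(wait_for_data ? data_ready : absl::Condition::kTrue);
      // mu is held; if wait_for_data, `flag` is true here
      mu.Unlock();
    }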
// Evaluates the condition.
@@ -741,7 +782,7 @@ class Condition {
// Two `Condition` values are guaranteed equal if both their `func` and `arg`
// components are the same. A null pointer is equivalent to a `true`
// condition.
- static bool GuaranteedEqual(const Condition *a, const Condition *b);
+ static bool GuaranteedEqual(const Condition* a, const Condition* b);
private:
// Sizing an allocation for a method pointer can be subtle. In the Itanium
@@ -769,12 +810,14 @@ class Condition {
bool (*eval_)(const Condition*) = nullptr;
// Either an argument for a function call or an object for a method call.
- void *arg_ = nullptr;
+ void* arg_ = nullptr;
// Various functions eval_ can point to:
static bool CallVoidPtrFunction(const Condition*);
- template <typename T> static bool CastAndCallFunction(const Condition* c);
- template <typename T> static bool CastAndCallMethod(const Condition* c);
+ template <typename T>
+ static bool CastAndCallFunction(const Condition* c);
+ template <typename T>
+ static bool CastAndCallMethod(const Condition* c);
// Helper methods for storing, validating, and reading callback arguments.
template <typename T>
@@ -786,7 +829,7 @@ class Condition {
}
template <typename T>
- inline void ReadCallback(T *callback) const {
+ inline void ReadCallback(T* callback) const {
std::memcpy(callback, callback_, sizeof(*callback));
}
@@ -843,7 +886,7 @@ class CondVar {
// spurious wakeup), then reacquires the `Mutex` and returns.
//
// Requires and ensures that the current thread holds the `Mutex`.
- void Wait(Mutex *mu);
+ void Wait(Mutex* mu);
// CondVar::WaitWithTimeout()
//
@@ -858,7 +901,7 @@ class CondVar {
// to return `true` or `false`.
//
// Requires and ensures that the current thread holds the `Mutex`.
- bool WaitWithTimeout(Mutex *mu, absl::Duration timeout);
+ bool WaitWithTimeout(Mutex* mu, absl::Duration timeout);
// CondVar::WaitWithDeadline()
//
@@ -875,7 +918,7 @@ class CondVar {
// to return `true` or `false`.
//
// Requires and ensures that the current thread holds the `Mutex`.
- bool WaitWithDeadline(Mutex *mu, absl::Time deadline);
+ bool WaitWithDeadline(Mutex* mu, absl::Time deadline);
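A sketch of the standard wait loop these methods are designed for (`ready` is hypothetical):

    absl::Mutex mu;
    absl::CondVar cv;
    bool ready = false;  // hypothetical; guarded by mu

    void Consumer() {
      absl::MutexLock lock(&mu);
      while (!ready) {   // re-check: wakeups may be spurious
        cv.Wait(&mu);    // atomically releases mu, blocks, then reacquires
      }
    }

    void Producer() {
      absl::MutexLock lock(&mu);
      ready = true;
      cv.Signal();       // wake one waiter
    }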
// CondVar::Signal()
//
@@ -892,18 +935,17 @@ class CondVar {
// Causes all subsequent uses of this `CondVar` to be logged via
// `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
// Note: this method substantially reduces `CondVar` performance.
- void EnableDebugLog(const char *name);
+ void EnableDebugLog(const char* name);
private:
- bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t);
- void Remove(base_internal::PerThreadSynch *s);
- void Wakeup(base_internal::PerThreadSynch *w);
+ bool WaitCommon(Mutex* mutex, synchronization_internal::KernelTimeout t);
+ void Remove(base_internal::PerThreadSynch* s);
+ void Wakeup(base_internal::PerThreadSynch* w);
std::atomic<intptr_t> cv_; // Condition variable state.
CondVar(const CondVar&) = delete;
CondVar& operator=(const CondVar&) = delete;
};
-
// Variants of MutexLock.
//
// If you find yourself using one of these, consider instead using
@@ -914,14 +956,14 @@ class CondVar {
// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
public:
- explicit MutexLockMaybe(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ explicit MutexLockMaybe(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
if (this->mu_ != nullptr) {
this->mu_->Lock();
}
}
- explicit MutexLockMaybe(Mutex *mu, const Condition &cond)
+ explicit MutexLockMaybe(Mutex* mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
if (this->mu_ != nullptr) {
@@ -930,11 +972,13 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
}
~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
- if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+ if (this->mu_ != nullptr) {
+ this->mu_->Unlock();
+ }
}
private:
- Mutex *const mu_;
+ Mutex* const mu_;
MutexLockMaybe(const MutexLockMaybe&) = delete;
MutexLockMaybe(MutexLockMaybe&&) = delete;
MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
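A sketch of the null-tolerant variant (`Touch` is a hypothetical name):

    void Touch(absl::Mutex* mu) {      // mu may be null
      absl::MutexLockMaybe lock(mu);   // no-op when mu == nullptr
      // ... work that is synchronized only when a Mutex was provided ...
    }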
@@ -947,25 +991,27 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
// mutex before destruction. `Release()` may be called at most once.
class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
public:
- explicit ReleasableMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ explicit ReleasableMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->Lock();
}
- explicit ReleasableMutexLock(Mutex *mu, const Condition &cond)
+ explicit ReleasableMutexLock(Mutex* mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->LockWhen(cond);
}
~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
- if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+ if (this->mu_ != nullptr) {
+ this->mu_->Unlock();
+ }
}
void Release() ABSL_UNLOCK_FUNCTION();
private:
- Mutex *mu_;
+ Mutex* mu_;
ReleasableMutexLock(const ReleasableMutexLock&) = delete;
ReleasableMutexLock(ReleasableMutexLock&&) = delete;
ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
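
Release() unlocks early and disarms the destructor, which is useful when the critical section can end before scope exit; a minimal sketch (names are hypothetical):

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
bool initialized = false;  // guarded by mu

void EnsureInitialized() {
  absl::ReleasableMutexLock lock(&mu);
  if (initialized) {
    lock.Release();  // unlock now; the destructor will do nothing
    return;          // ... could run more unlocked work here ...
  }
  initialized = true;
}  // mu is still held on this path, so the destructor unlocks
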
@@ -982,8 +1028,8 @@ inline CondVar::CondVar() : cv_(0) {}
// static
template <typename T>
-bool Condition::CastAndCallMethod(const Condition *c) {
- T *object = static_cast<T *>(c->arg_);
+bool Condition::CastAndCallMethod(const Condition* c) {
+ T* object = static_cast<T*>(c->arg_);
bool (T::*method_pointer)();
c->ReadCallback(&method_pointer);
return (object->*method_pointer)();
@@ -991,38 +1037,43 @@ bool Condition::CastAndCallMethod(const Condition *c) {
// static
template <typename T>
-bool Condition::CastAndCallFunction(const Condition *c) {
- bool (*function)(T *);
+bool Condition::CastAndCallFunction(const Condition* c) {
+ bool (*function)(T*);
c->ReadCallback(&function);
- T *argument = static_cast<T *>(c->arg_);
+ T* argument = static_cast<T*>(c->arg_);
return (*function)(argument);
}
template <typename T>
-inline Condition::Condition(bool (*func)(T *), T *arg)
+inline Condition::Condition(bool (*func)(T*), T* arg)
: eval_(&CastAndCallFunction<T>),
- arg_(const_cast<void *>(static_cast<const void *>(arg))) {
+ arg_(const_cast<void*>(static_cast<const void*>(arg))) {
static_assert(sizeof(&func) <= sizeof(callback_),
"An overlarge function pointer was passed to Condition.");
StoreCallback(func);
}
+template <typename T, typename>
+inline Condition::Condition(bool (*func)(T*),
+ typename absl::internal::identity<T>::type* arg)
+ // Just delegate to the overload above.
+ : Condition(func, arg) {}
+
template <typename T>
-inline Condition::Condition(T *object,
+inline Condition::Condition(T* object,
bool (absl::internal::identity<T>::type::*method)())
- : eval_(&CastAndCallMethod<T>),
- arg_(object) {
+ : eval_(&CastAndCallMethod<T>), arg_(object) {
static_assert(sizeof(&method) <= sizeof(callback_),
"An overlarge method pointer was passed to Condition.");
StoreCallback(method);
}
template <typename T>
-inline Condition::Condition(const T *object,
+inline Condition::Condition(const T* object,
bool (absl::internal::identity<T>::type::*method)()
const)
: eval_(&CastAndCallMethod<T>),
- arg_(reinterpret_cast<void *>(const_cast<T *>(object))) {
+ arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
StoreCallback(method);
}
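
The new delegating overload spells the argument as absl::internal::identity<T>::type*, a non-deduced context, so T is deduced from the predicate's parameter alone. The practical effect is that the pointer argument may undergo ordinary implicit conversions (adding const, derived-to-base), exactly as if the predicate were called directly. A hedged sketch of what this permits (Base, Derived, and IsReady are illustrative):

#include "absl/synchronization/mutex.h"

struct Base { int value; };
struct Derived : Base {};

bool IsReady(const Base* b) { return b->value != 0; }

void Demo(Derived* d) {
  // T deduces to const Base from IsReady's parameter; d converts implicitly.
  absl::Condition cond(IsReady, d);
  (void)cond;
}
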
@@ -1052,7 +1103,7 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
//
// This has the same ordering and single-use limitations as
// RegisterMutexProfiler() above.
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
int64_t wait_cycles));
// Register a hook for CondVar tracing.
@@ -1067,24 +1118,7 @@ void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
//
// This has the same ordering and single-use limitations as
// RegisterMutexProfiler() above.
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
-
-// Register a hook for symbolizing stack traces in deadlock detector reports.
-//
-// 'pc' is the program counter being symbolized, 'out' is the buffer to write
-// into, and 'out_size' is the size of the buffer. This function can return
-// false if symbolizing failed, or true if a NUL-terminated symbol was written
-// to 'out.'
-//
-// This has the same ordering and single-use limitations as
-// RegisterMutexProfiler() above.
-//
-// DEPRECATED: The default symbolizer function is absl::Symbolize() and the
-// ability to register a different hook for symbolizing stack traces will be
-// removed on or after 2023-05-01.
-ABSL_DEPRECATED("absl::RegisterSymbolizer() is deprecated and will be removed "
- "on or after 2023-05-01")
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size));
+void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv));
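
Both tracing hooks are registered once, early in the process, and receive the event name, the object's address, and (for mutexes) the contention time in cycles. A minimal sketch of wiring them up (the tracer bodies are illustrative):

#include <cstdint>
#include <cstdio>
#include "absl/synchronization/mutex.h"

void MyMutexTracer(const char* msg, const void* obj, int64_t wait_cycles) {
  std::fprintf(stderr, "mutex %p: %s (%lld cycles)\n", obj, msg,
               static_cast<long long>(wait_cycles));
}

void MyCondVarTracer(const char* msg, const void* cv) {
  std::fprintf(stderr, "condvar %p: %s\n", cv, msg);
}

int main() {
  // Each hook may be registered at most once, before first use.
  absl::RegisterMutexTracer(&MyMutexTracer);
  absl::RegisterCondVarTracer(&MyCondVarTracer);
  // ...
}
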
// EnableMutexInvariantDebugging()
//
@@ -1101,7 +1135,7 @@ void EnableMutexInvariantDebugging(bool enabled);
enum class OnDeadlockCycle {
kIgnore, // Neither report on nor attempt to track cycles in lock ordering
kReport, // Report lock cycles to stderr when detected
- kAbort, // Report lock cycles to stderr when detected, then abort
+ kAbort, // Report lock cycles to stderr when detected, then abort
};
// SetMutexDeadlockDetectionMode()
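
Selecting a mode is a single process-wide call; the new DeadlockDetectorLongCycle test below, for example, switches to kReport so a detected cycle is logged rather than fatal. A minimal sketch:

#include "absl/synchronization/mutex.h"

int main() {
  // Log lock-ordering cycles to stderr rather than aborting.
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
  // ...
}
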
diff --git a/absl/synchronization/mutex_method_pointer_test.cc b/absl/synchronization/mutex_method_pointer_test.cc
index 1ec801a0..f4c82d27 100644
--- a/absl/synchronization/mutex_method_pointer_test.cc
+++ b/absl/synchronization/mutex_method_pointer_test.cc
@@ -26,8 +26,8 @@ class IncompleteClass;
#ifdef _MSC_VER
// These tests verify expectations about sizes of MSVC pointers to methods.
-// Pointers to methods are distinguished by whether their class hierachies
-// contain single inheritance, multiple inheritance, or virtual inheritence.
+// Pointers to methods are distinguished by whether their class hierarchies
+// contain single inheritance, multiple inheritance, or virtual inheritance.
// Declare classes of the various MSVC inheritance types.
class __single_inheritance SingleInheritance{};
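
The inheritance keyword matters because MSVC sizes a pointer-to-member by the class's inheritance model, growing from single through multiple to virtual/unknown. A hedged, MSVC-only sketch that prints the effect (class names are illustrative):

#ifdef _MSC_VER
#include <cstdio>

class __single_inheritance Single {};
class Unknown;  // incomplete: MSVC assumes the largest representation

int main() {
  std::printf("single-inheritance method pointer:  %zu bytes\n",
              sizeof(void (Single::*)()));
  std::printf("unknown-inheritance method pointer: %zu bytes\n",
              sizeof(void (Unknown::*)()));
}
#endif
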
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index 34751cb1..b585c342 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -32,8 +32,9 @@
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/sysinfo.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/synchronization/internal/thread_pool.h"
#include "absl/time/clock.h"
@@ -87,7 +88,7 @@ static void SetInvariantChecked(bool new_value) {
static void CheckSumG0G1(void *v) {
TestContext *cxt = static_cast<TestContext *>(v);
- ABSL_RAW_CHECK(cxt->g0 == -cxt->g1, "Error in CheckSumG0G1");
+ CHECK_EQ(cxt->g0, -cxt->g1) << "Error in CheckSumG0G1";
SetInvariantChecked(true);
}
@@ -132,7 +133,7 @@ static void TestRW(TestContext *cxt, int c) {
} else {
for (int i = 0; i != cxt->iterations; i++) {
absl::ReaderMutexLock l(&cxt->mu);
- ABSL_RAW_CHECK(cxt->g0 == -cxt->g1, "Error in TestRW");
+ CHECK_EQ(cxt->g0, -cxt->g1) << "Error in TestRW";
cxt->mu.AssertReaderHeld();
}
}
@@ -157,7 +158,7 @@ static void TestAwait(TestContext *cxt, int c) {
cxt->mu.AssertHeld();
while (cxt->g0 < cxt->iterations) {
cxt->mu.Await(absl::Condition(&mc, &MyContext::MyTurn));
- ABSL_RAW_CHECK(mc.MyTurn(), "Error in TestAwait");
+ CHECK(mc.MyTurn()) << "Error in TestAwait";
cxt->mu.AssertHeld();
if (cxt->g0 < cxt->iterations) {
int a = cxt->g0 + 1;
@@ -185,7 +186,7 @@ static void TestSignalAll(TestContext *cxt, int c) {
}
static void TestSignal(TestContext *cxt, int c) {
- ABSL_RAW_CHECK(cxt->threads == 2, "TestSignal should use 2 threads");
+ CHECK_EQ(cxt->threads, 2) << "TestSignal should use 2 threads";
int target = c;
absl::MutexLock l(&cxt->mu);
cxt->mu.AssertHeld();
@@ -222,8 +223,8 @@ static void TestCVTimeout(TestContext *cxt, int c) {
static bool G0GE2(TestContext *cxt) { return cxt->g0 >= 2; }
static void TestTime(TestContext *cxt, int c, bool use_cv) {
- ABSL_RAW_CHECK(cxt->iterations == 1, "TestTime should only use 1 iteration");
- ABSL_RAW_CHECK(cxt->threads > 2, "TestTime should use more than 2 threads");
+ CHECK_EQ(cxt->iterations, 1) << "TestTime should only use 1 iteration";
+ CHECK_GT(cxt->threads, 2) << "TestTime should use more than 2 threads";
const bool kFalse = false;
absl::Condition false_cond(&kFalse);
absl::Condition g0ge2(G0GE2, cxt);
@@ -234,26 +235,24 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) {
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
} else {
- ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
- "TestTime failed");
+ CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
+ << "TestTime failed";
}
absl::Duration elapsed = absl::Now() - start;
- ABSL_RAW_CHECK(
- absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
- "TestTime failed");
- ABSL_RAW_CHECK(cxt->g0 == 1, "TestTime failed");
+ CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
+ << "TestTime failed";
+ CHECK_EQ(cxt->g0, 1) << "TestTime failed";
start = absl::Now();
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
} else {
- ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
- "TestTime failed");
+ CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
+ << "TestTime failed";
}
elapsed = absl::Now() - start;
- ABSL_RAW_CHECK(
- absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
- "TestTime failed");
+ CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
+ << "TestTime failed";
cxt->g0++;
if (use_cv) {
cxt->cv.Signal();
@@ -263,26 +262,24 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) {
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(4));
} else {
- ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(4)),
- "TestTime failed");
+ CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(4)))
+ << "TestTime failed";
}
elapsed = absl::Now() - start;
- ABSL_RAW_CHECK(
- absl::Seconds(3.9) <= elapsed && elapsed <= absl::Seconds(6.0),
- "TestTime failed");
- ABSL_RAW_CHECK(cxt->g0 >= 3, "TestTime failed");
+ CHECK(absl::Seconds(3.9) <= elapsed && elapsed <= absl::Seconds(6.0))
+ << "TestTime failed";
+ CHECK_GE(cxt->g0, 3) << "TestTime failed";
start = absl::Now();
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
} else {
- ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
- "TestTime failed");
+ CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
+ << "TestTime failed";
}
elapsed = absl::Now() - start;
- ABSL_RAW_CHECK(
- absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
- "TestTime failed");
+ CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
+ << "TestTime failed";
if (use_cv) {
cxt->cv.SignalAll();
}
@@ -291,14 +288,13 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) {
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
} else {
- ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
- "TestTime failed");
+ CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
+ << "TestTime failed";
}
elapsed = absl::Now() - start;
- ABSL_RAW_CHECK(
- absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
- "TestTime failed");
- ABSL_RAW_CHECK(cxt->g0 == cxt->threads, "TestTime failed");
+ CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
+ << "TestTime failed";
+ CHECK_EQ(cxt->g0, cxt->threads) << "TestTime failed";
} else if (c == 1) {
absl::MutexLock l(&cxt->mu);
@@ -306,14 +302,12 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) {
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Milliseconds(500));
} else {
- ABSL_RAW_CHECK(
- !cxt->mu.AwaitWithTimeout(false_cond, absl::Milliseconds(500)),
- "TestTime failed");
+ CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Milliseconds(500)))
+ << "TestTime failed";
}
const absl::Duration elapsed = absl::Now() - start;
- ABSL_RAW_CHECK(
- absl::Seconds(0.4) <= elapsed && elapsed <= absl::Seconds(0.9),
- "TestTime failed");
+ CHECK(absl::Seconds(0.4) <= elapsed && elapsed <= absl::Seconds(0.9))
+ << "TestTime failed";
cxt->g0++;
} else if (c == 2) {
absl::MutexLock l(&cxt->mu);
@@ -322,8 +316,8 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
}
} else {
- ABSL_RAW_CHECK(cxt->mu.AwaitWithTimeout(g0ge2, absl::Seconds(100)),
- "TestTime failed");
+ CHECK(cxt->mu.AwaitWithTimeout(g0ge2, absl::Seconds(100)))
+ << "TestTime failed";
}
cxt->g0++;
} else {
@@ -400,7 +394,7 @@ static int RunTestWithInvariantDebugging(void (*test)(TestContext *cxt, int),
TestContext cxt;
cxt.mu.EnableInvariantDebugging(invariant, &cxt);
int ret = RunTestCommon(&cxt, test, threads, iterations, operations);
- ABSL_RAW_CHECK(GetInvariantChecked(), "Invariant not checked");
+ CHECK(GetInvariantChecked()) << "Invariant not checked";
absl::EnableMutexInvariantDebugging(false); // Restore.
return ret;
}
@@ -872,6 +866,111 @@ TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
}
}
+// Some functions taking pointers to non-const.
+bool Equals42(int *p) { return *p == 42; }
+bool Equals43(int *p) { return *p == 43; }
+
+// Some functions taking pointers to const.
+bool ConstEquals42(const int *p) { return *p == 42; }
+bool ConstEquals43(const int *p) { return *p == 43; }
+
+// Some function templates taking pointers. Note that `T` may be deduced as
+// either non-const or const, which creates a potential ambiguity that the
+// implementation is careful to avoid.
+template <typename T>
+bool TemplateEquals42(T *p) {
+ return *p == 42;
+}
+template <typename T>
+bool TemplateEquals43(T *p) {
+ return *p == 43;
+}
+
+TEST(Mutex, FunctionPointerCondition) {
+ // Some arguments.
+ int x = 42;
+ const int const_x = 42;
+
+ // Parameter non-const, argument non-const.
+ EXPECT_TRUE(absl::Condition(Equals42, &x).Eval());
+ EXPECT_FALSE(absl::Condition(Equals43, &x).Eval());
+
+ // Parameter const, argument non-const.
+ EXPECT_TRUE(absl::Condition(ConstEquals42, &x).Eval());
+ EXPECT_FALSE(absl::Condition(ConstEquals43, &x).Eval());
+
+ // Parameter const, argument const.
+ EXPECT_TRUE(absl::Condition(ConstEquals42, &const_x).Eval());
+ EXPECT_FALSE(absl::Condition(ConstEquals43, &const_x).Eval());
+
+ // Parameter type deduced, argument non-const.
+ EXPECT_TRUE(absl::Condition(TemplateEquals42, &x).Eval());
+ EXPECT_FALSE(absl::Condition(TemplateEquals43, &x).Eval());
+
+ // Parameter type deduced, argument const.
+ EXPECT_TRUE(absl::Condition(TemplateEquals42, &const_x).Eval());
+ EXPECT_FALSE(absl::Condition(TemplateEquals43, &const_x).Eval());
+
+ // Parameter non-const, argument const is not well-formed.
+ EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(Equals42),
+ decltype(&const_x)>::value));
+ // Validate use of is_constructible by contrasting to a well-formed case.
+ EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(ConstEquals42),
+ decltype(&const_x)>::value));
+}
+
+// Example base and derived class for use in predicates and test below. Not a
+// particularly realistic example, but it suffices for testing purposes.
+struct Base {
+ explicit Base(int v) : value(v) {}
+ int value;
+};
+struct Derived : Base {
+ explicit Derived(int v) : Base(v) {}
+};
+
+// Some functions taking pointers to non-const `Base`.
+bool BaseEquals42(Base *p) { return p->value == 42; }
+bool BaseEquals43(Base *p) { return p->value == 43; }
+
+// Some functions taking pointers to const `Base`.
+bool ConstBaseEquals42(const Base *p) { return p->value == 42; }
+bool ConstBaseEquals43(const Base *p) { return p->value == 43; }
+
+TEST(Mutex, FunctionPointerConditionWithDerivedToBaseConversion) {
+ // Some arguments.
+ Derived derived(42);
+ const Derived const_derived(42);
+
+ // Parameter non-const base, argument derived non-const.
+ EXPECT_TRUE(absl::Condition(BaseEquals42, &derived).Eval());
+ EXPECT_FALSE(absl::Condition(BaseEquals43, &derived).Eval());
+
+ // Parameter const base, argument derived non-const.
+ EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &derived).Eval());
+ EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &derived).Eval());
+
+ // Parameter const base, argument derived const.
+ EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
+ EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());
+
+ // Parameter derived, argument base is not well-formed.
+ bool (*derived_pred)(const Derived *) = [](const Derived *) { return true; };
+ EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
+ Base *>::value));
+ EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
+ const Base *>::value));
+ // Validate use of is_constructible by contrasting to well-formed cases.
+ EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
+ Derived *>::value));
+ EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
+ const Derived *>::value));
+}
+
struct True {
template <class... Args>
bool operator()(Args...) const {
@@ -988,7 +1087,7 @@ static bool ConditionWithAcquire(AcquireFromConditionStruct *x) {
absl::Milliseconds(100));
x->mu1.Unlock();
}
- ABSL_RAW_CHECK(x->value < 4, "should not be invoked a fourth time");
+ CHECK_LT(x->value, 4) << "should not be invoked a fourth time";
// We arrange for the condition to return true on only the 2nd and 3rd calls.
return x->value == 2 || x->value == 3;
@@ -1131,6 +1230,25 @@ TEST(Mutex, DeadlockDetectorBazelWarning) {
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
}
+TEST(Mutex, DeadlockDetectorLongCycle) {
+ absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
+
+ // This test generates a warning if it passes, and crashes otherwise.
+ // Cause bazel to ignore the warning.
+ ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
+
+  // Check that we survive a lock-ordering cycle spanning all 100 mutexes.
+ std::vector<absl::Mutex> mutex(100);
+ for (size_t i = 0; i != mutex.size(); i++) {
+ mutex[i].Lock();
+ mutex[(i + 1) % mutex.size()].Lock();
+ mutex[i].Unlock();
+ mutex[(i + 1) % mutex.size()].Unlock();
+ }
+
+ absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
+}
+
// This test is tagged with NO_THREAD_SAFETY_ANALYSIS because the
// annotation-based static thread-safety analysis is not currently
// predicate-aware and cannot tell if the two for-loops that acquire and
@@ -1216,11 +1334,9 @@ static bool DelayIsWithinBounds(absl::Duration expected_delay,
  // different clock than absl::Now(), but these cases should be handled by
  // the retry mechanism in each TimeoutTest.
if (actual_delay < expected_delay) {
- ABSL_RAW_LOG(WARNING,
- "Actual delay %s was too short, expected %s (difference %s)",
- absl::FormatDuration(actual_delay).c_str(),
- absl::FormatDuration(expected_delay).c_str(),
- absl::FormatDuration(actual_delay - expected_delay).c_str());
+ LOG(WARNING) << "Actual delay " << actual_delay
+ << " was too short, expected " << expected_delay
+ << " (difference " << actual_delay - expected_delay << ")";
pass = false;
}
// If the expected delay is <= zero then allow a small error tolerance, since
@@ -1231,11 +1347,9 @@ static bool DelayIsWithinBounds(absl::Duration expected_delay,
? absl::Milliseconds(10)
: TimeoutTestAllowedSchedulingDelay();
if (actual_delay > expected_delay + tolerance) {
- ABSL_RAW_LOG(WARNING,
- "Actual delay %s was too long, expected %s (difference %s)",
- absl::FormatDuration(actual_delay).c_str(),
- absl::FormatDuration(expected_delay).c_str(),
- absl::FormatDuration(actual_delay - expected_delay).c_str());
+ LOG(WARNING) << "Actual delay " << actual_delay
+ << " was too long, expected " << expected_delay
+ << " (difference " << actual_delay - expected_delay << ")";
pass = false;
}
return pass;
@@ -1285,12 +1399,6 @@ std::ostream &operator<<(std::ostream &os, const TimeoutTestParam &param) {
<< " expected_delay: " << param.expected_delay;
}
-std::string FormatString(const TimeoutTestParam &param) {
- std::ostringstream os;
- os << param;
- return os.str();
-}
-
// Like `thread::Executor::ScheduleAt` except:
// a) Delays zero or negative are executed immediately in the current thread.
// b) Infinite delays are never scheduled.
@@ -1420,13 +1528,13 @@ INSTANTIATE_TEST_SUITE_P(All, TimeoutTest,
TEST_P(TimeoutTest, Await) {
const TimeoutTestParam params = GetParam();
- ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
+ LOG(INFO) << "Params: " << params;
  // Because this test asserts bounds on scheduling delays, it is flaky. To
  // compensate, it loops forever until it passes. Failures manifest as test
  // timeouts, in which case the test log can be used to diagnose the issue.
for (int attempt = 1;; ++attempt) {
- ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
+ LOG(INFO) << "Attempt " << attempt;
absl::Mutex mu;
bool value = false; // condition value (under mu)
@@ -1454,13 +1562,13 @@ TEST_P(TimeoutTest, Await) {
TEST_P(TimeoutTest, LockWhen) {
const TimeoutTestParam params = GetParam();
- ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
+ LOG(INFO) << "Params: " << params;
  // Because this test asserts bounds on scheduling delays, it is flaky. To
  // compensate, it loops forever until it passes. Failures manifest as test
  // timeouts, in which case the test log can be used to diagnose the issue.
for (int attempt = 1;; ++attempt) {
- ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
+ LOG(INFO) << "Attempt " << attempt;
absl::Mutex mu;
bool value = false; // condition value (under mu)
@@ -1489,13 +1597,13 @@ TEST_P(TimeoutTest, LockWhen) {
TEST_P(TimeoutTest, ReaderLockWhen) {
const TimeoutTestParam params = GetParam();
- ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
+ LOG(INFO) << "Params: " << params;
  // Because this test asserts bounds on scheduling delays, it is flaky. To
  // compensate, it loops forever until it passes. Failures manifest as test
  // timeouts, in which case the test log can be used to diagnose the issue.
for (int attempt = 0;; ++attempt) {
- ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
+ LOG(INFO) << "Attempt " << attempt;
absl::Mutex mu;
bool value = false; // condition value (under mu)
@@ -1525,13 +1633,13 @@ TEST_P(TimeoutTest, ReaderLockWhen) {
TEST_P(TimeoutTest, Wait) {
const TimeoutTestParam params = GetParam();
- ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
+ LOG(INFO) << "Params: " << params;
  // Because this test asserts bounds on scheduling delays, it is flaky. To
  // compensate, it loops forever until it passes. Failures manifest as test
  // timeouts, in which case the test log can be used to diagnose the issue.
for (int attempt = 0;; ++attempt) {
- ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
+ LOG(INFO) << "Attempt " << attempt;
absl::Mutex mu;
bool value = false; // condition value (under mu)
@@ -1730,4 +1838,59 @@ TEST(Mutex, SignalExitedThread) {
for (auto &th : top) th.join();
}
+TEST(Mutex, WriterPriority) {
+ absl::Mutex mu;
+ bool wrote = false;
+ std::atomic<bool> saw_wrote{false};
+ auto readfunc = [&]() {
+ for (size_t i = 0; i < 10; ++i) {
+ absl::ReaderMutexLock lock(&mu);
+ if (wrote) {
+ saw_wrote = true;
+ break;
+ }
+ absl::SleepFor(absl::Seconds(1));
+ }
+ };
+ std::thread t1(readfunc);
+ absl::SleepFor(absl::Milliseconds(500));
+ std::thread t2(readfunc);
+  // Note: this test guards against a bug related to an uninitialized
+  // PerThreadSynch::priority, so the writer intentionally runs on a new
+  // thread.
+ std::thread t3([&]() {
+    // The writer should be able to squeeze between the two alternating
+    // readers.
+ absl::MutexLock lock(&mu);
+ wrote = true;
+ });
+ t1.join();
+ t2.join();
+ t3.join();
+ EXPECT_TRUE(saw_wrote.load());
+}
+
+TEST(Mutex, LockWhenWithTimeoutResult) {
+ // Check various corner cases for Await/LockWhen return value
+ // with always true/always false conditions.
+ absl::Mutex mu;
+ const bool kAlwaysTrue = true, kAlwaysFalse = false;
+ const absl::Condition kTrueCond(&kAlwaysTrue), kFalseCond(&kAlwaysFalse);
+ EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
+ mu.Unlock();
+ EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
+ EXPECT_TRUE(mu.AwaitWithTimeout(kTrueCond, absl::Milliseconds(1)));
+ EXPECT_FALSE(mu.AwaitWithTimeout(kFalseCond, absl::Milliseconds(1)));
+ std::thread th1([&]() {
+ EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
+ mu.Unlock();
+ });
+ std::thread th2([&]() {
+ EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
+ mu.Unlock();
+ });
+ absl::SleepFor(absl::Milliseconds(100));
+ mu.Unlock();
+ th1.join();
+ th2.join();
+}
+
} // namespace
diff --git a/absl/synchronization/notification_test.cc b/absl/synchronization/notification_test.cc
index 100ea76f..49ce61a5 100644
--- a/absl/synchronization/notification_test.cc
+++ b/absl/synchronization/notification_test.cc
@@ -79,7 +79,7 @@ static void BasicTests(bool notify_before_waiting, Notification* notification) {
// Allow for a slight early return, to account for quality of implementation
// issues on various platforms.
- const absl::Duration slop = absl::Microseconds(200);
+ const absl::Duration slop = absl::Milliseconds(5);
EXPECT_LE(delay - slop, elapsed)
<< "WaitForNotificationWithTimeout returned " << delay - elapsed
<< " early (with " << slop << " slop), start time was " << start;