author    Abseil Team <absl-team@google.com>    2020-11-30 21:23:11 -0800
committer vslashg <gfalcon@google.com>          2020-12-01 09:51:56 -0500
commit    592924480acf034aec0454160492a20bccdbdf3e (patch)
tree      7f390a1dc3cfb802d8d60b067c3a51366a0c3de5 /absl/base
parent    e80c0b3536e1bdee68a874d529a9ba951faffe8b (diff)
Export of internal Abseil changes
--
7b6a68aa92dcc7247236d1a1813914e035383bf8 by Abseil Team <absl-team@google.com>:

Use atomic exchange to mark completion in absl::once_flag

This prevents a potential for a missed wakeup if one thread marks itself as
a waiter while another thread is completing the invocation.

PiperOrigin-RevId: 344946791

--
ddff21d1dde08d1368d8be5fca81b154e78be2fc by Abseil Team <absl-team@google.com>:

Add missing string_view include. This is currently used transitively through
the cord header.

PiperOrigin-RevId: 344845266
GitOrigin-RevId: 7b6a68aa92dcc7247236d1a1813914e035383bf8
Change-Id: Ia24e98a1df832fc4cb491d888fdf21182b5954f4
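For illustration only (not part of this patch): a minimal sketch of the race
the first change addresses, using simplified state constants assumed to mirror
the real kOnceInit/kOnceRunning/kOnceWaiter/kOnceDone states in call_once.h.
With the old load/store pair, a thread that registers itself as a waiter
between the load and the store is never observed, so the wakeup is skipped;
a single exchange cannot miss it.

// Sketch only; state values are assumptions, not the real constants.
#include <atomic>
#include <cstdint>

constexpr uint32_t kOnceInit = 0, kOnceRunning = 1,
                   kOnceWaiter = 2, kOnceDone = 3;

void MarkDoneRacy(std::atomic<uint32_t>* control) {
  // Old scheme: load, then store. Another thread may set kOnceWaiter
  // *between* these two operations; old_control still reads kOnceRunning,
  // the store overwrites the waiter flag, and the wake below is skipped.
  uint32_t old_control = control->load(std::memory_order_relaxed);
  control->store(kOnceDone, std::memory_order_release);
  if (old_control == kOnceWaiter) { /* wake waiters */ }
}

void MarkDoneFixed(std::atomic<uint32_t>* control) {
  // New scheme: one atomic exchange. Any thread that set kOnceWaiter
  // before completion is observed in old_control, so no wakeup is missed.
  uint32_t old_control =
      control->exchange(kOnceDone, std::memory_order_release);
  if (old_control == kOnceWaiter) { /* wake waiters */ }
}

Per the comment deleted below, waiters previously spun with a short timeout,
so a missed wake cost tail latency rather than a deadlock; the exchange makes
the wakeup reliable at the cost of a read-modify-write on the fast path.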
Diffstat (limited to 'absl/base')
-rw-r--r--  absl/base/call_once.h               11
-rw-r--r--  absl/base/internal/spinlock.h        5
-rw-r--r--  absl/base/internal/spinlock_wait.h  12
3 files changed, 8 insertions(+), 20 deletions(-)
diff --git a/absl/base/call_once.h b/absl/base/call_once.h
index 5b468af8..96109f53 100644
--- a/absl/base/call_once.h
+++ b/absl/base/call_once.h
@@ -177,15 +177,8 @@ void CallOnceImpl(std::atomic<uint32_t>* control,
scheduling_mode) == kOnceInit) {
base_internal::invoke(std::forward<Callable>(fn),
std::forward<Args>(args)...);
- // The call to SpinLockWake below is an optimization, because the waiter
- // in SpinLockWait is waiting with a short timeout. The atomic load/store
- // sequence is slightly faster than an atomic exchange:
- // old_control = control->exchange(base_internal::kOnceDone,
- // std::memory_order_release);
- // We opt for a slightly faster case when there are no waiters, in spite
- // of longer tail latency when there are waiters.
- old_control = control->load(std::memory_order_relaxed);
- control->store(base_internal::kOnceDone, std::memory_order_release);
+ old_control =
+ control->exchange(base_internal::kOnceDone, std::memory_order_release);
if (old_control == base_internal::kOnceWaiter) {
base_internal::SpinLockWake(control, true);
}
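For context (not part of this patch), a typical use of the public API whose
internals change above; absl::call_once runs the callable exactly once, and
concurrent callers block on the SpinLockWait/SpinLockWake machinery until
the winner marks the flag done:

#include "absl/base/call_once.h"

absl::once_flag once;
int* lazy_value = nullptr;

void Init() { lazy_value = new int(42); }

int Get() {
  // Exactly one caller runs Init(); the rest wait until it completes.
  absl::call_once(once, Init);
  return *lazy_value;
}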
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
index e6ac9e64..dce1c854 100644
--- a/absl/base/internal/spinlock.h
+++ b/absl/base/internal/spinlock.h
@@ -15,11 +15,8 @@
//
// Most users requiring mutual exclusion should use Mutex.
-// SpinLock is provided for use in three situations:
+// SpinLock is provided for use in two situations:
// - for use in code that Mutex itself depends on
-// - to get a faster fast-path release under low contention (without an
-// atomic read-modify-write) In return, SpinLock has worse behaviour under
-// contention, which is why Mutex is preferred in most situations.
// - for async signal safety (see below)
// SpinLock is async signal safe. If a spinlock is used within a signal
diff --git a/absl/base/internal/spinlock_wait.h b/absl/base/internal/spinlock_wait.h
index 169bc749..c34ce41c 100644
--- a/absl/base/internal/spinlock_wait.h
+++ b/absl/base/internal/spinlock_wait.h
@@ -43,18 +43,16 @@ uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
const SpinLockWaitTransition trans[],
SchedulingMode scheduling_mode);
-// If possible, wake some thread that has called SpinLockDelay(w, ...). If
-// "all" is true, wake all such threads. This call is a hint, and on some
-// systems it may be a no-op; threads calling SpinLockDelay() will always wake
-// eventually even if SpinLockWake() is never called.
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+// is true, wake all such threads. On some systems, this may be a no-op; on
+// those systems, threads calling SpinLockDelay() will always wake eventually
+// even if SpinLockWake() is never called.
void SpinLockWake(std::atomic<uint32_t> *w, bool all);
// Wait for an appropriate spin delay on iteration "loop" of a
// spin loop on location *w, whose previously observed value was "value".
// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
-// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
-// In all cases, it must return in bounded time even if SpinLockWake() is not
-// called.
+// or may wait for a call to SpinLockWake(w).
void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
base_internal::SchedulingMode scheduling_mode);
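Again for illustration only (not part of this patch, and not the Abseil
internals): the contract documented above, where SpinLockWake is a hint and
waiters on some systems must wake eventually even without it, can be mimicked
in portable C++ with a timed wait. The names Delay and Wake are hypothetical
stand-ins.

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>

std::mutex mu;
std::condition_variable cv;

void Delay(std::atomic<uint32_t>* w, uint32_t value) {
  std::unique_lock<std::mutex> lock(mu);
  // Bounded wait: returns after the timeout even if Wake() is never
  // called, matching the wake-as-a-hint semantics documented above.
  cv.wait_for(lock, std::chrono::milliseconds(1),
              [&] { return w->load(std::memory_order_acquire) != value; });
}

void Wake(bool all) {
  if (all) cv.notify_all(); else cv.notify_one();
}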