 absl/base/call_once.h              | 11 ++---------
 absl/base/internal/spinlock.h      |  5 +----
 absl/base/internal/spinlock_wait.h | 12 +++++-------
 absl/status/status.h               |  1 +
 4 files changed, 9 insertions(+), 20 deletions(-)
diff --git a/absl/base/call_once.h b/absl/base/call_once.h
index 5b468af8..96109f53 100644
--- a/absl/base/call_once.h
+++ b/absl/base/call_once.h
@@ -177,15 +177,8 @@ void CallOnceImpl(std::atomic<uint32_t>* control,
scheduling_mode) == kOnceInit) {
base_internal::invoke(std::forward<Callable>(fn),
std::forward<Args>(args)...);
- // The call to SpinLockWake below is an optimization, because the waiter
- // in SpinLockWait is waiting with a short timeout. The atomic load/store
- // sequence is slightly faster than an atomic exchange:
- // old_control = control->exchange(base_internal::kOnceDone,
- // std::memory_order_release);
- // We opt for a slightly faster case when there are no waiters, in spite
- // of longer tail latency when there are waiters.
- old_control = control->load(std::memory_order_relaxed);
- control->store(base_internal::kOnceDone, std::memory_order_release);
+ old_control =
+ control->exchange(base_internal::kOnceDone, std::memory_order_release);
if (old_control == base_internal::kOnceWaiter) {
base_internal::SpinLockWake(control, true);
}
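
The new lines collapse the old relaxed-load/release-store pair into a single atomic read-modify-write. A minimal standalone sketch of the difference, with illustrative kOnce* values and a placeholder wake function standing in for the real constants and base_internal::SpinLockWake, which are defined elsewhere:

#include <atomic>
#include <cstdint>

// Illustrative stand-ins; the real kOnceWaiter/kOnceDone constants live in
// absl::base_internal and may differ.
constexpr uint32_t kOnceWaiter = 2;
constexpr uint32_t kOnceDone = 3;

// Placeholder for base_internal::SpinLockWake(control, true).
void WakeWaiters(std::atomic<uint32_t>* control) {}

void FinishOld(std::atomic<uint32_t>* control) {
  // Old scheme: a waiter that stores kOnceWaiter between the load and the
  // store is not observed, so the wake-up below is skipped and that waiter
  // returns only when its short wait timeout expires. Slightly cheaper with
  // no waiters, longer tail latency with them.
  uint32_t old_control = control->load(std::memory_order_relaxed);
  control->store(kOnceDone, std::memory_order_release);
  if (old_control == kOnceWaiter) WakeWaiters(control);
}

void FinishNew(std::atomic<uint32_t>* control) {
  // New scheme: the exchange publishes kOnceDone and atomically returns the
  // value current at that same instant, so a concurrently registered waiter
  // cannot be missed.
  uint32_t old_control =
      control->exchange(kOnceDone, std::memory_order_release);
  if (old_control == kOnceWaiter) WakeWaiters(control);
}

The exchange is the slightly heavier operation on the no-waiter path, which is exactly the trade-off the deleted comment describes; the new code gives up that fast path in exchange for deterministic wake-ups.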
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
index e6ac9e64..dce1c854 100644
--- a/absl/base/internal/spinlock.h
+++ b/absl/base/internal/spinlock.h
@@ -15,11 +15,8 @@
//
// Most users requiring mutual exclusion should use Mutex.
-// SpinLock is provided for use in three situations:
+// SpinLock is provided for use in two situations:
// - for use in code that Mutex itself depends on
-// - to get a faster fast-path release under low contention (without an
-// atomic read-modify-write) In return, SpinLock has worse behaviour under
-// contention, which is why Mutex is preferred in most situations.
// - for async signal safety (see below)
// SpinLock is async signal safe. If a spinlock is used within a signal
diff --git a/absl/base/internal/spinlock_wait.h b/absl/base/internal/spinlock_wait.h
index 169bc749..c34ce41c 100644
--- a/absl/base/internal/spinlock_wait.h
+++ b/absl/base/internal/spinlock_wait.h
@@ -43,18 +43,16 @@ uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
const SpinLockWaitTransition trans[],
SchedulingMode scheduling_mode);
-// If possible, wake some thread that has called SpinLockDelay(w, ...). If
-// "all" is true, wake all such threads. This call is a hint, and on some
-// systems it may be a no-op; threads calling SpinLockDelay() will always wake
-// eventually even if SpinLockWake() is never called.
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+// is true, wake all such threads. On some systems, this may be a no-op; on
+// those systems, threads calling SpinLockDelay() will always wake eventually
+// even if SpinLockWake() is never called.
void SpinLockWake(std::atomic<uint32_t> *w, bool all);
// Wait for an appropriate spin delay on iteration "loop" of a
// spin loop on location *w, whose previously observed value was "value".
// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
-// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
-// In all cases, it must return in bounded time even if SpinLockWake() is not
-// called.
+// or may wait for a call to SpinLockWake(w).
void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
base_internal::SchedulingMode scheduling_mode);
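
Together the two declarations form a wait/wake protocol: waiters poll through SpinLockDelay(), and a releasing thread hints through SpinLockWake(). A minimal sketch of a binary spin flag built on them, assuming it is compiled inside Abseil; kFree, kHeld, and the function names here are illustrative, and SCHEDULE_KERNEL_ONLY is assumed to be the non-cooperative SchedulingMode:

#include <atomic>
#include <cstdint>

#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock_wait.h"

constexpr uint32_t kFree = 0;  // illustrative flag values, not from the header
constexpr uint32_t kHeld = 1;

void Acquire(std::atomic<uint32_t>* w) {
  int loop = 0;
  uint32_t v = w->load(std::memory_order_relaxed);
  while (v == kHeld ||
         !w->compare_exchange_weak(v, kHeld, std::memory_order_acquire)) {
    // May spin, yield, sleep a tick, or block until SpinLockWake(w, ...),
    // but always returns in bounded time. `v` is the value just observed.
    absl::base_internal::SpinLockDelay(
        w, v, ++loop, absl::base_internal::SCHEDULE_KERNEL_ONLY);
    v = w->load(std::memory_order_relaxed);
  }
}

void Release(std::atomic<uint32_t>* w) {
  w->store(kFree, std::memory_order_release);
  // A hint only: on some systems this is a no-op, and delayed threads wake
  // eventually regardless. `false` asks to wake at most one waiter.
  absl::base_internal::SpinLockWake(w, /*all=*/false);
}

One subtlety: compare_exchange_weak updates `v` on failure, so the delay always receives the most recently observed value, matching the "previously observed value" contract in the comment above.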
diff --git a/absl/status/status.h b/absl/status/status.h
index c4d6fce0..9019e6c2 100644
--- a/absl/status/status.h
+++ b/absl/status/status.h
@@ -57,6 +57,7 @@
#include "absl/container/inlined_vector.h"
#include "absl/status/internal/status_internal.h"
#include "absl/strings/cord.h"
+#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
namespace absl {