author    Dmitry Vyukov <dvyukov@google.com>  2023-10-24 09:57:51 -0700
committer Copybara-Service <copybara-worker@google.com>  2023-10-24 09:58:25 -0700
commit    b841db22f8d1d9cdbaacecf2e7c87ce270f8d96f (patch)
tree      24575455b97fe34d6ac029a91de181e9a0f982f6 /absl
parent    8b38320373e802c288276a8c1cf3274957a4fa5e (diff)
absl: requeue waiters as LIFO
Currently, if a thread has already blocked on a Mutex but then failed to acquire it, we queue it in FIFO order again. As a result, unlucky threads can suffer bad latency if they are requeued several times. The least we can do for them is to queue them in LIFO order after blocking.

PiperOrigin-RevId: 576174725
Change-Id: I9e2a329d34279a26bd1075b42e3217a5dc065f0a
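The policy can be pictured with a small, self-contained C++ sketch. This is not the real absl implementation (which threads waiters through an intrusive PerThreadSynch list and also checks priorities, head->maybe_unlocking, and the waiter's Condition, as the diff below shows); the Waiter struct, WaitQueue class, and has_blocked field are hypothetical stand-ins used only to illustrate FIFO order on the first wait versus LIFO requeue after a failed acquire:

// Minimal sketch of the requeue policy; names here are illustrative,
// not the abseil-cpp internals.
#include <deque>
#include <iostream>

struct Waiter {
  int id;
  bool has_blocked;  // analogous in spirit to the kMuHasBlocked flag
};

class WaitQueue {
 public:
  void Enqueue(const Waiter& w) {
    if (w.has_blocked) {
      // Waiter already blocked once and lost the race for the mutex:
      // requeue it at the head so it does not wait out the whole queue again.
      q_.push_front(w);
    } else {
      // First-time waiters still queue in FIFO order at the tail.
      q_.push_back(w);
    }
  }
  void Dump() const {
    for (const Waiter& w : q_) std::cout << w.id << ' ';
    std::cout << '\n';
  }

 private:
  std::deque<Waiter> q_;
};

int main() {
  WaitQueue q;
  q.Enqueue({1, false});
  q.Enqueue({2, false});
  // Waiter 3 was woken earlier but failed to acquire the mutex; it is
  // requeued at the head rather than behind waiters 1 and 2.
  q.Enqueue({3, true});
  q.Dump();  // prints: 3 1 2
}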
Diffstat (limited to 'absl')
-rw-r--r--  absl/synchronization/mutex.cc | 18
1 file changed, 18 insertions, 0 deletions
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 27fd6a50..47032677 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -991,6 +991,24 @@ static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,
if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
s->skip = s->next; // s may skip to its successor
}
+ } else if ((flags & kMuHasBlocked) &&
+ (s->priority >= head->next->priority) &&
+ (!head->maybe_unlocking ||
+ (waitp->how == kExclusive &&
+ Condition::GuaranteedEqual(waitp->cond, nullptr)))) {
+ // This thread has already waited, then was woken, then failed to acquire
+ // the mutex and now tries to requeue. Try to requeue it at head,
+ // otherwise it can suffer bad latency (wait whole queue several times).
+ // However, we need to be conservative. First, we need to ensure that we
+ // respect priorities. Then, we need to be careful to not break wait
+ // queue invariants: we require either that unlocker is not scanning
+ // the queue or that the current thread is a writer with no condition
+ // (unlocker will recheck the queue for such waiters).
+ s->next = head->next;
+ head->next = s;
+ if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
+ s->skip = s->next; // s may skip to its successor
+ }
} else { // enqueue not done any other way, so
// we're inserting s at the back
// s will become new head; copy data from head into it