path: root/absl/synchronization/mutex.cc
author:    Abseil Team <absl-team@google.com>  2023-08-29 23:57:04 -0700
committer: Copybara-Service <copybara-worker@google.com>  2023-08-29 23:57:48 -0700
commit:    b06ab1f3554eee58fa0eac046ea79f4b572fa502
tree:      7d4ebec1fb206bf471bbdd3a9885f1ae4aa0d911 /absl/synchronization/mutex.cc
parent:    f6fc4efa6edf7285512797a5e68373994dd2df27
absl: fix a priority bug in CondVar wait morphing
Enqueue updates the priority of the queued thread. It was assumed that the queued thread is the current thread, but that is not the case in CondVar wait morphing, where we requeue an existing CondVar waiter on the Mutex. As a result, one thread could falsely get the priority of another thread. Fix this by not updating the priority in this case, and make the assumption explicit and checked.

PiperOrigin-RevId: 561249402
Change-Id: I9476c047757090b893a88a2839b795b85fe220ad
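To make the bug concrete, here is a minimal sketch of the pattern the fix guards against. The names below (Waiter, kIsFer, this standalone Enqueue) are hypothetical, simplified stand-ins for Abseil's internal PerThreadSynch machinery, not the real implementation:

#include <pthread.h>
#include <sched.h>

// Hypothetical stand-in for Abseil's PerThreadSynch.
struct Waiter {
  int priority = 0;  // consulted later to keep the wait queue ordered
};

constexpr int kIsFer = 0x04;  // queueing on behalf of another thread

void Enqueue(Waiter* w, int flags) {
  // pthread_getschedparam(pthread_self(), ...) describes the *calling*
  // thread. Before this fix the read ran unconditionally, so when a
  // CondVar waiter was requeued onto the Mutex, w (another thread's
  // node) was stamped with the signaling thread's priority. Skipping
  // the read when queueing on another thread's behalf avoids that.
  if ((flags & kIsFer) == 0) {
    int policy;
    struct sched_param param;
    if (pthread_getschedparam(pthread_self(), &policy, &param) == 0) {
      w->priority = param.sched_priority;
    }
  }
  // ... insert w into the priority-ordered waiter queue ...
}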
Diffstat (limited to 'absl/synchronization/mutex.cc')
-rw-r--r--  absl/synchronization/mutex.cc  |  37
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 3aa5560a..353a8280 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -682,6 +682,7 @@ static const intptr_t kMuOne = 0x0100;  // a count of one reader
 // flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
 static const int kMuHasBlocked = 0x01;  // already blocked (MUST == 1)
 static const int kMuIsCond = 0x02;      // conditional waiter (CV or Condition)
+static const int kMuIsFer = 0x04;       // wait morphing from a CondVar
 
 static_assert(PerThreadSynch::kAlignment > kMuLow,
               "PerThreadSynch::kAlignment must be greater than kMuLow");
@@ -920,20 +921,23 @@ static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,
   s->wake = false;  // not being woken
   s->cond_waiter = ((flags & kMuIsCond) != 0);
 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
-  int64_t now_cycles = CycleClock::Now();
-  if (s->next_priority_read_cycles < now_cycles) {
-    // Every so often, update our idea of the thread's priority.
-    // pthread_getschedparam() is 5% of the block/wakeup time;
-    // CycleClock::Now() is 0.5%.
-    int policy;
-    struct sched_param param;
-    const int err = pthread_getschedparam(pthread_self(), &policy, &param);
-    if (err != 0) {
-      ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
-    } else {
-      s->priority = param.sched_priority;
-      s->next_priority_read_cycles =
-          now_cycles + static_cast<int64_t>(CycleClock::Frequency());
+  if ((flags & kMuIsFer) == 0) {
+    assert(s == Synch_GetPerThread());
+    int64_t now_cycles = CycleClock::Now();
+    if (s->next_priority_read_cycles < now_cycles) {
+      // Every so often, update our idea of the thread's priority.
+      // pthread_getschedparam() is 5% of the block/wakeup time;
+      // CycleClock::Now() is 0.5%.
+      int policy;
+      struct sched_param param;
+      const int err = pthread_getschedparam(pthread_self(), &policy, &param);
+      if (err != 0) {
+        ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
+      } else {
+        s->priority = param.sched_priority;
+        s->next_priority_read_cycles =
+            now_cycles + static_cast<int64_t>(CycleClock::Frequency());
+      }
     }
   }
 #endif
@@ -2436,7 +2440,8 @@ void Mutex::Fer(PerThreadSynch* w) {
     } else {
       if ((v & (kMuSpin | kMuWait)) == 0) {  // no waiters
         // This thread tries to become the one and only waiter.
-        PerThreadSynch* new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
+        PerThreadSynch* new_h =
+            Enqueue(nullptr, w->waitp, v, kMuIsCond | kMuIsFer);
         ABSL_RAW_CHECK(new_h != nullptr,
                        "Enqueue failed");  // we must queue ourselves
         if (mu_.compare_exchange_strong(
@@ -2447,7 +2452,7 @@ void Mutex::Fer(PerThreadSynch* w) {
       } else if ((v & kMuSpin) == 0 &&
                  mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
         PerThreadSynch* h = GetPerThreadSynch(v);
-        PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond);
+        PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond | kMuIsFer);
         ABSL_RAW_CHECK(new_h != nullptr,
                        "Enqueue failed");  // we must queue ourselves
         do {