Diffstat (limited to 'absl/synchronization/mutex.cc')
-rw-r--r--  absl/synchronization/mutex.cc | 51
1 file changed, 29 insertions(+), 22 deletions(-)
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 0742680c..e2ee411f 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -648,11 +648,16 @@ static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
// set when a thread is unblocked (INV1a), and threads that were
// unblocked reset the bit when they either acquire or re-block (INV1b).
static const intptr_t kMuDesig = 0x0002L;
-static const intptr_t kMuWait = 0x0004L; // threads are waiting
-static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
-static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
-static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting
- // for a reader
+static const intptr_t kMuWait = 0x0004L; // threads are waiting
+static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
+static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
+// Runnable writer is waiting for a reader.
+// If set, new readers will not lock the mutex to avoid writer starvation.
+// Note: if a reader has higher priority than the writer, it will still lock
+// the mutex ahead of the waiting writer, but in a very inefficient manner:
+// the reader will first queue itself and block, but then the last unlocking
+// reader will wake it.
+static const intptr_t kMuWrWait = 0x0020L;
static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
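
An illustrative aside on the flag bits above: a reader's fast-path acquire can refuse the lock whenever kMuWriter or kMuWrWait is set, which is exactly the starvation-avoidance rule the new kMuWrWait comment describes. Below is a minimal, hedged sketch of that check; TryReadLockOnce and kMuOne (a hypothetical reader-count increment stored in the high bits) are illustrative names, not Abseil's actual fast path.

#include <atomic>
#include <cstdint>

static constexpr std::intptr_t kMuReader = 0x0001L;  // a reader holds the lock
static constexpr std::intptr_t kMuWriter = 0x0008L;  // a writer holds the lock
static constexpr std::intptr_t kMuWrWait = 0x0020L;  // runnable writer is waiting
static constexpr std::intptr_t kMuOne    = 0x0100L;  // hypothetical reader-count increment

// One attempt at a reader acquire; returns false if the caller should
// fall back to the slow path (queue itself) instead.
bool TryReadLockOnce(std::atomic<std::intptr_t>& mu) {
  std::intptr_t v = mu.load(std::memory_order_relaxed);
  // Back off if a writer holds the lock or a runnable writer is waiting:
  // admitting new readers while kMuWrWait is set would starve the writer.
  if ((v & (kMuWriter | kMuWrWait)) != 0) return false;
  return mu.compare_exchange_weak(v, (v + kMuOne) | kMuReader,
                                  std::memory_order_acquire,
                                  std::memory_order_relaxed);
}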
@@ -919,6 +924,25 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
s->may_skip = true; // always true on entering queue
s->wake = false; // not being woken
s->cond_waiter = ((flags & kMuIsCond) != 0);
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+ int64_t now_cycles = base_internal::CycleClock::Now();
+ if (s->next_priority_read_cycles < now_cycles) {
+ // Every so often, update our idea of the thread's priority.
+ // pthread_getschedparam() is 5% of the block/wakeup time;
+ // base_internal::CycleClock::Now() is 0.5%.
+ int policy;
+ struct sched_param param;
+ const int err = pthread_getschedparam(pthread_self(), &policy, &param);
+ if (err != 0) {
+ ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
+ } else {
+ s->priority = param.sched_priority;
+ s->next_priority_read_cycles =
+ now_cycles +
+ static_cast<int64_t>(base_internal::CycleClock::Frequency());
+ }
+ }
+#endif
if (head == nullptr) { // s is the only waiter
s->next = s; // it's the only entry in the cycle
s->readers = mu; // reader count is from mu word
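
Note the effect of this hunk: the priority refresh now runs before the head == nullptr check, so even the first waiter to enqueue caches a current priority; a later arrival compares itself against head->priority, as the hunk below shows. As a stand-alone illustration of the caching pattern, here is a hedged sketch in which WaiterState and cycles_per_second are simplified stand-ins for PerThreadSynch and base_internal::CycleClock::Frequency().

#include <pthread.h>
#include <sched.h>
#include <cstdint>

struct WaiterState {
  int priority = 0;
  std::int64_t next_priority_read_cycles = 0;  // deadline for the next refresh
};

void MaybeRefreshPriority(WaiterState* s, std::int64_t now_cycles,
                          std::int64_t cycles_per_second) {
  if (s->next_priority_read_cycles >= now_cycles) return;  // cache still fresh
  int policy;
  sched_param param;
  if (pthread_getschedparam(pthread_self(), &policy, &param) == 0) {
    s->priority = param.sched_priority;
    // Refresh roughly once per second: per the comment in the diff, the
    // syscall is an order of magnitude costlier than reading the cycle clock.
    s->next_priority_read_cycles = now_cycles + cycles_per_second;
  }
}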
@@ -927,23 +951,6 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
} else {
PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
- int64_t now_cycles = base_internal::CycleClock::Now();
- if (s->next_priority_read_cycles < now_cycles) {
- // Every so often, update our idea of the thread's priority.
- // pthread_getschedparam() is 5% of the block/wakeup time;
- // base_internal::CycleClock::Now() is 0.5%.
- int policy;
- struct sched_param param;
- const int err = pthread_getschedparam(pthread_self(), &policy, &param);
- if (err != 0) {
- ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
- } else {
- s->priority = param.sched_priority;
- s->next_priority_read_cycles =
- now_cycles +
- static_cast<int64_t>(base_internal::CycleClock::Frequency());
- }
- }
if (s->priority > head->priority) { // s's priority is above head's
// try to put s in priority-fifo order, or failing that at the front.
if (!head->maybe_unlocking) {
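
The context cut off above continues into the priority-aware insertion: when the arriving waiter outranks the queue head, Enqueue tries to place it in priority-FIFO order. A hedged sketch of that ordering on a plain singly linked list follows; Waiter and EnqueuePriorityFifo are illustrative names, and Abseil's real queue (a circular list with skip pointers, guarded by the kMuSpin spinlock) is not modeled here.

struct Waiter {
  int priority = 0;
  Waiter* next = nullptr;
};

// Inserts s after the last waiter whose priority is >= s's, so waiters
// of equal priority remain FIFO. Returns the (possibly new) head.
Waiter* EnqueuePriorityFifo(Waiter* head, Waiter* s) {
  if (head == nullptr || s->priority > head->priority) {
    s->next = head;  // s outranks everything queued so far
    return s;
  }
  Waiter* prev = head;
  while (prev->next != nullptr && prev->next->priority >= s->priority) {
    prev = prev->next;  // skip waiters of equal or higher priority
  }
  s->next = prev->next;
  prev->next = s;
  return head;
}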