path: root/absl/synchronization
author    misterg <misterg@google.com> 2017-09-19 16:54:40 -0400
committer misterg <misterg@google.com> 2017-09-19 16:54:40 -0400
commit    c2e754829628d1e9b7a16b3389cfdace76950fdf (patch)
tree      5a7f056f44e27c30e10025113b644f0b3b5801fc /absl/synchronization
Initial Commit
Diffstat (limited to 'absl/synchronization')
-rw-r--r--  absl/synchronization/BUILD.bazel                           178
-rw-r--r--  absl/synchronization/barrier.cc                             50
-rw-r--r--  absl/synchronization/barrier.h                              77
-rw-r--r--  absl/synchronization/blocking_counter.cc                    53
-rw-r--r--  absl/synchronization/blocking_counter.h                     96
-rw-r--r--  absl/synchronization/blocking_counter_test.cc               67
-rw-r--r--  absl/synchronization/internal/create_thread_identity.cc   110
-rw-r--r--  absl/synchronization/internal/create_thread_identity.h     53
-rw-r--r--  absl/synchronization/internal/graphcycles.cc               709
-rw-r--r--  absl/synchronization/internal/graphcycles.h                136
-rw-r--r--  absl/synchronization/internal/graphcycles_test.cc          471
-rw-r--r--  absl/synchronization/internal/kernel_timeout.h             147
-rw-r--r--  absl/synchronization/internal/mutex_nonprod.cc             311
-rw-r--r--  absl/synchronization/internal/mutex_nonprod.inc            256
-rw-r--r--  absl/synchronization/internal/per_thread_sem.cc            106
-rw-r--r--  absl/synchronization/internal/per_thread_sem.h             107
-rw-r--r--  absl/synchronization/internal/per_thread_sem_test.cc       246
-rw-r--r--  absl/synchronization/internal/thread_pool.h                 90
-rw-r--r--  absl/synchronization/internal/waiter.cc                    394
-rw-r--r--  absl/synchronization/internal/waiter.h                     138
-rw-r--r--  absl/synchronization/mutex.cc                             2680
-rw-r--r--  absl/synchronization/mutex.h                              1013
-rw-r--r--  absl/synchronization/mutex_test.cc                        1538
-rw-r--r--  absl/synchronization/notification.cc                        84
-rw-r--r--  absl/synchronization/notification.h                        112
-rw-r--r--  absl/synchronization/notification_test.cc                  124
26 files changed, 9346 insertions(+), 0 deletions(-)
diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel
new file mode 100644
index 0000000..bddd2ec
--- /dev/null
+++ b/absl/synchronization/BUILD.bazel
@@ -0,0 +1,178 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load(
+ "//absl:copts.bzl",
+ "ABSL_DEFAULT_COPTS",
+ "ABSL_TEST_COPTS",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"]) # Apache 2.0
+
+# Internal data structure for efficiently detecting mutex dependency cycles
+cc_library(
+ name = "graphcycles_internal",
+ srcs = [
+ "internal/graphcycles.cc",
+ ],
+ hdrs = [
+ "internal/graphcycles.h",
+ ],
+ copts = ABSL_DEFAULT_COPTS,
+ deps = [
+ "//absl/base",
+ "//absl/base:core_headers",
+ "//absl/base:malloc_internal",
+ ],
+)
+
+cc_library(
+ name = "synchronization",
+ srcs = [
+ "barrier.cc",
+ "blocking_counter.cc",
+ "internal/create_thread_identity.cc",
+ "internal/per_thread_sem.cc",
+ "internal/waiter.cc",
+ "notification.cc",
+ ] + select({
+ "//conditions:default": ["mutex.cc"],
+ }),
+ hdrs = [
+ "barrier.h",
+ "blocking_counter.h",
+ "internal/create_thread_identity.h",
+ "internal/kernel_timeout.h",
+ "internal/mutex_nonprod.inc",
+ "internal/per_thread_sem.h",
+ "internal/waiter.h",
+ "mutex.h",
+ "notification.h",
+ ],
+ copts = ABSL_DEFAULT_COPTS,
+ deps = [
+ ":graphcycles_internal",
+ "//absl/base",
+ "//absl/base:base_internal",
+ "//absl/base:config",
+ "//absl/base:core_headers",
+ "//absl/base:dynamic_annotations",
+ "//absl/base:malloc_extension",
+ "//absl/base:malloc_internal",
+ "//absl/debugging:stacktrace",
+ "//absl/time",
+ ],
+)
+
+cc_test(
+ name = "blocking_counter_test",
+ size = "small",
+ srcs = ["blocking_counter_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ deps = [
+ ":synchronization",
+ "//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "graphcycles_test",
+ size = "medium",
+ srcs = ["internal/graphcycles_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ deps = [
+ ":graphcycles_internal",
+ "//absl/base",
+ "//absl/base:core_headers",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "thread_pool",
+ testonly = 1,
+ hdrs = ["internal/thread_pool.h"],
+ deps = [
+ ":synchronization",
+ "//absl/base:core_headers",
+ ],
+)
+
+cc_test(
+ name = "mutex_test",
+ size = "large",
+ timeout = "moderate",
+ srcs = ["mutex_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ tags = [
+ "no_test_loonix", # Too slow.
+ ],
+ deps = [
+ ":synchronization",
+ ":thread_pool",
+ "//absl/base",
+ "//absl/base:core_headers",
+ "//absl/memory",
+ "//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "notification_test",
+ size = "small",
+ srcs = ["notification_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ deps = [
+ ":synchronization",
+ "//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "per_thread_sem_test_common",
+ testonly = 1,
+ srcs = ["internal/per_thread_sem_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ deps = [
+ ":synchronization",
+ "//absl/base",
+ "//absl/base:malloc_extension",
+ "//absl/strings",
+ "//absl/time",
+ "@com_google_googletest//:gtest",
+ ],
+ alwayslink = 1,
+)
+
+cc_test(
+ name = "per_thread_sem_test",
+ size = "medium",
+ copts = ABSL_TEST_COPTS,
+ deps = [
+ ":per_thread_sem_test_common",
+ ":synchronization",
+ "//absl/base",
+ "//absl/base:malloc_extension",
+ "//absl/strings",
+ "//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
diff --git a/absl/synchronization/barrier.cc b/absl/synchronization/barrier.cc
new file mode 100644
index 0000000..a1b3ad5
--- /dev/null
+++ b/absl/synchronization/barrier.cc
@@ -0,0 +1,50 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/barrier.h"
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+
+// Return whether int *arg is zero.
+static bool IsZero(void *arg) {
+ return 0 == *reinterpret_cast<int *>(arg);
+}
+
+bool Barrier::Block() {
+ MutexLock l(&this->lock_);
+
+ this->num_to_block_--;
+ if (this->num_to_block_ < 0) {
+ ABSL_RAW_LOG(
+ FATAL,
+ "Block() called too many times. num_to_block_=%d out of total=%d",
+ this->num_to_block_, this->num_to_exit_);
+ }
+
+ this->lock_.Await(Condition(IsZero, &this->num_to_block_));
+
+ // Determine which thread can safely delete this Barrier object
+ this->num_to_exit_--;
+ ABSL_RAW_CHECK(this->num_to_exit_ >= 0, "barrier underflow");
+
+ // If num_to_exit_ == 0 then all other threads in the barrier have
+ // exited the Wait() and have released the Mutex so this thread is
+ // free to delete the barrier.
+ return this->num_to_exit_ == 0;
+}
+
+} // namespace absl
diff --git a/absl/synchronization/barrier.h b/absl/synchronization/barrier.h
new file mode 100644
index 0000000..f834fee
--- /dev/null
+++ b/absl/synchronization/barrier.h
@@ -0,0 +1,77 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// barrier.h
+// -----------------------------------------------------------------------------
+
+#ifndef ABSL_SYNCHRONIZATION_BARRIER_H_
+#define ABSL_SYNCHRONIZATION_BARRIER_H_
+
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+
+// Barrier
+//
+// This class creates a barrier which blocks threads until a prespecified
+// threshold of threads (`num_threads`) utilizes the barrier. A thread utilizes
+// the `Barrier` by calling `Block()` on the barrier, which will block that
+// thread; no call to `Block()` will return until `num_threads` threads have
+// called it.
+//
+// Exactly one call to `Block()` will return `true`; the thread receiving that
+// `true` return is then responsible for destroying the barrier. Because stack
+// allocation would cause the barrier to be deleted when it goes out of scope,
+// barriers should not be stack allocated.
+//
+// Example:
+//
+// // Main thread creates a `Barrier`:
+// barrier = new Barrier(num_threads);
+//
+// // Each participating thread could then call:
+// if (barrier->Block()) delete barrier; // Exactly one call to `Block()`
+// // returns `true`; that call
+// // deletes the barrier.
+class Barrier {
+ public:
+  // `num_threads` is the number of threads that will participate in the
+  // barrier.
+ explicit Barrier(int num_threads)
+ : num_to_block_(num_threads), num_to_exit_(num_threads) {}
+
+ Barrier(const Barrier&) = delete;
+ Barrier& operator=(const Barrier&) = delete;
+
+ // Barrier::Block()
+ //
+ // Blocks the current thread, and returns only when the `num_threads`
+ // threshold of threads utilizing this barrier has been reached. `Block()`
+ // returns `true` for precisely one caller, which may then destroy the
+ // barrier.
+ //
+ // Memory ordering: For any threads X and Y, any action taken by X
+ // before X calls `Block()` will be visible to Y after Y returns from
+ // `Block()`.
+ bool Block();
+
+ private:
+ Mutex lock_;
+ int num_to_block_ GUARDED_BY(lock_);
+ int num_to_exit_ GUARDED_BY(lock_);
+};
+
+} // namespace absl
+#endif // ABSL_SYNCHRONIZATION_BARRIER_H_
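A minimal usage sketch of the contract documented above (illustrative only,
not part of this commit; `RunPhase()` and `DoWork()` are hypothetical names):

    #include <thread>
    #include <vector>
    #include "absl/synchronization/barrier.h"

    void DoWork(int id) { /* hypothetical per-thread work */ }

    void RunPhase(int num_threads) {
      absl::Barrier* barrier = new absl::Barrier(num_threads);
      std::vector<std::thread> threads;
      for (int i = 0; i < num_threads; i++) {
        threads.emplace_back([barrier, i] {
          DoWork(i);
          // All threads rendezvous here. Exactly one Block() call returns
          // true, and that caller alone deletes the barrier.
          if (barrier->Block()) delete barrier;
        });
      }
      for (std::thread& t : threads) t.join();
    }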
diff --git a/absl/synchronization/blocking_counter.cc b/absl/synchronization/blocking_counter.cc
new file mode 100644
index 0000000..48e3650
--- /dev/null
+++ b/absl/synchronization/blocking_counter.cc
@@ -0,0 +1,53 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/blocking_counter.h"
+
+namespace absl {
+
+// Return whether int *arg is zero.
+static bool IsZero(void *arg) {
+ return 0 == *reinterpret_cast<int *>(arg);
+}
+
+bool BlockingCounter::DecrementCount() {
+ MutexLock l(&lock_);
+ count_--;
+ if (count_ < 0) {
+ ABSL_RAW_LOG(
+ FATAL,
+ "BlockingCounter::DecrementCount() called too many times. count=%d",
+ count_);
+ }
+ return count_ == 0;
+}
+
+void BlockingCounter::Wait() {
+ MutexLock l(&this->lock_);
+ ABSL_RAW_CHECK(count_ >= 0, "BlockingCounter underflow");
+
+  // Only one thread may call Wait(). To support more than one thread,
+  // implement a counter num_to_exit, as in the Barrier class.
+ ABSL_RAW_CHECK(num_waiting_ == 0, "multiple threads called Wait()");
+ num_waiting_++;
+
+ this->lock_.Await(Condition(IsZero, &this->count_));
+
+  // At this point, we know that all threads executing DecrementCount have
+ // released the lock, and so will not touch this object again.
+ // Therefore, the thread calling this method is free to delete the object
+ // after we return from this method.
+}
+
+} // namespace absl
diff --git a/absl/synchronization/blocking_counter.h b/absl/synchronization/blocking_counter.h
new file mode 100644
index 0000000..476d5f8
--- /dev/null
+++ b/absl/synchronization/blocking_counter.h
@@ -0,0 +1,96 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// blocking_counter.h
+// -----------------------------------------------------------------------------
+
+#ifndef ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
+#define ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
+
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+
+// BlockingCounter
+//
+// This class allows a thread to block for a pre-specified number of actions.
+// `BlockingCounter` maintains a single non-negative abstract integer "count"
+// with an initial value `initial_count`. A thread can then call `Wait()` on
+// this blocking counter to block until the specified number of events occur;
+// worker threads then call `DecrementCount()` on the counter upon completion of
+// their work. Once the counter's internal "count" reaches zero, the blocked
+// thread unblocks.
+//
+// A `BlockingCounter` requires the following:
+// - its `initial_count` is non-negative.
+// - the number of calls to `DecrementCount()` on it is at most
+// `initial_count`.
+// - `Wait()` is called at most once on it.
+//
+// Given the above requirements, a `BlockingCounter` provides the following
+// guarantees:
+// - Once its internal "count" reaches zero, no legal action on the object
+// can further change the value of "count".
+// - When `Wait()` returns, it is legal to destroy the `BlockingCounter`.
+// - When `Wait()` returns, the number of calls to `DecrementCount()` on
+// this blocking counter exactly equals `initial_count`.
+//
+// Example:
+// BlockingCounter bcount(N); // there are N items of work
+// ... Allow worker threads to start.
+// ... On completing each work item, workers do:
+// ... bcount.DecrementCount(); // an item of work has been completed
+//
+// bcount.Wait(); // wait for all work to be complete
+//
+class BlockingCounter {
+ public:
+ explicit BlockingCounter(int initial_count)
+ : count_(initial_count), num_waiting_(0) {}
+
+ BlockingCounter(const BlockingCounter&) = delete;
+ BlockingCounter& operator=(const BlockingCounter&) = delete;
+
+ // BlockingCounter::DecrementCount()
+ //
+  // Decrements the counter's "count" by one, and returns "count == 0". This
+ // function requires that "count != 0" when it is called.
+ //
+ // Memory ordering: For any threads X and Y, any action taken by X
+ // before it calls `DecrementCount()` is visible to thread Y after
+ // Y's call to `DecrementCount()`, provided Y's call returns `true`.
+ bool DecrementCount();
+
+ // BlockingCounter::Wait()
+ //
+ // Blocks until the counter reaches zero. This function may be called at most
+ // once. On return, `DecrementCount()` will have been called "initial_count"
+ // times and the blocking counter may be destroyed.
+ //
+ // Memory ordering: For any threads X and Y, any action taken by X
+ // before X calls `DecrementCount()` is visible to Y after Y returns
+ // from `Wait()`.
+ void Wait();
+
+ private:
+ Mutex lock_;
+ int count_ GUARDED_BY(lock_);
+ int num_waiting_ GUARDED_BY(lock_);
+};
+
+} // namespace absl
+#endif // ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
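A minimal fan-out sketch of the contract above (illustrative only, not part
of this commit; `FanOut()` and `ComputeItem()` are hypothetical names). Note
how the memory-ordering guarantee makes it safe to read `results` once
`Wait()` returns:

    #include <thread>
    #include <vector>
    #include "absl/synchronization/blocking_counter.h"

    int ComputeItem(int i) { return i * i; }  // hypothetical work item

    std::vector<int> FanOut(int n) {
      std::vector<int> results(n);
      absl::BlockingCounter done(n);
      std::vector<std::thread> workers;
      for (int i = 0; i < n; i++) {
        workers.emplace_back([&results, &done, i] {
          results[i] = ComputeItem(i);
          done.DecrementCount();  // signals one completed item
        });
      }
      done.Wait();  // blocks until all n items have completed
      // Safe to read results here: everything each worker did before its
      // DecrementCount() call is visible after Wait() returns.
      for (std::thread& w : workers) w.join();
      return results;
    }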
diff --git a/absl/synchronization/blocking_counter_test.cc b/absl/synchronization/blocking_counter_test.cc
new file mode 100644
index 0000000..b4b6677
--- /dev/null
+++ b/absl/synchronization/blocking_counter_test.cc
@@ -0,0 +1,67 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/blocking_counter.h"
+
+#include <functional>
+#include <memory>
+#include <thread> // NOLINT(build/c++11)
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/time/clock.h"
+
+namespace absl {
+namespace {
+
+void PauseAndDecreaseCounter(BlockingCounter* counter, int* done) {
+ absl::SleepFor(absl::Seconds(1));
+ *done = 1;
+ counter->DecrementCount();
+}
+
+TEST(BlockingCounterTest, BasicFunctionality) {
+  // This test verifies that BlockingCounter functions correctly. It starts a
+  // number of threads that just sleep for a second and decrement a counter.
+
+ // Initialize the counter.
+ const int num_workers = 10;
+ BlockingCounter counter(num_workers);
+
+ std::vector<std::thread> workers;
+ std::vector<int> done(num_workers, 0);
+
+  // Start a number of parallel tasks that will just wait for a second and
+  // then decrement the count.
+ workers.reserve(num_workers);
+ for (int k = 0; k < num_workers; k++) {
+ workers.emplace_back(
+ [&counter, &done, k] { PauseAndDecreaseCounter(&counter, &done[k]); });
+ }
+
+ // Wait for the threads to have all finished.
+ counter.Wait();
+
+ // Check that all the workers have completed.
+ for (int k = 0; k < num_workers; k++) {
+ EXPECT_EQ(1, done[k]);
+ }
+
+ for (std::thread& w : workers) {
+ w.join();
+ }
+}
+
+} // namespace
+} // namespace absl
diff --git a/absl/synchronization/internal/create_thread_identity.cc b/absl/synchronization/internal/create_thread_identity.cc
new file mode 100644
index 0000000..1497634
--- /dev/null
+++ b/absl/synchronization/internal/create_thread_identity.cc
@@ -0,0 +1,110 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is a no-op if the required LowLevelAlloc support is missing.
+#include "absl/base/internal/low_level_alloc.h"
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#include <string.h>
+#include <atomic>
+#include <memory>
+
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+
+namespace absl {
+namespace synchronization_internal {
+
+// ThreadIdentity storage is persistent; we maintain a free-list of previously
+// released ThreadIdentity objects.
+static base_internal::SpinLock freelist_lock(base_internal::kLinkerInitialized);
+static base_internal::ThreadIdentity* thread_identity_freelist;
+
+// A per-thread destructor for reclaiming associated ThreadIdentity objects.
+// Since we must preserve their storage, we cache them for re-use.
+static void ReclaimThreadIdentity(void* v) {
+ base_internal::ThreadIdentity* identity =
+ static_cast<base_internal::ThreadIdentity*>(v);
+
+ // all_locks might have been allocated by the Mutex implementation.
+ // We free it here when we are notified that our thread is dying.
+ if (identity->per_thread_synch.all_locks != nullptr) {
+ base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
+ }
+
+ // We must explicitly clear the current thread's identity:
+ // (a) Subsequent (unrelated) per-thread destructors may require an identity.
+ // We must guarantee a new identity is used in this case (this instructor
+ // will be reinvoked up to PTHREAD_DESTRUCTOR_ITERATIONS in this case).
+ // (b) ThreadIdentity implementations may depend on memory that is not
+ // reinitialized before reuse. We must allow explicit clearing of the
+ // association state in this case.
+ base_internal::ClearCurrentThreadIdentity();
+ {
+ base_internal::SpinLockHolder l(&freelist_lock);
+ identity->next = thread_identity_freelist;
+ thread_identity_freelist = identity;
+ }
+}
+
+// Return value rounded up to next multiple of align.
+// Align must be a power of two.
+static intptr_t RoundUp(intptr_t addr, intptr_t align) {
+ return (addr + align - 1) & ~(align - 1);
+}
+
+static base_internal::ThreadIdentity* NewThreadIdentity() {
+ base_internal::ThreadIdentity* identity = nullptr;
+
+ {
+ // Re-use a previously released object if possible.
+ base_internal::SpinLockHolder l(&freelist_lock);
+ if (thread_identity_freelist) {
+ identity = thread_identity_freelist; // Take list-head.
+ thread_identity_freelist = thread_identity_freelist->next;
+ }
+ }
+
+ if (identity == nullptr) {
+ // Allocate enough space to align ThreadIdentity to a multiple of
+ // PerThreadSynch::kAlignment. This space is never released (it is
+ // added to a freelist by ReclaimThreadIdentity instead).
+ void* allocation = base_internal::LowLevelAlloc::Alloc(
+ sizeof(*identity) + base_internal::PerThreadSynch::kAlignment - 1);
+ // Round up the address to the required alignment.
+ identity = reinterpret_cast<base_internal::ThreadIdentity*>(
+ RoundUp(reinterpret_cast<intptr_t>(allocation),
+ base_internal::PerThreadSynch::kAlignment));
+ }
+ memset(identity, 0, sizeof(*identity));
+
+ return identity;
+}
+
+// Allocates and attaches a ThreadIdentity object for the calling thread.
+// Returns the new identity.
+// REQUIRES: CurrentThreadIdentity(false) == nullptr
+base_internal::ThreadIdentity* CreateThreadIdentity() {
+ base_internal::ThreadIdentity* identity = NewThreadIdentity();
+ PerThreadSem::Init(identity);
+ // Associate the value with the current thread, and attach our destructor.
+ base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
+ return identity;
+}
+
+} // namespace synchronization_internal
+} // namespace absl
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
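The RoundUp() helper above requires `align` to be a power of two; here is a
standalone illustration of the same bit trick (not part of this commit):

    #include <cassert>
    #include <cstdint>

    // (addr + align - 1) overshoots into the next aligned block unless addr
    // is already aligned; masking with ~(align - 1) then clears the low bits.
    std::intptr_t RoundUp(std::intptr_t addr, std::intptr_t align) {
      return (addr + align - 1) & ~(align - 1);
    }

    int main() {
      assert(RoundUp(0, 64) == 0);
      assert(RoundUp(1, 64) == 64);
      assert(RoundUp(64, 64) == 64);
      assert(RoundUp(65, 64) == 128);
      return 0;
    }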
diff --git a/absl/synchronization/internal/create_thread_identity.h b/absl/synchronization/internal/create_thread_identity.h
new file mode 100644
index 0000000..1bb87de
--- /dev/null
+++ b/absl/synchronization/internal/create_thread_identity.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Interface for getting the current ThreadIdentity, creating one if necessary.
+// See thread_identity.h.
+//
+// This file is separate from thread_identity.h because creating a new
+// ThreadIdentity requires slightly higher level libraries (per_thread_sem
+// and low_level_alloc) than accessing an existing one. This separation allows
+// us to have a smaller //absl/base:base.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
+
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/port.h"
+
+namespace absl {
+namespace synchronization_internal {
+
+// Allocates and attaches a ThreadIdentity object for the calling thread.
+// For private use only.
+base_internal::ThreadIdentity* CreateThreadIdentity();
+
+// Returns the ThreadIdentity object representing the calling thread, which is
+// guaranteed to be unique for its lifetime. The returned object remains valid
+// for the program's lifetime, although it may be re-assigned to a subsequent
+// thread. If no identity exists for the calling thread, one is allocated now.
+inline base_internal::ThreadIdentity* GetOrCreateCurrentThreadIdentity() {
+ base_internal::ThreadIdentity* identity =
+ base_internal::CurrentThreadIdentityIfPresent();
+ if (ABSL_PREDICT_FALSE(identity == nullptr)) {
+ return CreateThreadIdentity();
+ }
+ return identity;
+}
+
+} // namespace synchronization_internal
+} // namespace absl
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_CREATE_THREAD_IDENTITY_H_
diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc
new file mode 100644
index 0000000..d7ae0cf
--- /dev/null
+++ b/absl/synchronization/internal/graphcycles.cc
@@ -0,0 +1,709 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// GraphCycles provides incremental cycle detection on a dynamic
+// graph using the following algorithm:
+//
+// A dynamic topological sort algorithm for directed acyclic graphs
+// David J. Pearce, Paul H. J. Kelly
+// Journal of Experimental Algorithmics (JEA) JEA Homepage archive
+// Volume 11, 2006, Article No. 1.7
+//
+// Brief summary of the algorithm:
+//
+// (1) Maintain a rank for each node that is consistent
+//     with the topological sort of the graph. I.e., a path from x to y
+// implies rank[x] < rank[y].
+// (2) When a new edge (x->y) is inserted, do nothing if rank[x] < rank[y].
+// (3) Otherwise: adjust ranks in the neighborhood of x and y.
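+//
+//     For example, with rank[a]=1, rank[b]=2, rank[c]=3, inserting the
+//     edge c->a violates (1); only nodes whose current ranks lie in the
+//     range [rank[a], rank[c]] can need re-ranking, so the update stays
+//     local to the affected neighborhood.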
+
+// This file is a no-op if the required LowLevelAlloc support is missing.
+#include "absl/base/internal/low_level_alloc.h"
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#include "absl/synchronization/internal/graphcycles.h"
+
+#include <algorithm>
+#include <array>
+#include <limits>
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+// Do not use STL. This module does not use standard memory allocation.
+
+namespace absl {
+namespace synchronization_internal {
+
+namespace {
+
+// Avoid LowLevelAlloc's default arena since it calls malloc hooks,
+// inside which people may be doing things like acquiring Mutexes.
+static absl::base_internal::SpinLock arena_mu(
+ absl::base_internal::kLinkerInitialized);
+static base_internal::LowLevelAlloc::Arena* arena;
+
+static void InitArenaIfNecessary() {
+ arena_mu.Lock();
+ if (arena == nullptr) {
+ arena = base_internal::LowLevelAlloc::NewArena(
+ 0, base_internal::LowLevelAlloc::DefaultArena());
+ }
+ arena_mu.Unlock();
+}
+
+// Number of inlined elements in Vec. The hash table implementation
+// relies on this being a power of two.
+static const uint32_t kInline = 8;
+
+// A simple LowLevelAlloc-based resizable vector with inlined storage
+// for a few elements. T must be a plain type since constructors
+// and destructors are not run on elements of type T managed by Vec.
+template <typename T>
+class Vec {
+ public:
+ Vec() { Init(); }
+ ~Vec() { Discard(); }
+
+ void clear() {
+ Discard();
+ Init();
+ }
+
+ bool empty() const { return size_ == 0; }
+ uint32_t size() const { return size_; }
+ T* begin() { return ptr_; }
+ T* end() { return ptr_ + size_; }
+ const T& operator[](uint32_t i) const { return ptr_[i]; }
+ T& operator[](uint32_t i) { return ptr_[i]; }
+ const T& back() const { return ptr_[size_-1]; }
+ void pop_back() { size_--; }
+
+ void push_back(const T& v) {
+ if (size_ == capacity_) Grow(size_ + 1);
+ ptr_[size_] = v;
+ size_++;
+ }
+
+ void resize(uint32_t n) {
+ if (n > capacity_) Grow(n);
+ size_ = n;
+ }
+
+ void fill(const T& val) {
+ for (uint32_t i = 0; i < size(); i++) {
+ ptr_[i] = val;
+ }
+ }
+
+ // Guarantees src is empty at end.
+ // Provided for the hash table resizing code below.
+ void MoveFrom(Vec<T>* src) {
+ if (src->ptr_ == src->space_) {
+ // Need to actually copy
+ resize(src->size_);
+ std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
+ src->size_ = 0;
+ } else {
+ Discard();
+ ptr_ = src->ptr_;
+ size_ = src->size_;
+ capacity_ = src->capacity_;
+ src->Init();
+ }
+ }
+
+ private:
+ T* ptr_;
+ T space_[kInline];
+ uint32_t size_;
+ uint32_t capacity_;
+
+ void Init() {
+ ptr_ = space_;
+ size_ = 0;
+ capacity_ = kInline;
+ }
+
+ void Discard() {
+ if (ptr_ != space_) base_internal::LowLevelAlloc::Free(ptr_);
+ }
+
+ void Grow(uint32_t n) {
+ while (capacity_ < n) {
+ capacity_ *= 2;
+ }
+ size_t request = static_cast<size_t>(capacity_) * sizeof(T);
+ T* copy = static_cast<T*>(
+ base_internal::LowLevelAlloc::AllocWithArena(request, arena));
+ std::copy(ptr_, ptr_ + size_, copy);
+ Discard();
+ ptr_ = copy;
+ }
+
+ Vec(const Vec&) = delete;
+ Vec& operator=(const Vec&) = delete;
+};
+
+// A hash set of non-negative int32_t that uses Vec for its underlying storage.
+class NodeSet {
+ public:
+ NodeSet() { Init(); }
+
+ void clear() { Init(); }
+ bool contains(int32_t v) const { return table_[FindIndex(v)] == v; }
+
+ bool insert(int32_t v) {
+ uint32_t i = FindIndex(v);
+ if (table_[i] == v) {
+ return false;
+ }
+ if (table_[i] == kEmpty) {
+ // Only inserting over an empty cell increases the number of occupied
+ // slots.
+ occupied_++;
+ }
+ table_[i] = v;
+ // Double when 75% full.
+ if (occupied_ >= table_.size() - table_.size()/4) Grow();
+ return true;
+ }
+
+ void erase(uint32_t v) {
+ uint32_t i = FindIndex(v);
+ if (static_cast<uint32_t>(table_[i]) == v) {
+ table_[i] = kDel;
+ }
+ }
+
+  // Iteration is done via HASH_FOR_EACH.
+ // Example:
+ // HASH_FOR_EACH(elem, node->out) { ... }
+#define HASH_FOR_EACH(elem, eset) \
+ for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
+ bool Next(int32_t* cursor, int32_t* elem) {
+ while (static_cast<uint32_t>(*cursor) < table_.size()) {
+ int32_t v = table_[*cursor];
+ (*cursor)++;
+ if (v >= 0) {
+ *elem = v;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private:
+ static const int32_t kEmpty;
+ static const int32_t kDel;
+ Vec<int32_t> table_;
+ uint32_t occupied_; // Count of non-empty slots (includes deleted slots)
+
+ static uint32_t Hash(uint32_t a) { return a * 41; }
+
+  // Return the index for storing v. It may be an empty or deleted slot.
+ int FindIndex(int32_t v) const {
+ // Search starting at hash index.
+ const uint32_t mask = table_.size() - 1;
+ uint32_t i = Hash(v) & mask;
+ int deleted_index = -1; // If >= 0, index of first deleted element we see
+ while (true) {
+ int32_t e = table_[i];
+ if (v == e) {
+ return i;
+ } else if (e == kEmpty) {
+ // Return any previously encountered deleted slot.
+ return (deleted_index >= 0) ? deleted_index : i;
+ } else if (e == kDel && deleted_index < 0) {
+ // Keep searching since v might be present later.
+ deleted_index = i;
+ }
+ i = (i + 1) & mask; // Linear probing; quadratic is slightly slower.
+ }
+ }
+
+ void Init() {
+ table_.clear();
+ table_.resize(kInline);
+ table_.fill(kEmpty);
+ occupied_ = 0;
+ }
+
+ void Grow() {
+ Vec<int32_t> copy;
+ copy.MoveFrom(&table_);
+ occupied_ = 0;
+ table_.resize(copy.size() * 2);
+ table_.fill(kEmpty);
+
+ for (const auto& e : copy) {
+ if (e >= 0) insert(e);
+ }
+ }
+
+ NodeSet(const NodeSet&) = delete;
+ NodeSet& operator=(const NodeSet&) = delete;
+};
+
+const int32_t NodeSet::kEmpty = -1;
+const int32_t NodeSet::kDel = -2;
+
+// We encode a node index and a node version in GraphId. The version
+// number is incremented when the GraphId is freed, which automatically
+// invalidates all copies of the GraphId.
+
+inline GraphId MakeId(int32_t index, uint32_t version) {
+ GraphId g;
+ g.handle =
+ (static_cast<uint64_t>(version) << 32) | static_cast<uint32_t>(index);
+ return g;
+}
+
+inline int32_t NodeIndex(GraphId id) {
+ return static_cast<uint32_t>(id.handle & 0xfffffffful);
+}
+
+inline uint32_t NodeVersion(GraphId id) {
+ return static_cast<uint32_t>(id.handle >> 32);
+}
+
+// We need to hide Mutexes (or other deadlock-detection clients' pointers)
+// from the leak detector. Xor with an arbitrary number with high bits set.
+static const uintptr_t kHideMask = static_cast<uintptr_t>(0xF03A5F7BF03A5F7Bll);
+
+static inline uintptr_t MaskPtr(void *ptr) {
+ return reinterpret_cast<uintptr_t>(ptr) ^ kHideMask;
+}
+
+static inline void* UnmaskPtr(uintptr_t word) {
+ return reinterpret_cast<void*>(word ^ kHideMask);
+}
+
+struct Node {
+ int32_t rank; // rank number assigned by Pearce-Kelly algorithm
+ uint32_t version; // Current version number
+ int32_t next_hash; // Next entry in hash table
+ bool visited; // Temporary marker used by depth-first-search
+ uintptr_t masked_ptr; // User-supplied pointer
+ NodeSet in; // List of immediate predecessor nodes in graph
+ NodeSet out; // List of immediate successor nodes in graph
+ int priority; // Priority of recorded stack trace.
+ int nstack; // Depth of recorded stack trace.
+ void* stack[40]; // stack[0,nstack-1] holds stack trace for node.
+};
+
+// Hash table for pointer to node index lookups.
+class PointerMap {
+ public:
+ explicit PointerMap(const Vec<Node*>* nodes) : nodes_(nodes) {
+ table_.fill(-1);
+ }
+
+ int32_t Find(void* ptr) {
+ auto masked = MaskPtr(ptr);
+ for (int32_t i = table_[Hash(ptr)]; i != -1;) {
+ Node* n = (*nodes_)[i];
+ if (n->masked_ptr == masked) return i;
+ i = n->next_hash;
+ }
+ return -1;
+ }
+
+ void Add(void* ptr, int32_t i) {
+ int32_t* head = &table_[Hash(ptr)];
+ (*nodes_)[i]->next_hash = *head;
+ *head = i;
+ }
+
+ int32_t Remove(void* ptr) {
+ // Advance through linked list while keeping track of the
+ // predecessor slot that points to the current entry.
+ auto masked = MaskPtr(ptr);
+ for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
+ int32_t index = *slot;
+ Node* n = (*nodes_)[index];
+ if (n->masked_ptr == masked) {
+ *slot = n->next_hash; // Remove n from linked list
+ n->next_hash = -1;
+ return index;
+ }
+ slot = &n->next_hash;
+ }
+ return -1;
+ }
+
+ private:
+ // Number of buckets in hash table for pointer lookups.
+ static constexpr uint32_t kHashTableSize = 8171; // should be prime
+
+ const Vec<Node*>* nodes_;
+ std::array<int32_t, kHashTableSize> table_;
+
+ static uint32_t Hash(void* ptr) {
+ return reinterpret_cast<uintptr_t>(ptr) % kHashTableSize;
+ }
+};
+
+} // namespace
+
+struct GraphCycles::Rep {
+ Vec<Node*> nodes_;
+ Vec<int32_t> free_nodes_; // Indices for unused entries in nodes_
+ PointerMap ptrmap_;
+
+ // Temporary state.
+ Vec<int32_t> deltaf_; // Results of forward DFS
+ Vec<int32_t> deltab_; // Results of backward DFS
+ Vec<int32_t> list_; // All nodes to reprocess
+ Vec<int32_t> merged_; // Rank values to assign to list_ entries
+ Vec<int32_t> stack_; // Emulates recursion stack for depth-first searches
+
+ Rep() : ptrmap_(&nodes_) {}
+};
+
+static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
+ Node* n = rep->nodes_[NodeIndex(id)];
+ return (n->version == NodeVersion(id)) ? n : nullptr;
+}
+
+GraphCycles::GraphCycles() {
+ InitArenaIfNecessary();
+ rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
+ Rep;
+}
+
+GraphCycles::~GraphCycles() {
+ for (auto* node : rep_->nodes_) {
+ node->Node::~Node();
+ base_internal::LowLevelAlloc::Free(node);
+ }
+ rep_->Rep::~Rep();
+ base_internal::LowLevelAlloc::Free(rep_);
+}
+
+bool GraphCycles::CheckInvariants() const {
+ Rep* r = rep_;
+ NodeSet ranks; // Set of ranks seen so far.
+ for (uint32_t x = 0; x < r->nodes_.size(); x++) {
+ Node* nx = r->nodes_[x];
+ void* ptr = UnmaskPtr(nx->masked_ptr);
+ if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
+ ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
+ }
+ if (nx->visited) {
+ ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
+ }
+ if (!ranks.insert(nx->rank)) {
+ ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
+ }
+ HASH_FOR_EACH(y, nx->out) {
+ Node* ny = r->nodes_[y];
+ if (nx->rank >= ny->rank) {
+ ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
+ nx->rank, ny->rank);
+ }
+ }
+ }
+ return true;
+}
+
+GraphId GraphCycles::GetId(void* ptr) {
+ int32_t i = rep_->ptrmap_.Find(ptr);
+ if (i != -1) {
+ return MakeId(i, rep_->nodes_[i]->version);
+ } else if (rep_->free_nodes_.empty()) {
+ Node* n =
+ new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
+ Node;
+ n->version = 1; // Avoid 0 since it is used by InvalidGraphId()
+ n->visited = false;
+ n->rank = rep_->nodes_.size();
+ n->masked_ptr = MaskPtr(ptr);
+ n->nstack = 0;
+ n->priority = 0;
+ rep_->nodes_.push_back(n);
+ rep_->ptrmap_.Add(ptr, n->rank);
+ return MakeId(n->rank, n->version);
+ } else {
+ // Preserve preceding rank since the set of ranks in use must be
+ // a permutation of [0,rep_->nodes_.size()-1].
+ int32_t r = rep_->free_nodes_.back();
+ rep_->free_nodes_.pop_back();
+ Node* n = rep_->nodes_[r];
+ n->masked_ptr = MaskPtr(ptr);
+ n->nstack = 0;
+ n->priority = 0;
+ rep_->ptrmap_.Add(ptr, r);
+ return MakeId(r, n->version);
+ }
+}
+
+void GraphCycles::RemoveNode(void* ptr) {
+ int32_t i = rep_->ptrmap_.Remove(ptr);
+ if (i == -1) {
+ return;
+ }
+ Node* x = rep_->nodes_[i];
+ HASH_FOR_EACH(y, x->out) {
+ rep_->nodes_[y]->in.erase(i);
+ }
+ HASH_FOR_EACH(y, x->in) {
+ rep_->nodes_[y]->out.erase(i);
+ }
+ x->in.clear();
+ x->out.clear();
+ x->masked_ptr = MaskPtr(nullptr);
+ if (x->version == std::numeric_limits<uint32_t>::max()) {
+ // Cannot use x any more
+ } else {
+ x->version++; // Invalidates all copies of node.
+ rep_->free_nodes_.push_back(i);
+ }
+}
+
+void* GraphCycles::Ptr(GraphId id) {
+ Node* n = FindNode(rep_, id);
+ return n == nullptr ? nullptr : UnmaskPtr(n->masked_ptr);
+}
+
+bool GraphCycles::HasNode(GraphId node) {
+ return FindNode(rep_, node) != nullptr;
+}
+
+bool GraphCycles::HasEdge(GraphId x, GraphId y) const {
+ Node* xn = FindNode(rep_, x);
+ return xn && FindNode(rep_, y) && xn->out.contains(NodeIndex(y));
+}
+
+void GraphCycles::RemoveEdge(GraphId x, GraphId y) {
+ Node* xn = FindNode(rep_, x);
+ Node* yn = FindNode(rep_, y);
+ if (xn && yn) {
+ xn->out.erase(NodeIndex(y));
+ yn->in.erase(NodeIndex(x));
+ // No need to update the rank assignment since a previous valid
+ // rank assignment remains valid after an edge deletion.
+ }
+}
+
+static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
+static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
+static void Reorder(GraphCycles::Rep* r);
+static void Sort(const Vec<Node*>&, Vec<int32_t>* delta);
+static void MoveToList(
+ GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst);
+
+bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
+ Rep* r = rep_;
+ const int32_t x = NodeIndex(idx);
+ const int32_t y = NodeIndex(idy);
+ Node* nx = FindNode(r, idx);
+ Node* ny = FindNode(r, idy);
+ if (nx == nullptr || ny == nullptr) return true; // Expired ids
+
+ if (nx == ny) return false; // Self edge
+ if (!nx->out.insert(y)) {
+ // Edge already exists.
+ return true;
+ }
+
+ ny->in.insert(x);
+
+ if (nx->rank <= ny->rank) {
+ // New edge is consistent with existing rank assignment.
+ return true;
+ }
+
+ // Current rank assignments are incompatible with the new edge. Recompute.
+ // We only need to consider nodes that fall in the range [ny->rank,nx->rank].
+ if (!ForwardDFS(r, y, nx->rank)) {
+ // Found a cycle. Undo the insertion and tell caller.
+ nx->out.erase(y);
+ ny->in.erase(x);
+ // Since we do not call Reorder() on this path, clear any visited
+ // markers left by ForwardDFS.
+ for (const auto& d : r->deltaf_) {
+ r->nodes_[d]->visited = false;
+ }
+ return false;
+ }
+ BackwardDFS(r, x, ny->rank);
+ Reorder(r);
+ return true;
+}
+
+static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
+ // Avoid recursion since stack space might be limited.
+ // We instead keep a stack of nodes to visit.
+ r->deltaf_.clear();
+ r->stack_.clear();
+ r->stack_.push_back(n);
+ while (!r->stack_.empty()) {
+ n = r->stack_.back();
+ r->stack_.pop_back();
+ Node* nn = r->nodes_[n];
+ if (nn->visited) continue;
+
+ nn->visited = true;
+ r->deltaf_.push_back(n);
+
+ HASH_FOR_EACH(w, nn->out) {
+ Node* nw = r->nodes_[w];
+ if (nw->rank == upper_bound) {
+ return false; // Cycle
+ }
+ if (!nw->visited && nw->rank < upper_bound) {
+ r->stack_.push_back(w);
+ }
+ }
+ }
+ return true;
+}
+
+static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
+ r->deltab_.clear();
+ r->stack_.clear();
+ r->stack_.push_back(n);
+ while (!r->stack_.empty()) {
+ n = r->stack_.back();
+ r->stack_.pop_back();
+ Node* nn = r->nodes_[n];
+ if (nn->visited) continue;
+
+ nn->visited = true;
+ r->deltab_.push_back(n);
+
+ HASH_FOR_EACH(w, nn->in) {
+ Node* nw = r->nodes_[w];
+ if (!nw->visited && lower_bound < nw->rank) {
+ r->stack_.push_back(w);
+ }
+ }
+ }
+}
+
+static void Reorder(GraphCycles::Rep* r) {
+ Sort(r->nodes_, &r->deltab_);
+ Sort(r->nodes_, &r->deltaf_);
+
+ // Adds contents of delta lists to list_ (backwards deltas first).
+ r->list_.clear();
+ MoveToList(r, &r->deltab_, &r->list_);
+ MoveToList(r, &r->deltaf_, &r->list_);
+
+ // Produce sorted list of all ranks that will be reassigned.
+ r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
+ std::merge(r->deltab_.begin(), r->deltab_.end(),
+ r->deltaf_.begin(), r->deltaf_.end(),
+ r->merged_.begin());
+
+ // Assign the ranks in order to the collected list.
+ for (uint32_t i = 0; i < r->list_.size(); i++) {
+ r->nodes_[r->list_[i]]->rank = r->merged_[i];
+ }
+}
+
+static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
+ struct ByRank {
+ const Vec<Node*>* nodes;
+ bool operator()(int32_t a, int32_t b) const {
+ return (*nodes)[a]->rank < (*nodes)[b]->rank;
+ }
+ };
+ ByRank cmp;
+ cmp.nodes = &nodes;
+ std::sort(delta->begin(), delta->end(), cmp);
+}
+
+static void MoveToList(
+ GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
+ for (auto& v : *src) {
+ int32_t w = v;
+ v = r->nodes_[w]->rank; // Replace v entry with its rank
+ r->nodes_[w]->visited = false; // Prepare for future DFS calls
+ dst->push_back(w);
+ }
+}
+
+int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
+ GraphId path[]) const {
+ Rep* r = rep_;
+ if (FindNode(r, idx) == nullptr || FindNode(r, idy) == nullptr) return 0;
+ const int32_t x = NodeIndex(idx);
+ const int32_t y = NodeIndex(idy);
+
+ // Forward depth first search starting at x until we hit y.
+ // As we descend into a node, we push it onto the path.
+ // As we leave a node, we remove it from the path.
+ int path_len = 0;
+
+ NodeSet seen;
+ r->stack_.clear();
+ r->stack_.push_back(x);
+ while (!r->stack_.empty()) {
+ int32_t n = r->stack_.back();
+ r->stack_.pop_back();
+ if (n < 0) {
+ // Marker to indicate that we are leaving a node
+ path_len--;
+ continue;
+ }
+
+ if (path_len < max_path_len) {
+ path[path_len] = MakeId(n, rep_->nodes_[n]->version);
+ }
+ path_len++;
+ r->stack_.push_back(-1); // Will remove tentative path entry
+
+ if (n == y) {
+ return path_len;
+ }
+
+ HASH_FOR_EACH(w, r->nodes_[n]->out) {
+ if (seen.insert(w)) {
+ r->stack_.push_back(w);
+ }
+ }
+ }
+
+ return 0;
+}
+
+bool GraphCycles::IsReachable(GraphId x, GraphId y) const {
+ return FindPath(x, y, 0, nullptr) > 0;
+}
+
+void GraphCycles::UpdateStackTrace(GraphId id, int priority,
+ int (*get_stack_trace)(void** stack, int)) {
+ Node* n = FindNode(rep_, id);
+ if (n == nullptr || n->priority >= priority) {
+ return;
+ }
+ n->nstack = (*get_stack_trace)(n->stack, ABSL_ARRAYSIZE(n->stack));
+ n->priority = priority;
+}
+
+int GraphCycles::GetStackTrace(GraphId id, void*** ptr) {
+ Node* n = FindNode(rep_, id);
+ if (n == nullptr) {
+ *ptr = nullptr;
+ return 0;
+ } else {
+ *ptr = n->stack;
+ return n->nstack;
+ }
+}
+
+} // namespace synchronization_internal
+} // namespace absl
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
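To make the Pearce-Kelly update concrete, here is a heavily simplified,
self-contained sketch of the same idea (std::set-based, no arena, versioning,
or pointer masking; `TinyDag` is a hypothetical name, and the production code
above differs in many details). Ranks stay consistent with a topological
order, and an edge insert only reorders the nodes found by two bounded DFS
passes:

    #include <algorithm>
    #include <set>
    #include <vector>

    // Maintains rank_ so every edge x->y has rank_[x] < rank_[y];
    // AddEdge() returns false (rejecting the edge) if it would close a cycle.
    class TinyDag {
     public:
      int AddNode() {
        out_.emplace_back();
        in_.emplace_back();
        rank_.push_back(static_cast<int>(rank_.size()));
        return static_cast<int>(rank_.size()) - 1;
      }

      bool AddEdge(int x, int y) {
        if (x == y) return false;                    // self edge: trivial cycle
        if (!out_[x].insert(y).second) return true;  // edge already present
        in_[y].insert(x);
        if (rank_[x] < rank_[y]) return true;        // ranks still consistent
        // Forward DFS from y over nodes with rank < rank_[x]; reaching the
        // rank of x itself means the new edge closes a cycle.
        std::vector<int> deltaf;
        if (!ForwardDFS(y, rank_[x], &deltaf)) {
          out_[x].erase(y);                          // undo and report cycle
          in_[y].erase(x);
          return false;
        }
        std::vector<int> deltab;
        BackwardDFS(x, rank_[y], &deltab);
        // Reassign the affected ranks: deltab nodes first, then deltaf,
        // reusing the same (sorted) set of rank values.
        auto by_rank = [this](int a, int b) { return rank_[a] < rank_[b]; };
        std::sort(deltab.begin(), deltab.end(), by_rank);
        std::sort(deltaf.begin(), deltaf.end(), by_rank);
        std::vector<int> order = deltab;
        order.insert(order.end(), deltaf.begin(), deltaf.end());
        std::vector<int> ranks;
        for (int n : order) ranks.push_back(rank_[n]);
        std::sort(ranks.begin(), ranks.end());
        for (int i = 0; i < static_cast<int>(order.size()); i++) {
          rank_[order[i]] = ranks[i];
        }
        return true;
      }

     private:
      bool ForwardDFS(int start, int upper, std::vector<int>* visited) {
        std::vector<int> stack = {start};
        std::set<int> seen;
        while (!stack.empty()) {
          int n = stack.back();
          stack.pop_back();
          if (!seen.insert(n).second) continue;
          visited->push_back(n);
          for (int w : out_[n]) {
            if (rank_[w] == upper) return false;     // reached x: cycle
            if (rank_[w] < upper) stack.push_back(w);
          }
        }
        return true;
      }

      void BackwardDFS(int start, int lower, std::vector<int>* visited) {
        std::vector<int> stack = {start};
        std::set<int> seen;
        while (!stack.empty()) {
          int n = stack.back();
          stack.pop_back();
          if (!seen.insert(n).second) continue;
          visited->push_back(n);
          for (int w : in_[n]) {
            if (rank_[w] > lower) stack.push_back(w);
          }
        }
      }

      std::vector<std::set<int>> out_, in_;
      std::vector<int> rank_;
    };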
diff --git a/absl/synchronization/internal/graphcycles.h b/absl/synchronization/internal/graphcycles.h
new file mode 100644
index 0000000..53474b7
--- /dev/null
+++ b/absl/synchronization/internal/graphcycles.h
@@ -0,0 +1,136 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
+
+// GraphCycles detects the introduction of a cycle into a directed
+// graph that is being built up incrementally.
+//
+// Nodes are identified by small integers. It is not possible to
+// record multiple edges with the same (source, destination) pair;
+// requests to add an edge where one already exists are silently
+// ignored.
+//
+// It is also not possible to introduce a cycle; an attempt to insert
+// an edge that would introduce a cycle fails and returns false.
+//
+// GraphCycles uses no internal locking; calls into it should be
+// serialized externally.
+
+// Performance considerations:
+// Works well on sparse graphs, poorly on dense graphs.
+// Extra information is maintained incrementally to detect cycles quickly.
+// InsertEdge() is very fast when the edge already exists, and reasonably fast
+// otherwise.
+// FindPath() is linear in the size of the graph.
+// The current implementation uses O(|V|+|E|) space.
+
+#include <cstdint>
+
+namespace absl {
+namespace synchronization_internal {
+
+// Opaque identifier for a graph node.
+struct GraphId {
+ uint64_t handle;
+
+ bool operator==(const GraphId& x) const { return handle == x.handle; }
+ bool operator!=(const GraphId& x) const { return handle != x.handle; }
+};
+
+// Return an invalid graph id that will never be assigned by GraphCycles.
+inline GraphId InvalidGraphId() {
+ return GraphId{0};
+}
+
+class GraphCycles {
+ public:
+ GraphCycles();
+ ~GraphCycles();
+
+ // Return the id to use for ptr, assigning one if necessary.
+ // Subsequent calls with the same ptr value will return the same id
+  // until RemoveNode() is called.
+ GraphId GetId(void* ptr);
+
+ // Remove "ptr" from the graph. Its corresponding node and all
+ // edges to and from it are removed.
+ void RemoveNode(void* ptr);
+
+ // Return the pointer associated with id, or nullptr if id is not
+ // currently in the graph.
+ void* Ptr(GraphId id);
+
+ // Attempt to insert an edge from source_node to dest_node. If the
+ // edge would introduce a cycle, return false without making any
+ // changes. Otherwise add the edge and return true.
+ bool InsertEdge(GraphId source_node, GraphId dest_node);
+
+ // Remove any edge that exists from source_node to dest_node.
+ void RemoveEdge(GraphId source_node, GraphId dest_node);
+
+ // Return whether node exists in the graph.
+ bool HasNode(GraphId node);
+
+ // Return whether there is an edge directly from source_node to dest_node.
+ bool HasEdge(GraphId source_node, GraphId dest_node) const;
+
+ // Return whether dest_node is reachable from source_node
+ // by following edges.
+ bool IsReachable(GraphId source_node, GraphId dest_node) const;
+
+ // Find a path from "source" to "dest". If such a path exists,
+ // place the nodes on the path in the array path[], and return
+ // the number of nodes on the path. If the path is longer than
+ // max_path_len nodes, only the first max_path_len nodes are placed
+ // in path[]. The client should compare the return value with
+ // max_path_len" to see when this occurs. If no path exists, return
+ // 0. Any valid path stored in path[] will start with "source" and
+ // end with "dest". There is no guarantee that the path is the
+ // shortest, but no node will appear twice in the path, except the
+ // source and destination node if they are identical; therefore, the
+ // return value is at most one greater than the number of nodes in
+ // the graph.
+ int FindPath(GraphId source, GraphId dest, int max_path_len,
+ GraphId path[]) const;
+
+ // Update the stack trace recorded for id with the current stack
+ // trace if the last time it was updated had a smaller priority
+ // than the priority passed on this call.
+ //
+ // *get_stack_trace is called to get the stack trace.
+ void UpdateStackTrace(GraphId id, int priority,
+ int (*get_stack_trace)(void**, int));
+
+ // Set *ptr to the beginning of the array that holds the recorded
+ // stack trace for id and return the depth of the stack trace.
+ int GetStackTrace(GraphId id, void*** ptr);
+
+ // Check internal invariants. Crashes on failure, returns true on success.
+ // Expensive: should only be called from graphcycles_test.cc.
+ bool CheckInvariants() const;
+
+ // ----------------------------------------------------
+ struct Rep;
+ private:
+ Rep *rep_; // opaque representation
+ GraphCycles(const GraphCycles&) = delete;
+ GraphCycles& operator=(const GraphCycles&) = delete;
+};
+
+} // namespace synchronization_internal
+} // namespace absl
+#endif  // ABSL_SYNCHRONIZATION_INTERNAL_GRAPHCYCLES_H_
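An illustrative sketch of the interface above (internal API; shown only to
make the contract concrete, not part of this commit; `Demo()` is a
hypothetical name):

    #include "absl/synchronization/internal/graphcycles.h"

    using absl::synchronization_internal::GraphCycles;
    using absl::synchronization_internal::GraphId;

    void Demo() {
      GraphCycles g;
      int a = 0, b = 0;  // any two distinct addresses serve as node keys
      GraphId ida = g.GetId(&a);
      GraphId idb = g.GetId(&b);
      bool inserted = g.InsertEdge(ida, idb);   // a->b succeeds: true
      bool rejected = !g.InsertEdge(idb, ida);  // b->a would close a cycle
      // inserted == true and rejected == true; the failed insert left the
      // graph unchanged, so a->b is still the only edge.
      (void)inserted;
      (void)rejected;
      g.RemoveNode(&a);  // removes the node and all edges to and from it
    }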
diff --git a/absl/synchronization/internal/graphcycles_test.cc b/absl/synchronization/internal/graphcycles_test.cc
new file mode 100644
index 0000000..734f277
--- /dev/null
+++ b/absl/synchronization/internal/graphcycles_test.cc
@@ -0,0 +1,471 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2007 Google, Inc.
+// All rights reserved.
+
+// Author: Mike Burrows
+
+// A test for the GraphCycles interface.
+
+// This test exercises a component of //third_party/absl. As written it
+// heavily uses logging, including VLOG, so this test can't ship with Abseil.
+// We're leaving it here until Abseil gets base/logging.h in a future release.
+#include "absl/synchronization/internal/graphcycles.h"
+
+#include <map>
+#include <random>
+#include <vector>
+#include <unordered_set>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+
+namespace absl {
+namespace synchronization_internal {
+
+// We emulate a GraphCycles object with a node vector and an edge vector.
+// We then compare the two implementations.
+
+using Nodes = std::vector<int>;
+struct Edge {
+ int from;
+ int to;
+};
+using Edges = std::vector<Edge>;
+using RandomEngine = std::mt19937_64;
+
+// Mapping from integer index to GraphId.
+typedef std::map<int, GraphId> IdMap;
+static GraphId Get(const IdMap& id, int num) {
+ auto iter = id.find(num);
+ return (iter == id.end()) ? InvalidGraphId() : iter->second;
+}
+
+// Return whether "to" is reachable from "from".
+static bool IsReachable(Edges *edges, int from, int to,
+ std::unordered_set<int> *seen) {
+ seen->insert(from); // we are investigating "from"; don't do it again
+ if (from == to) return true;
+ for (const auto &edge : *edges) {
+ if (edge.from == from) {
+ if (edge.to == to) { // success via edge directly
+ return true;
+ } else if (seen->find(edge.to) == seen->end() && // success via edge
+ IsReachable(edges, edge.to, to, seen)) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static void PrintEdges(Edges *edges) {
+ ABSL_RAW_LOG(INFO, "EDGES (%zu)", edges->size());
+ for (const auto &edge : *edges) {
+ int a = edge.from;
+ int b = edge.to;
+ ABSL_RAW_LOG(INFO, "%d %d", a, b);
+ }
+ ABSL_RAW_LOG(INFO, "---");
+}
+
+static void PrintGCEdges(Nodes *nodes, const IdMap &id, GraphCycles *gc) {
+ ABSL_RAW_LOG(INFO, "GC EDGES");
+ for (int a : *nodes) {
+ for (int b : *nodes) {
+ if (gc->HasEdge(Get(id, a), Get(id, b))) {
+ ABSL_RAW_LOG(INFO, "%d %d", a, b);
+ }
+ }
+ }
+ ABSL_RAW_LOG(INFO, "---");
+}
+
+static void PrintTransitiveClosure(Nodes *nodes, Edges *edges) {
+ ABSL_RAW_LOG(INFO, "Transitive closure");
+ for (int a : *nodes) {
+ for (int b : *nodes) {
+ std::unordered_set<int> seen;
+ if (IsReachable(edges, a, b, &seen)) {
+ ABSL_RAW_LOG(INFO, "%d %d", a, b);
+ }
+ }
+ }
+ ABSL_RAW_LOG(INFO, "---");
+}
+
+static void PrintGCTransitiveClosure(Nodes *nodes, const IdMap &id,
+ GraphCycles *gc) {
+ ABSL_RAW_LOG(INFO, "GC Transitive closure");
+ for (int a : *nodes) {
+ for (int b : *nodes) {
+ if (gc->IsReachable(Get(id, a), Get(id, b))) {
+ ABSL_RAW_LOG(INFO, "%d %d", a, b);
+ }
+ }
+ }
+ ABSL_RAW_LOG(INFO, "---");
+}
+
+static void CheckTransitiveClosure(Nodes *nodes, Edges *edges, const IdMap &id,
+ GraphCycles *gc) {
+ std::unordered_set<int> seen;
+ for (const auto &a : *nodes) {
+ for (const auto &b : *nodes) {
+ seen.clear();
+ bool gc_reachable = gc->IsReachable(Get(id, a), Get(id, b));
+ bool reachable = IsReachable(edges, a, b, &seen);
+ if (gc_reachable != reachable) {
+ PrintEdges(edges);
+ PrintGCEdges(nodes, id, gc);
+ PrintTransitiveClosure(nodes, edges);
+ PrintGCTransitiveClosure(nodes, id, gc);
+ ABSL_RAW_LOG(FATAL, "gc_reachable %s reachable %s a %d b %d",
+ gc_reachable ? "true" : "false",
+ reachable ? "true" : "false", a, b);
+ }
+ }
+ }
+}
+
+static void CheckEdges(Nodes *nodes, Edges *edges, const IdMap &id,
+ GraphCycles *gc) {
+ int count = 0;
+ for (const auto &edge : *edges) {
+ int a = edge.from;
+ int b = edge.to;
+ if (!gc->HasEdge(Get(id, a), Get(id, b))) {
+ PrintEdges(edges);
+ PrintGCEdges(nodes, id, gc);
+ ABSL_RAW_LOG(FATAL, "!gc->HasEdge(%d, %d)", a, b);
+ }
+ }
+ for (const auto &a : *nodes) {
+ for (const auto &b : *nodes) {
+ if (gc->HasEdge(Get(id, a), Get(id, b))) {
+ count++;
+ }
+ }
+ }
+ if (count != edges->size()) {
+ PrintEdges(edges);
+ PrintGCEdges(nodes, id, gc);
+ ABSL_RAW_LOG(FATAL, "edges->size() %zu count %d", edges->size(), count);
+ }
+}
+
+static void CheckInvariants(const GraphCycles &gc) {
+ if (ABSL_PREDICT_FALSE(!gc.CheckInvariants()))
+ ABSL_RAW_LOG(FATAL, "CheckInvariants");
+}
+
+// Returns the index of a randomly chosen node in *nodes.
+// Requires *nodes be non-empty.
+static int RandomNode(RandomEngine* rng, Nodes *nodes) {
+ std::uniform_int_distribution<int> uniform(0, nodes->size()-1);
+ return uniform(*rng);
+}
+
+// Returns the index of a randomly chosen edge in *edges.
+// Requires *edges be non-empty.
+static int RandomEdge(RandomEngine* rng, Edges *edges) {
+ std::uniform_int_distribution<int> uniform(0, edges->size()-1);
+ return uniform(*rng);
+}
+
+// Returns the index of edge (from, to) in *edges or -1 if it is not in *edges.
+static int EdgeIndex(Edges *edges, int from, int to) {
+ int i = 0;
+ while (i != edges->size() &&
+ ((*edges)[i].from != from || (*edges)[i].to != to)) {
+ i++;
+ }
+  return i == edges->size() ? -1 : i;
+}
+
+TEST(GraphCycles, RandomizedTest) {
+ int next_node = 0;
+ Nodes nodes;
+ Edges edges; // from, to
+ IdMap id;
+ GraphCycles graph_cycles;
+ static const int kMaxNodes = 7; // use <= 7 nodes to keep test short
+ static const int kDataOffset = 17; // an offset to the node-specific data
+ int n = 100000;
+ int op = 0;
+ RandomEngine rng(testing::UnitTest::GetInstance()->random_seed());
+ std::uniform_int_distribution<int> uniform(0, 5);
+
+ auto ptr = [](intptr_t i) {
+ return reinterpret_cast<void*>(i + kDataOffset);
+ };
+
+ for (int iter = 0; iter != n; iter++) {
+ for (const auto &node : nodes) {
+ ASSERT_EQ(graph_cycles.Ptr(Get(id, node)), ptr(node)) << " node " << node;
+ }
+ CheckEdges(&nodes, &edges, id, &graph_cycles);
+ CheckTransitiveClosure(&nodes, &edges, id, &graph_cycles);
+ op = uniform(rng);
+ switch (op) {
+ case 0: // Add a node
+ if (nodes.size() < kMaxNodes) {
+ int new_node = next_node++;
+ GraphId new_gnode = graph_cycles.GetId(ptr(new_node));
+ ASSERT_NE(new_gnode, InvalidGraphId());
+ id[new_node] = new_gnode;
+ ASSERT_EQ(ptr(new_node), graph_cycles.Ptr(new_gnode));
+ nodes.push_back(new_node);
+ }
+ break;
+
+ case 1: // Remove a node
+ if (nodes.size() > 0) {
+ int node_index = RandomNode(&rng, &nodes);
+ int node = nodes[node_index];
+ nodes[node_index] = nodes.back();
+ nodes.pop_back();
+ graph_cycles.RemoveNode(ptr(node));
+ ASSERT_EQ(graph_cycles.Ptr(Get(id, node)), nullptr);
+ id.erase(node);
+ int i = 0;
+ while (i != edges.size()) {
+ if (edges[i].from == node || edges[i].to == node) {
+ edges[i] = edges.back();
+ edges.pop_back();
+ } else {
+ i++;
+ }
+ }
+ }
+ break;
+
+ case 2: // Add an edge
+ if (nodes.size() > 0) {
+ int from = RandomNode(&rng, &nodes);
+ int to = RandomNode(&rng, &nodes);
+ if (EdgeIndex(&edges, nodes[from], nodes[to]) == -1) {
+ if (graph_cycles.InsertEdge(id[nodes[from]], id[nodes[to]])) {
+ Edge new_edge;
+ new_edge.from = nodes[from];
+ new_edge.to = nodes[to];
+ edges.push_back(new_edge);
+ } else {
+ std::unordered_set<int> seen;
+ ASSERT_TRUE(IsReachable(&edges, nodes[to], nodes[from], &seen))
+ << "Edge " << nodes[to] << "->" << nodes[from];
+ }
+ }
+ }
+ break;
+
+ case 3: // Remove an edge
+ if (edges.size() > 0) {
+ int i = RandomEdge(&rng, &edges);
+ int from = edges[i].from;
+ int to = edges[i].to;
+ ASSERT_EQ(i, EdgeIndex(&edges, from, to));
+ edges[i] = edges.back();
+ edges.pop_back();
+ ASSERT_EQ(-1, EdgeIndex(&edges, from, to));
+ graph_cycles.RemoveEdge(id[from], id[to]);
+ }
+ break;
+
+ case 4: // Check a path
+ if (nodes.size() > 0) {
+ int from = RandomNode(&rng, &nodes);
+ int to = RandomNode(&rng, &nodes);
+ GraphId path[2*kMaxNodes];
+ int path_len = graph_cycles.FindPath(id[nodes[from]], id[nodes[to]],
+ ABSL_ARRAYSIZE(path), path);
+ std::unordered_set<int> seen;
+ bool reachable = IsReachable(&edges, nodes[from], nodes[to], &seen);
+ bool gc_reachable =
+ graph_cycles.IsReachable(Get(id, nodes[from]), Get(id, nodes[to]));
+ ASSERT_EQ(path_len != 0, reachable);
+ ASSERT_EQ(path_len != 0, gc_reachable);
+ // In the following line, we add one because a node can appear
+ // twice, if the path is from that node to itself, perhaps via
+ // every other node.
+ ASSERT_LE(path_len, kMaxNodes + 1);
+ if (path_len != 0) {
+ ASSERT_EQ(id[nodes[from]], path[0]);
+ ASSERT_EQ(id[nodes[to]], path[path_len-1]);
+ for (int i = 1; i < path_len; i++) {
+ ASSERT_TRUE(graph_cycles.HasEdge(path[i-1], path[i]));
+ }
+ }
+ }
+ break;
+
+ case 5: // Check invariants
+ CheckInvariants(graph_cycles);
+ break;
+
+ default:
+ ABSL_RAW_LOG(FATAL, "op %d", op);
+ }
+
+ // Very rarely, test graph expansion by adding then removing many nodes.
+ std::bernoulli_distribution one_in_1024(1.0 / 1024);
+ if (one_in_1024(rng)) {
+ CheckEdges(&nodes, &edges, id, &graph_cycles);
+ CheckTransitiveClosure(&nodes, &edges, id, &graph_cycles);
+ for (int i = 0; i != 256; i++) {
+ int new_node = next_node++;
+ GraphId new_gnode = graph_cycles.GetId(ptr(new_node));
+ ASSERT_NE(InvalidGraphId(), new_gnode);
+ id[new_node] = new_gnode;
+ ASSERT_EQ(ptr(new_node), graph_cycles.Ptr(new_gnode));
+ for (const auto &node : nodes) {
+ ASSERT_NE(node, new_node);
+ }
+ nodes.push_back(new_node);
+ }
+ for (int i = 0; i != 256; i++) {
+ ASSERT_GT(nodes.size(), 0);
+ int node_index = RandomNode(&rng, &nodes);
+ int node = nodes[node_index];
+ nodes[node_index] = nodes.back();
+ nodes.pop_back();
+ graph_cycles.RemoveNode(ptr(node));
+ id.erase(node);
+ int j = 0;
+ while (j != edges.size()) {
+ if (edges[j].from == node || edges[j].to == node) {
+ edges[j] = edges.back();
+ edges.pop_back();
+ } else {
+ j++;
+ }
+ }
+ }
+ CheckInvariants(graph_cycles);
+ }
+ }
+}
+
+class GraphCyclesTest : public ::testing::Test {
+ public:
+ IdMap id_;
+ GraphCycles g_;
+
+ static void* Ptr(int i) {
+ return reinterpret_cast<void*>(static_cast<uintptr_t>(i));
+ }
+
+ static int Num(void* ptr) {
+ return static_cast<int>(reinterpret_cast<uintptr_t>(ptr));
+ }
+
+  // The constructor registers nodes 0..99 so tests can refer to them by number.
+ GraphCyclesTest() {
+ for (int i = 0; i < 100; i++) {
+ id_[i] = g_.GetId(Ptr(i));
+ }
+ CheckInvariants(g_);
+ }
+
+ bool AddEdge(int x, int y) {
+ return g_.InsertEdge(Get(id_, x), Get(id_, y));
+ }
+
+ void AddMultiples() {
+ // For every node x > 0: add edge to 2*x, 3*x
+ for (int x = 1; x < 25; x++) {
+ EXPECT_TRUE(AddEdge(x, 2*x)) << x;
+ EXPECT_TRUE(AddEdge(x, 3*x)) << x;
+ }
+ CheckInvariants(g_);
+ }
+
+ std::string Path(int x, int y) {
+ GraphId path[5];
+ int np = g_.FindPath(Get(id_, x), Get(id_, y), ABSL_ARRAYSIZE(path), path);
+ std::string result;
+ for (int i = 0; i < np; i++) {
+ if (i >= ABSL_ARRAYSIZE(path)) {
+ result += " ...";
+ break;
+ }
+ if (!result.empty()) result.push_back(' ');
+ char buf[20];
+ snprintf(buf, sizeof(buf), "%d", Num(g_.Ptr(path[i])));
+ result += buf;
+ }
+ return result;
+ }
+};
+
+TEST_F(GraphCyclesTest, NoCycle) {
+ AddMultiples();
+ CheckInvariants(g_);
+}
+
+TEST_F(GraphCyclesTest, SimpleCycle) {
+ AddMultiples();
+ EXPECT_FALSE(AddEdge(8, 4));
+ EXPECT_EQ("4 8", Path(4, 8));
+ CheckInvariants(g_);
+}
+
+TEST_F(GraphCyclesTest, IndirectCycle) {
+ AddMultiples();
+ EXPECT_TRUE(AddEdge(16, 9));
+ CheckInvariants(g_);
+ EXPECT_FALSE(AddEdge(9, 2));
+ EXPECT_EQ("2 4 8 16 9", Path(2, 9));
+ CheckInvariants(g_);
+}
+
+TEST_F(GraphCyclesTest, LongPath) {
+ ASSERT_TRUE(AddEdge(2, 4));
+ ASSERT_TRUE(AddEdge(4, 6));
+ ASSERT_TRUE(AddEdge(6, 8));
+ ASSERT_TRUE(AddEdge(8, 10));
+ ASSERT_TRUE(AddEdge(10, 12));
+ ASSERT_FALSE(AddEdge(12, 2));
+ EXPECT_EQ("2 4 6 8 10 ...", Path(2, 12));
+ CheckInvariants(g_);
+}
+
+TEST_F(GraphCyclesTest, RemoveNode) {
+ ASSERT_TRUE(AddEdge(1, 2));
+ ASSERT_TRUE(AddEdge(2, 3));
+ ASSERT_TRUE(AddEdge(3, 4));
+ ASSERT_TRUE(AddEdge(4, 5));
+ g_.RemoveNode(g_.Ptr(id_[3]));
+ id_.erase(3);
+ ASSERT_TRUE(AddEdge(5, 1));
+}
+
+TEST_F(GraphCyclesTest, ManyEdges) {
+ const int N = 50;
+ for (int i = 0; i < N; i++) {
+ for (int j = 1; j < N; j++) {
+ ASSERT_TRUE(AddEdge(i, i+j));
+ }
+ }
+ CheckInvariants(g_);
+ ASSERT_TRUE(AddEdge(2*N-1, 0));
+ CheckInvariants(g_);
+ ASSERT_FALSE(AddEdge(10, 9));
+ CheckInvariants(g_);
+}
+
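+// Illustrative example (not part of the original commit): a minimal sketch of
+// the contract exercised above -- an edge that would close an existing path
+// is rejected, while unrelated edges are still accepted. The test name is
+// hypothetical.
+TEST_F(GraphCyclesTest, ExampleContractSketch) {
+  ASSERT_TRUE(AddEdge(1, 2));
+  ASSERT_TRUE(AddEdge(2, 3));
+  EXPECT_TRUE(g_.IsReachable(Get(id_, 1), Get(id_, 3)));
+  EXPECT_FALSE(AddEdge(3, 1));  // would create the cycle 1->2->3->1
+  EXPECT_TRUE(AddEdge(3, 4));   // an acyclic edge still succeeds
+}
+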
+} // namespace synchronization_internal
+} // namespace absl
diff --git a/absl/synchronization/internal/kernel_timeout.h b/absl/synchronization/internal/kernel_timeout.h
new file mode 100644
index 0000000..a83c427
--- /dev/null
+++ b/absl/synchronization/internal/kernel_timeout.h
@@ -0,0 +1,147 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// An optional absolute timeout, with nanosecond granularity,
+// compatible with absl::Time. Suitable for in-register
+// parameter-passing (e.g., syscalls).
+// Constructible from an absl::Time (for a timeout to be respected) or {}
+// (for "no timeout").
+// This is a private low-level API for use by a handful of low-level
+// components that are friends of this class. Higher-level components
+// should build APIs based on absl::Time and absl::Duration.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
+
+#ifdef _WIN32
+#include <intsafe.h>
+#endif
+#include <time.h>
+#include <algorithm>
+#include <limits>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+namespace absl {
+namespace synchronization_internal {
+
+class Waiter;
+
+class KernelTimeout {
+ public:
+ // A timeout that should expire at <t>. Any value, in the full
+ // InfinitePast() to InfiniteFuture() range, is valid here and will be
+ // respected.
+ explicit KernelTimeout(absl::Time t) : ns_(MakeNs(t)) {}
+ // No timeout.
+ KernelTimeout() : ns_(0) {}
+
+ // A more explicit factory for those who prefer it. Equivalent to {}.
+ static KernelTimeout Never() { return {}; }
+
+ // We explicitly do not support other custom formats: timespec, int64_t nanos.
+ // Unify on this and absl::Time, please.
+ bool has_timeout() const { return ns_ != 0; }
+
+ private:
+ // internal rep, not user visible: ns after unix epoch.
+ // zero = no timeout.
+  // A negative value is treated as an unlikely (and certainly expired!)
+  // but valid timeout.
+ int64_t ns_;
+
+ static int64_t MakeNs(absl::Time t) {
+ // optimization--InfiniteFuture is common "no timeout" value
+ // and cheaper to compare than convert.
+ if (t == absl::InfiniteFuture()) return 0;
+ int64_t x = ToUnixNanos(t);
+
+ // A timeout that lands exactly on the epoch (x=0) needs to be respected,
+    // so we alter it unnoticeably to 1. Negative timeouts are in
+    // theory supported, but handled poorly by the kernel (long
+    // delays), so we push them forward too; since all such times have
+    // already passed, the change is indistinguishable.
+ if (x <= 0) x = 1;
+ // A time larger than what can be represented to the kernel is treated
+ // as no timeout.
+ if (x == std::numeric_limits<int64_t>::max()) x = 0;
+ return x;
+ }
+
+ // Convert to parameter for sem_timedwait/futex/similar. Only for approved
+ // users. Do not call if !has_timeout.
+ struct timespec MakeAbsTimespec() {
+ int64_t n = ns_;
+ static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
+ if (n == 0) {
+ ABSL_RAW_LOG(
+ ERROR,
+ "Tried to create a timespec from a non-timeout; never do this.");
+      // But we'll try to continue sanely; no-timeout ~= saturated timeout.
+ n = std::numeric_limits<int64_t>::max();
+ }
+
+ // Kernel APIs validate timespecs as being at or after the epoch,
+ // despite the kernel time type being signed. However, no one can
+ // tell the difference between a timeout at or before the epoch (since
+  // all such timeouts have expired!).
+ if (n < 0) n = 0;
+
+ struct timespec abstime;
+ int64_t seconds = std::min(n / kNanosPerSecond,
+ int64_t{std::numeric_limits<time_t>::max()});
+ abstime.tv_sec = static_cast<time_t>(seconds);
+ abstime.tv_nsec =
+ static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
+ return abstime;
+ }
+
+#ifdef _WIN32
+ // Converts to milliseconds from now, or INFINITE when
+ // !has_timeout(). For use by SleepConditionVariableSRW on
+ // Windows. Callers should recognize that the return value is a
+ // relative duration (it should be recomputed by calling this method
+ // in the case of a spurious wakeup).
+ DWORD InMillisecondsFromNow() const {
+ if (!has_timeout()) {
+ return INFINITE;
+ }
+ // The use of absl::Now() to convert from absolute time to
+ // relative time means that absl::Now() cannot use anything that
+ // depends on KernelTimeout (for example, Mutex) on Windows.
+ int64_t now = ToUnixNanos(absl::Now());
+ if (ns_ >= now) {
+ // Round up so that Now() + ms_from_now >= ns_.
+ constexpr uint64_t max_nanos =
+ std::numeric_limits<int64_t>::max() - 999999u;
+ uint64_t ms_from_now =
+ (std::min<uint64_t>(max_nanos, ns_ - now) + 999999u) / 1000000u;
+ if (ms_from_now > std::numeric_limits<DWORD>::max()) {
+ return INFINITE;
+ }
+ return static_cast<DWORD>(ms_from_now);
+ }
+ return 0;
+ }
+#endif
+
+ friend class Waiter;
+};
+
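+// Illustrative sketch (not part of the original commit): the two ways a
+// caller constructs a KernelTimeout. The function name is hypothetical and
+// exists only to demonstrate the public API.
+inline bool ExampleKernelTimeoutUsage(absl::Time deadline) {
+  KernelTimeout t(deadline);                     // expires at `deadline`
+  KernelTimeout never = KernelTimeout::Never();  // equivalent to {}
+  return t.has_timeout() && !never.has_timeout();
+}
+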
+} // namespace synchronization_internal
+} // namespace absl
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
diff --git a/absl/synchronization/internal/mutex_nonprod.cc b/absl/synchronization/internal/mutex_nonprod.cc
new file mode 100644
index 0000000..94be54b
--- /dev/null
+++ b/absl/synchronization/internal/mutex_nonprod.cc
@@ -0,0 +1,311 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Implementation of a small subset of Mutex and CondVar functionality
+// for platforms where the production implementation hasn't been fully
+// ported yet.
+
+#include "absl/synchronization/mutex.h"
+
+#if defined(_WIN32)
+#include <chrono> // NOLINT(build/c++11)
+#else
+#include <sys/time.h>
+#include <time.h>
+#endif
+
+#include <algorithm>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/time/time.h"
+
+namespace absl {
+namespace synchronization_internal {
+
+namespace {
+
+// Return the current time plus the timeout.
+absl::Time DeadlineFromTimeout(absl::Duration timeout) {
+ return absl::Now() + timeout;
+}
+
+// Limit the deadline to a positive, 32-bit time_t value to accommodate
+// implementation restrictions. This also deals with InfinitePast and
+// InfiniteFuture.
+absl::Time LimitedDeadline(absl::Time deadline) {
+ deadline = std::max(absl::FromTimeT(0), deadline);
+ deadline = std::min(deadline, absl::FromTimeT(0x7fffffff));
+ return deadline;
+}
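+
+// For example (illustrative, not part of the original commit):
+//   LimitedDeadline(absl::InfinitePast())   == absl::FromTimeT(0)
+//   LimitedDeadline(absl::InfiniteFuture()) == absl::FromTimeT(0x7fffffff)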
+
+} // namespace
+
+#if defined(_WIN32)
+
+MutexImpl::MutexImpl() {}
+
+MutexImpl::~MutexImpl() {
+ if (locked_) {
+ std_mutex_.unlock();
+ }
+}
+
+void MutexImpl::Lock() {
+ std_mutex_.lock();
+ locked_ = true;
+}
+
+bool MutexImpl::TryLock() {
+ bool locked = std_mutex_.try_lock();
+ if (locked) locked_ = true;
+ return locked;
+}
+
+void MutexImpl::Unlock() {
+ locked_ = false;
+ released_.SignalAll();
+ std_mutex_.unlock();
+}
+
+CondVarImpl::CondVarImpl() {}
+
+CondVarImpl::~CondVarImpl() {}
+
+void CondVarImpl::Signal() { std_cv_.notify_one(); }
+
+void CondVarImpl::SignalAll() { std_cv_.notify_all(); }
+
+void CondVarImpl::Wait(MutexImpl* mu) {
+ mu->released_.SignalAll();
+ std_cv_.wait(mu->std_mutex_);
+}
+
+bool CondVarImpl::WaitWithDeadline(MutexImpl* mu, absl::Time deadline) {
+ mu->released_.SignalAll();
+ time_t when = ToTimeT(deadline);
+ int64_t nanos = ToInt64Nanoseconds(deadline - absl::FromTimeT(when));
+ std::chrono::system_clock::time_point deadline_tp =
+ std::chrono::system_clock::from_time_t(when) +
+ std::chrono::duration_cast<std::chrono::system_clock::duration>(
+ std::chrono::nanoseconds(nanos));
+ return std_cv_.wait_until(mu->std_mutex_, deadline_tp) ==
+ std::cv_status::timeout;
+}
+
+#else // ! _WIN32
+
+MutexImpl::MutexImpl() {
+ ABSL_RAW_CHECK(pthread_mutex_init(&pthread_mutex_, nullptr) == 0,
+ "pthread error");
+}
+
+MutexImpl::~MutexImpl() {
+ if (locked_) {
+ ABSL_RAW_CHECK(pthread_mutex_unlock(&pthread_mutex_) == 0, "pthread error");
+ }
+ ABSL_RAW_CHECK(pthread_mutex_destroy(&pthread_mutex_) == 0, "pthread error");
+}
+
+void MutexImpl::Lock() {
+ ABSL_RAW_CHECK(pthread_mutex_lock(&pthread_mutex_) == 0, "pthread error");
+ locked_ = true;
+}
+
+bool MutexImpl::TryLock() {
+ bool locked = (0 == pthread_mutex_trylock(&pthread_mutex_));
+ if (locked) locked_ = true;
+ return locked;
+}
+
+void MutexImpl::Unlock() {
+ locked_ = false;
+ released_.SignalAll();
+ ABSL_RAW_CHECK(pthread_mutex_unlock(&pthread_mutex_) == 0, "pthread error");
+}
+
+CondVarImpl::CondVarImpl() {
+ ABSL_RAW_CHECK(pthread_cond_init(&pthread_cv_, nullptr) == 0,
+ "pthread error");
+}
+
+CondVarImpl::~CondVarImpl() {
+ ABSL_RAW_CHECK(pthread_cond_destroy(&pthread_cv_) == 0, "pthread error");
+}
+
+void CondVarImpl::Signal() {
+ ABSL_RAW_CHECK(pthread_cond_signal(&pthread_cv_) == 0, "pthread error");
+}
+
+void CondVarImpl::SignalAll() {
+ ABSL_RAW_CHECK(pthread_cond_broadcast(&pthread_cv_) == 0, "pthread error");
+}
+
+void CondVarImpl::Wait(MutexImpl* mu) {
+ mu->released_.SignalAll();
+ ABSL_RAW_CHECK(pthread_cond_wait(&pthread_cv_, &mu->pthread_mutex_) == 0,
+ "pthread error");
+}
+
+bool CondVarImpl::WaitWithDeadline(MutexImpl* mu, absl::Time deadline) {
+ mu->released_.SignalAll();
+ struct timespec ts = ToTimespec(deadline);
+ int rc = pthread_cond_timedwait(&pthread_cv_, &mu->pthread_mutex_, &ts);
+ if (rc == ETIMEDOUT) return true;
+ ABSL_RAW_CHECK(rc == 0, "pthread error");
+ return false;
+}
+
+#endif // ! _WIN32
+
+void MutexImpl::Await(const Condition& cond) {
+ if (cond.Eval()) return;
+ released_.SignalAll();
+ do {
+ released_.Wait(this);
+ } while (!cond.Eval());
+}
+
+bool MutexImpl::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
+ if (cond.Eval()) return true;
+ released_.SignalAll();
+ while (true) {
+ if (released_.WaitWithDeadline(this, deadline)) return false;
+ if (cond.Eval()) return true;
+ }
+}
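+
+// Mechanism sketch (not part of the original commit): Unlock() and the
+// CondVarImpl wait paths signal `released_` before giving up the mutex, so a
+// thread blocked in Await() re-evaluates its Condition each time another
+// thread releases the lock.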
+
+} // namespace synchronization_internal
+
+Mutex::Mutex() {}
+
+Mutex::~Mutex() {}
+
+void Mutex::Lock() { impl()->Lock(); }
+
+void Mutex::Unlock() { impl()->Unlock(); }
+
+bool Mutex::TryLock() { return impl()->TryLock(); }
+
+void Mutex::ReaderLock() { Lock(); }
+
+void Mutex::ReaderUnlock() { Unlock(); }
+
+void Mutex::Await(const Condition& cond) { impl()->Await(cond); }
+
+void Mutex::LockWhen(const Condition& cond) {
+ Lock();
+ Await(cond);
+}
+
+bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
+ return impl()->AwaitWithDeadline(
+ cond, synchronization_internal::LimitedDeadline(deadline));
+}
+
+bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
+ return AwaitWithDeadline(
+ cond, synchronization_internal::DeadlineFromTimeout(timeout));
+}
+
+bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) {
+ Lock();
+ return AwaitWithDeadline(cond, deadline);
+}
+
+bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) {
+ return LockWhenWithDeadline(
+ cond, synchronization_internal::DeadlineFromTimeout(timeout));
+}
+
+bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
+ absl::Duration timeout) {
+ return LockWhenWithTimeout(cond, timeout);
+}
+bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond,
+ absl::Time deadline) {
+ return LockWhenWithDeadline(cond, deadline);
+}
+
+void Mutex::EnableDebugLog(const char*) {}
+void Mutex::EnableInvariantDebugging(void (*)(void*), void*) {}
+void Mutex::ForgetDeadlockInfo() {}
+void Mutex::AssertHeld() const {}
+void Mutex::AssertReaderHeld() const {}
+void Mutex::AssertNotHeld() const {}
+
+CondVar::CondVar() {}
+
+CondVar::~CondVar() {}
+
+void CondVar::Signal() { impl()->Signal(); }
+
+void CondVar::SignalAll() { impl()->SignalAll(); }
+
+void CondVar::Wait(Mutex* mu) { return impl()->Wait(mu->impl()); }
+
+bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) {
+ return impl()->WaitWithDeadline(
+ mu->impl(), synchronization_internal::LimitedDeadline(deadline));
+}
+
+bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
+ return WaitWithDeadline(mu, absl::Now() + timeout);
+}
+
+void CondVar::EnableDebugLog(const char*) {}
+
+#ifdef THREAD_SANITIZER
+extern "C" void __tsan_read1(void *addr);
+#else
+#define __tsan_read1(addr) // do nothing if TSan not enabled
+#endif
+
+// A function that just returns its argument, dereferenced
+static bool Dereference(void *arg) {
+ // ThreadSanitizer does not instrument this file for memory accesses.
+ // This function dereferences a user variable that can participate
+ // in a data race, so we need to manually tell TSan about this memory access.
+ __tsan_read1(arg);
+ return *(static_cast<bool *>(arg));
+}
+
+Condition::Condition() {} // null constructor, used for kTrue only
+const Condition Condition::kTrue;
+
+Condition::Condition(bool (*func)(void *), void *arg)
+ : eval_(&CallVoidPtrFunction),
+ function_(func),
+ method_(nullptr),
+ arg_(arg) {}
+
+bool Condition::CallVoidPtrFunction(const Condition *c) {
+ return (*c->function_)(c->arg_);
+}
+
+Condition::Condition(const bool *cond)
+ : eval_(CallVoidPtrFunction),
+ function_(Dereference),
+ method_(nullptr),
+ // const_cast is safe since Dereference does not modify arg
+ arg_(const_cast<bool *>(cond)) {}
+
+bool Condition::Eval() const {
+ // eval_ == null for kTrue
+ return (this->eval_ == nullptr) || (*this->eval_)(this);
+}
+
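+// Illustrative sketch (not part of the original commit): the two Condition
+// forms implemented above. The function and variable names are hypothetical.
+inline bool ExampleConditionForms() {
+  bool flag = true;
+  Condition by_pointer(&flag);  // routed through Dereference()
+  Condition by_function(        // routed through CallVoidPtrFunction()
+      +[](void* arg) { return *static_cast<bool*>(arg); }, &flag);
+  return by_pointer.Eval() && by_function.Eval();
+}
+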
+} // namespace absl
diff --git a/absl/synchronization/internal/mutex_nonprod.inc b/absl/synchronization/internal/mutex_nonprod.inc
new file mode 100644
index 0000000..51441b2
--- /dev/null
+++ b/absl/synchronization/internal/mutex_nonprod.inc
@@ -0,0 +1,256 @@
+// Do not include. This is an implementation detail of
+// absl/synchronization/mutex.h.
+//
+// Declares three classes:
+//
+// absl::synchronization_internal::MutexImpl - implementation helper for Mutex
+// absl::synchronization_internal::CondVarImpl - implementation helper for
+// CondVar
+// absl::synchronization_internal::SynchronizationStorage<T> - implementation
+// helper for Mutex, CondVar
+
+#include <type_traits>
+
+#if defined(_WIN32)
+#include <condition_variable>
+#include <mutex>
+#else
+#include <pthread.h>
+#endif
+
+#include "absl/base/call_once.h"
+#include "absl/time/time.h"
+
+// Declare that Mutex::ReaderLock is actually Lock(). Intended primarily
+// for tests, and even then as a last resort.
+#ifdef ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE
+#error ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE cannot be directly set
+#else
+#define ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE 1
+#endif
+
+// Declare that Mutex::EnableInvariantDebugging is not implemented.
+// Intended primarily for tests, and even then as a last resort.
+#ifdef ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED
+#error ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED cannot be directly set
+#else
+#define ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED 1
+#endif
+
+namespace absl {
+class Condition;
+
+namespace synchronization_internal {
+
+class MutexImpl;
+
+// Do not use this implementation detail of CondVar. Provides most of the
+// implementation, but should not be placed directly in static storage
+// because it will not linker initialize properly. See
+// SynchronizationStorage<T> below for what we mean by linker
+// initialization.
+class CondVarImpl {
+ public:
+ CondVarImpl();
+ CondVarImpl(const CondVarImpl&) = delete;
+ CondVarImpl& operator=(const CondVarImpl&) = delete;
+ ~CondVarImpl();
+
+ void Signal();
+ void SignalAll();
+ void Wait(MutexImpl* mutex);
+ bool WaitWithDeadline(MutexImpl* mutex, absl::Time deadline);
+
+ private:
+#if defined(_WIN32)
+ std::condition_variable_any std_cv_;
+#else
+ pthread_cond_t pthread_cv_;
+#endif
+};
+
+// Do not use this implementation detail of Mutex. Provides most of the
+// implementation, but should not be placed directly in static storage
+// because it will not linker initialize properly. See
+// SynchronizationStorage<T> below for what we mean by linker
+// initialization.
+class MutexImpl {
+ public:
+ MutexImpl();
+ MutexImpl(const MutexImpl&) = delete;
+ MutexImpl& operator=(const MutexImpl&) = delete;
+ ~MutexImpl();
+
+ void Lock();
+ bool TryLock();
+ void Unlock();
+ void Await(const Condition& cond);
+ bool AwaitWithDeadline(const Condition& cond, absl::Time deadline);
+
+ private:
+ friend class CondVarImpl;
+
+#if defined(_WIN32)
+ std::mutex std_mutex_;
+#else
+ pthread_mutex_t pthread_mutex_;
+#endif
+
+ // True if the underlying mutex is locked. If the destructor is entered
+ // while locked_, the underlying mutex is unlocked. Mutex supports
+ // destruction while locked, but the same is undefined behavior for both
+ // pthread_mutex_t and std::mutex.
+ bool locked_ = false;
+
+ // Signaled before releasing the lock, in support of Await.
+ CondVarImpl released_;
+};
+
+// Do not use this implementation detail of CondVar and Mutex. A storage
+// space for T that supports a base::LinkerInitialized constructor. T must
+// have a default constructor, which is called by the first call to
+// get(). T's destructor is never called if the base::LinkerInitialized
+// constructor is called.
+//
+// Objects constructed with the default constructor are constructed and
+// destructed like any other object, and should never be allocated in
+// static storage.
+//
+// Objects constructed with the base::LinkerInitialized constructor should
+// always be in static storage. For such objects, calls to get() are always
+// valid, except from signal handlers.
+//
+// Note that this implementation relies on undefined language behaviors that
+// are known to hold for the set of supported compilers. An analysis
+// follows.
+//
+// From the C++11 standard:
+//
+// [basic.life] says an object has non-trivial initialization if it is of
+// class type and it is initialized by a constructor other than a trivial
+// default constructor. (the base::LinkerInitialized constructor is
+// non-trivial)
+//
+// [basic.life] says the lifetime of an object with a non-trivial
+// constructor begins when the call to the constructor is complete.
+//
+// [basic.life] says the lifetime of an object with non-trivial destructor
+// ends when the call to the destructor begins.
+//
+// [basic.life] p5 specifies undefined behavior when accessing non-static
+// members of an instance outside its
+// lifetime. (SynchronizationStorage::get() accesses non-static members.)
+//
+// So, base::LinkerInitialized object of SynchronizationStorage uses a
+// non-trivial constructor, which is called at some point during dynamic
+// initialization, and is therefore subject to order of dynamic
+// initialization bugs, where get() is called before the object's
+// constructor is, resulting in undefined behavior.
+//
+// Similarly, a base::LinkerInitialized SynchronizationStorage object has a
+// non-trivial destructor, and so its lifetime ends at some point during
+// destruction of objects with static storage duration [basic.start.term]
+// p4. There is a window where other exit code could call get() after this
+// occurs, resulting in undefined behavior.
+//
+// Combined, these statements imply that base::LinkerInitialized instances
+// of SynchronizationStorage<T> rely on undefined behavior.
+//
+// However, in practice, the implementation works on all supported
+// compilers. Specifically, we rely on:
+//
+// a) zero-initialization being sufficient to initialize
+// base::LinkerInitialized instances for the purposes of calling
+// get(), regardless of when the constructor is called. This is
+// because the is_dynamic_ boolean is correctly zero-initialized to
+// false.
+//
+//    b) the base::LinkerInitialized constructor is a NOP, and immaterial
+//       even to concurrent calls to get().
+//
+// c) the destructor being a NOP for base::LinkerInitialized objects
+// (guaranteed by a check for !is_dynamic_), and so any concurrent and
+// subsequent calls to get() functioning as if the destructor were not
+// called, by virtue of the instances' storage remaining valid after the
+// destructor runs.
+//
+// d) That a-c apply transitively when SynchronizationStorage<T> is the
+// only member of a class allocated in static storage.
+//
+// Nothing in the language standard guarantees that a-d hold. In practice,
+// these hold in all supported compilers.
+//
+// Future direction:
+//
+// Ideally, we would simply use std::mutex or a similar class, which when
+// allocated statically would support use immediately after static
+// initialization up until static storage is reclaimed (i.e. the properties
+// we require of all "linker initialized" instances).
+//
+// Regarding construction in static storage, std::mutex is required to
+// provide a constexpr default constructor [thread.mutex.class], which
+// ensures the instance's lifetime begins with static initialization
+// [basic.start.init], and so is immune to any problems caused by the order
+// of dynamic initialization. However, as of this writing Microsoft's
+// Visual Studio does not provide a constexpr constructor for std::mutex.
+// See
+// https://blogs.msdn.microsoft.com/vcblog/2015/06/02/constexpr-complete-for-vs-2015-rtm-c11-compiler-c17-stl/
+//
+// Regarding destruction of instances in static storage, [basic.life] does
+// say that the lifetime of an object ends when the storage it occupies is
+// released, in the case of a non-trivial destructor. However, std::mutex is
+// not specified to have a trivial destructor.
+//
+// So, we would need a class with a constexpr default constructor and a
+// trivial destructor. Today, we can achieve neither desired property using
+// std::mutex directly.
+template <typename T>
+class SynchronizationStorage {
+ public:
+ // Instances allocated on the heap or on the stack should use the default
+ // constructor.
+ SynchronizationStorage()
+ : is_dynamic_(true), once_() {}
+
+ // Instances allocated in static storage (not on the heap, not on the
+ // stack) should use this constructor.
+ explicit SynchronizationStorage(base::LinkerInitialized) {}
+
+ SynchronizationStorage(SynchronizationStorage&) = delete;
+ SynchronizationStorage& operator=(SynchronizationStorage&) = delete;
+
+ ~SynchronizationStorage() {
+ if (is_dynamic_) {
+ get()->~T();
+ }
+ }
+
+ // Retrieve the object in storage. This is fast and thread safe, but does
+ // incur the cost of absl::call_once().
+ //
+ // For instances in static storage constructed with the
+ // base::LinkerInitialized constructor, may be called at any time without
+ // regard for order of dynamic initialization or destruction of objects
+ // in static storage. See the class comment for caveats.
+ T* get() {
+ absl::call_once(once_, SynchronizationStorage::Construct, this);
+ return reinterpret_cast<T*>(&space_);
+ }
+
+ private:
+ static void Construct(SynchronizationStorage<T>* self) {
+ new (&self->space_) T();
+ }
+
+ // When true, T's destructor is run when this is destructed.
+ //
+ // The base::LinkerInitialized constructor assumes this value will be set
+ // false by static initialization.
+ bool is_dynamic_;
+
+ absl::once_flag once_;
+
+ // An aligned space for T.
+ typename std::aligned_storage<sizeof(T), alignof(T)>::type space_;
+};
+
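+// Illustrative sketch (not part of the original commit): the intended usage
+// pattern described above. `ExampleHolder` is a hypothetical stand-in for
+// wrappers like Mutex and CondVar.
+class ExampleHolder {
+ public:
+  ExampleHolder() {}  // heap/stack instances construct and destruct T normally
+  explicit ExampleHolder(base::LinkerInitialized x) : storage_(x) {}
+  MutexImpl* impl() { return storage_.get(); }  // lazily constructs the T
+
+ private:
+  SynchronizationStorage<MutexImpl> storage_;
+};
+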
+} // namespace synchronization_internal
+} // namespace absl
diff --git a/absl/synchronization/internal/per_thread_sem.cc b/absl/synchronization/internal/per_thread_sem.cc
new file mode 100644
index 0000000..af87222
--- /dev/null
+++ b/absl/synchronization/internal/per_thread_sem.cc
@@ -0,0 +1,106 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is a no-op if the required LowLevelAlloc support is missing.
+#include "absl/base/internal/low_level_alloc.h"
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#include "absl/synchronization/internal/per_thread_sem.h"
+
+#include <atomic>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/malloc_extension.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/waiter.h"
+
+namespace absl {
+namespace synchronization_internal {
+
+void PerThreadSem::SetThreadBlockedCounter(std::atomic<int> *counter) {
+ base_internal::ThreadIdentity *identity;
+ identity = GetOrCreateCurrentThreadIdentity();
+ identity->blocked_count_ptr = counter;
+}
+
+std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
+ base_internal::ThreadIdentity *identity;
+ identity = GetOrCreateCurrentThreadIdentity();
+ return identity->blocked_count_ptr;
+}
+
+void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
+ Waiter::GetWaiter(identity)->Init();
+ identity->ticker.store(0, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
+}
+
+void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
+ const int ticker =
+ identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
+ const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
+ const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
+ if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
+    // Wake up the waiting thread since it is time for it to become idle.
+ Waiter::GetWaiter(identity)->Poke();
+ }
+}
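+
+// Timing sketch (not part of the original commit): with Tick() invoked about
+// once per second, a thread that recorded wait_start == W is poked once the
+// ticker passes W + Waiter::kIdlePeriods; the woken waiter then runs
+// MaybeBecomeIdle() (see waiter.cc) and marks itself idle for malloc.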
+
+} // namespace synchronization_internal
+} // namespace absl
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalPerThreadSemPost(
+ absl::base_internal::ThreadIdentity *identity) {
+ absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
+}
+
+ABSL_ATTRIBUTE_WEAK bool AbslInternalPerThreadSemWait(
+ absl::synchronization_internal::KernelTimeout t) {
+ bool timeout = false;
+ absl::base_internal::ThreadIdentity *identity;
+ identity = absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
+
+ // Ensure wait_start != 0.
+ int ticker = identity->ticker.load(std::memory_order_relaxed);
+ identity->wait_start.store(ticker ? ticker : 1, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
+
+ if (identity->blocked_count_ptr != nullptr) {
+ // Increment count of threads blocked in a given thread pool.
+ identity->blocked_count_ptr->fetch_add(1, std::memory_order_relaxed);
+ }
+
+ timeout =
+ !absl::synchronization_internal::Waiter::GetWaiter(identity)->Wait(t);
+
+ if (identity->blocked_count_ptr != nullptr) {
+ identity->blocked_count_ptr->fetch_sub(1, std::memory_order_relaxed);
+ }
+
+ if (identity->is_idle.load(std::memory_order_relaxed)) {
+ // We became idle during the wait; become non-idle again so that
+ // performance of deallocations done from now on does not suffer.
+ absl::base_internal::MallocExtension::instance()->MarkThreadBusy();
+ }
+ identity->is_idle.store(false, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ return !timeout;
+}
+
+} // extern "C"
+
+#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
diff --git a/absl/synchronization/internal/per_thread_sem.h b/absl/synchronization/internal/per_thread_sem.h
new file mode 100644
index 0000000..678b69e
--- /dev/null
+++ b/absl/synchronization/internal/per_thread_sem.h
@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// PerThreadSem is a low-level synchronization primitive controlling the
+// runnability of a single thread, used internally by Mutex and CondVar.
+//
+// This is NOT a general-purpose synchronization mechanism, and should not be
+// used directly by applications. Applications should use Mutex and CondVar.
+//
+// The semantics of PerThreadSem are the same as those of a counting semaphore.
+// Each thread maintains an abstract "count" value associated with its identity.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
+
+#include <atomic>
+
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/create_thread_identity.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+
+class Mutex;
+
+namespace synchronization_internal {
+
+class PerThreadSem {
+ public:
+ PerThreadSem() = delete;
+ PerThreadSem(const PerThreadSem&) = delete;
+ PerThreadSem& operator=(const PerThreadSem&) = delete;
+
+ // Routine invoked periodically (once a second) by a background thread.
+ // Has no effect on user-visible state.
+ static void Tick(base_internal::ThreadIdentity* identity);
+
+ // ---------------------------------------------------------------------------
+ // Routines used by autosizing threadpools to detect when threads are
+ // blocked. Each thread has a counter pointer, initially zero. If non-zero,
+ // the implementation atomically increments the counter when it blocks on a
+  // semaphore, and decrements it again when it wakes. This allows a threadpool
+ // to keep track of how many of its threads are blocked.
+ // SetThreadBlockedCounter() should be used only by threadpool
+ // implementations. GetThreadBlockedCounter() should be used by modules that
+ // block threads; if the pointer returned is non-zero, the location should be
+ // incremented before the thread blocks, and decremented after it wakes.
+ static void SetThreadBlockedCounter(std::atomic<int> *counter);
+ static std::atomic<int> *GetThreadBlockedCounter();
+
+ private:
+ // Create the PerThreadSem associated with "identity". Initializes count=0.
+ // REQUIRES: May only be called by ThreadIdentity.
+ static void Init(base_internal::ThreadIdentity* identity);
+
+ // Increments "identity"'s count.
+ static inline void Post(base_internal::ThreadIdentity* identity);
+
+ // Waits until either our count > 0 or t has expired.
+ // If count > 0, decrements count and returns true. Otherwise returns false.
+ // !t.has_timeout() => Wait(t) will return true.
+ static inline bool Wait(KernelTimeout t);
+
+ // White-listed callers.
+ friend class PerThreadSemTest;
+ friend class absl::Mutex;
+ friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
+};
+
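+// Illustrative sketch (not part of the original commit) of the threadpool
+// protocol described above. The function and parameter names are
+// hypothetical; `pool_blocked` must be non-null and outlive the thread.
+inline int ExampleInstallBlockedCounter(std::atomic<int>* pool_blocked) {
+  // Install the pool's counter on the calling worker thread; the semaphore
+  // implementation then adjusts it around blocking waits.
+  PerThreadSem::SetThreadBlockedCounter(pool_blocked);
+  return PerThreadSem::GetThreadBlockedCounter()->load(
+      std::memory_order_relaxed);
+}
+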
+} // namespace synchronization_internal
+} // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void AbslInternalPerThreadSemPost(
+ absl::base_internal::ThreadIdentity* identity);
+bool AbslInternalPerThreadSemWait(
+ absl::synchronization_internal::KernelTimeout t);
+} // extern "C"
+
+void absl::synchronization_internal::PerThreadSem::Post(
+ absl::base_internal::ThreadIdentity* identity) {
+ AbslInternalPerThreadSemPost(identity);
+}
+
+bool absl::synchronization_internal::PerThreadSem::Wait(
+ absl::synchronization_internal::KernelTimeout t) {
+ return AbslInternalPerThreadSemWait(t);
+}
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
diff --git a/absl/synchronization/internal/per_thread_sem_test.cc b/absl/synchronization/internal/per_thread_sem_test.cc
new file mode 100644
index 0000000..1d072a7
--- /dev/null
+++ b/absl/synchronization/internal/per_thread_sem_test.cc
@@ -0,0 +1,246 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/per_thread_sem.h"
+
+#include <atomic>
+#include <condition_variable> // NOLINT(build/c++11)
+#include <functional>
+#include <limits>
+#include <mutex> // NOLINT(build/c++11)
+#include <string>
+#include <thread> // NOLINT(build/c++11)
+
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/malloc_extension.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/strings/str_cat.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+#include "gtest/gtest.h"
+
+// In this test we explicitly avoid the use of synchronization
+// primitives which might use PerThreadSem, most notably absl::Mutex.
+
+namespace absl {
+namespace synchronization_internal {
+
+class SimpleSemaphore {
+ public:
+ SimpleSemaphore() : count_(0) {}
+
+ // Decrements (locks) the semaphore. If the semaphore's value is
+  // greater than zero, then the decrement proceeds and the function
+  // returns immediately. If the semaphore currently has the value
+ // zero, then the call blocks until it becomes possible to perform
+ // the decrement.
+ void Wait() {
+ std::unique_lock<std::mutex> lock(mu_);
+ cv_.wait(lock, [this]() { return count_ > 0; });
+ --count_;
+ cv_.notify_one();
+ }
+
+ // Increments (unlocks) the semaphore. If the semaphore's value
+ // consequently becomes greater than zero, then another thread
+  // blocked in a Wait() call will be woken up and proceed to lock the
+ // semaphore.
+ void Post() {
+ std::lock_guard<std::mutex> lock(mu_);
+ ++count_;
+ cv_.notify_one();
+ }
+
+ private:
+ std::mutex mu_;
+ std::condition_variable cv_;
+ int count_;
+};
+
+struct ThreadData {
+ int num_iterations; // Number of replies to send.
+ SimpleSemaphore identity2_written; // Posted by thread writing identity2.
+ base_internal::ThreadIdentity *identity1; // First Post()-er.
+ base_internal::ThreadIdentity *identity2; // First Wait()-er.
+ KernelTimeout timeout;
+};
+
+// Need friendship with PerThreadSem.
+class PerThreadSemTest : public testing::Test {
+ public:
+ static void TimingThread(ThreadData* t) {
+ t->identity2 = GetOrCreateCurrentThreadIdentity();
+ t->identity2_written.Post();
+ while (t->num_iterations--) {
+ Wait(t->timeout);
+ Post(t->identity1);
+ }
+ }
+
+ void TestTiming(const char *msg, bool timeout) {
+ static const int kNumIterations = 100;
+ ThreadData t;
+ t.num_iterations = kNumIterations;
+ t.timeout = timeout ?
+ KernelTimeout(absl::Now() + absl::Seconds(10000)) // far in the future
+ : KernelTimeout::Never();
+ t.identity1 = GetOrCreateCurrentThreadIdentity();
+
+ // We can't use the Thread class here because it uses the Mutex
+ // class which will invoke PerThreadSem, so we use std::thread instead.
+ std::thread partner_thread(std::bind(TimingThread, &t));
+
+ // Wait for our partner thread to register their identity.
+ t.identity2_written.Wait();
+
+ int64_t min_cycles = std::numeric_limits<int64_t>::max();
+ int64_t total_cycles = 0;
+ for (int i = 0; i < kNumIterations; ++i) {
+ absl::SleepFor(absl::Milliseconds(20));
+ int64_t cycles = base_internal::CycleClock::Now();
+ Post(t.identity2);
+ Wait(t.timeout);
+ cycles = base_internal::CycleClock::Now() - cycles;
+ min_cycles = std::min(min_cycles, cycles);
+ total_cycles += cycles;
+ }
+ std::string out =
+ StrCat(msg, "min cycle count=", min_cycles, " avg cycle count=",
+ absl::SixDigits(static_cast<double>(total_cycles) /
+ kNumIterations));
+ printf("%s\n", out.c_str());
+
+ partner_thread.join();
+ }
+
+ protected:
+ static void Post(base_internal::ThreadIdentity *id) {
+ PerThreadSem::Post(id);
+ }
+ static bool Wait(KernelTimeout t) {
+ return PerThreadSem::Wait(t);
+ }
+
+ // convenience overload
+ static bool Wait(absl::Time t) {
+ return Wait(KernelTimeout(t));
+ }
+
+ static void Tick(base_internal::ThreadIdentity *identity) {
+ PerThreadSem::Tick(identity);
+ }
+};
+
+namespace {
+
+TEST_F(PerThreadSemTest, WithoutTimeout) {
+ PerThreadSemTest::TestTiming("Without timeout: ", false);
+}
+
+TEST_F(PerThreadSemTest, WithTimeout) {
+ PerThreadSemTest::TestTiming("With timeout: ", true);
+}
+
+TEST_F(PerThreadSemTest, Timeouts) {
+ absl::Time timeout = absl::Now() + absl::Milliseconds(50);
+ EXPECT_FALSE(Wait(timeout));
+ EXPECT_LE(timeout, absl::Now());
+
+ absl::Time negative_timeout = absl::UnixEpoch() - absl::Milliseconds(100);
+ EXPECT_FALSE(Wait(negative_timeout));
+ EXPECT_LE(negative_timeout, absl::Now()); // trivially true :)
+
+ Post(GetOrCreateCurrentThreadIdentity());
+ // The wait here has an expired timeout, but we have a wake to consume,
+ // so this should succeed
+ EXPECT_TRUE(Wait(negative_timeout));
+}
+
+// Test that idle threads properly register themselves as such with malloc.
+TEST_F(PerThreadSemTest, Idle) {
+ // We can't use gmock because it might use synch calls. So we do it
+ // by hand, messily. I don't bother hitting every one of the
+ // MallocExtension calls because most of them won't get made
+ // anyway--if they do we can add them.
+ class MockMallocExtension : public base_internal::MallocExtension {
+ public:
+ MockMallocExtension(base_internal::MallocExtension *real,
+ base_internal::ThreadIdentity *id,
+ std::atomic<int> *idles, std::atomic<int> *busies)
+ : real_(real), id_(id), idles_(idles), busies_(busies) {}
+ void MarkThreadIdle() override {
+ if (base_internal::CurrentThreadIdentityIfPresent() != id_) {
+ return;
+ }
+ idles_->fetch_add(1, std::memory_order_relaxed);
+ }
+
+ void MarkThreadBusy() override {
+ if (base_internal::CurrentThreadIdentityIfPresent() != id_) {
+ return;
+ }
+ busies_->fetch_add(1, std::memory_order_relaxed);
+ }
+ size_t GetAllocatedSize(const void* p) override {
+ return real_->GetAllocatedSize(p);
+ }
+
+ private:
+ MallocExtension *real_;
+ base_internal::ThreadIdentity *id_;
+ std::atomic<int>* idles_;
+ std::atomic<int>* busies_;
+ };
+
+ base_internal::ThreadIdentity *id = GetOrCreateCurrentThreadIdentity();
+ std::atomic<int> idles(0);
+ std::atomic<int> busies(0);
+ base_internal::MallocExtension *old =
+ base_internal::MallocExtension::instance();
+ MockMallocExtension mock(old, id, &idles, &busies);
+ base_internal::MallocExtension::Register(&mock);
+ std::atomic<int> sync(0);
+
+ std::thread t([id, &idles, &sync]() {
+ // Wait for the main thread to begin the wait process
+ while (0 == sync.load(std::memory_order_relaxed)) {
+ SleepFor(absl::Milliseconds(1));
+ }
+    // Wait for the main thread to become idle, then wake it.
+    // Pretend time is passing--enough ticks should cause an idling.
+ for (int i = 0; i < 100; ++i) {
+ Tick(id);
+ }
+ while (0 == idles.load(std::memory_order_relaxed)) {
+ // Keep ticking, just in case.
+ Tick(id);
+ SleepFor(absl::Milliseconds(1));
+ }
+ Post(id);
+ });
+
+ idles.store(0, std::memory_order_relaxed); // In case we slept earlier.
+ sync.store(1, std::memory_order_relaxed);
+ Wait(KernelTimeout::Never());
+
+ // t will wake us once we become idle.
+ EXPECT_LT(0, busies.load(std::memory_order_relaxed));
+ t.join();
+ base_internal::MallocExtension::Register(old);
+}
+
+} // namespace
+
+} // namespace synchronization_internal
+} // namespace absl
diff --git a/absl/synchronization/internal/thread_pool.h b/absl/synchronization/internal/thread_pool.h
new file mode 100644
index 0000000..8464042
--- /dev/null
+++ b/absl/synchronization/internal/thread_pool.h
@@ -0,0 +1,90 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
+
+#include <cassert>
+#include <functional>
+#include <queue>
+#include <thread> // NOLINT(build/c++11)
+#include <vector>
+
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+namespace synchronization_internal {
+
+// A simple ThreadPool implementation for tests.
+class ThreadPool {
+ public:
+ explicit ThreadPool(int num_threads) {
+ for (int i = 0; i < num_threads; ++i) {
+ threads_.push_back(std::thread(&ThreadPool::WorkLoop, this));
+ }
+ }
+
+ ThreadPool(const ThreadPool &) = delete;
+ ThreadPool &operator=(const ThreadPool &) = delete;
+
+ ~ThreadPool() {
+ {
+ absl::MutexLock l(&mu_);
+      for (size_t i = 0; i < threads_.size(); ++i) {
+ queue_.push(nullptr); // Shutdown signal.
+ }
+ }
+ for (auto &t : threads_) {
+ t.join();
+ }
+ }
+
+ // Schedule a function to be run on a ThreadPool thread immediately.
+ void Schedule(std::function<void()> func) {
+ assert(func != nullptr);
+ absl::MutexLock l(&mu_);
+ queue_.push(std::move(func));
+ }
+
+ private:
+ bool WorkAvailable() const EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ return !queue_.empty();
+ }
+
+ void WorkLoop() {
+ while (true) {
+ std::function<void()> func;
+ {
+ absl::MutexLock l(&mu_);
+ mu_.Await(absl::Condition(this, &ThreadPool::WorkAvailable));
+ func = std::move(queue_.front());
+ queue_.pop();
+ }
+ if (func == nullptr) { // Shutdown signal.
+ break;
+ }
+ func();
+ }
+ }
+
+ absl::Mutex mu_;
+ std::queue<std::function<void()>> queue_ GUARDED_BY(mu_);
+ std::vector<std::thread> threads_;
+};
+
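+// Illustrative usage sketch (not part of the original commit); the function
+// name is hypothetical. The destructor drains the queue and joins workers.
+inline void ExamplePoolUsage() {
+  ThreadPool pool(2);                            // starts two worker threads
+  pool.Schedule([]() { /* a unit of work */ });  // runs on some worker
+}  // ~ThreadPool enqueues shutdown signals and joins both threads
+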
+} // namespace synchronization_internal
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_THREAD_POOL_H_
diff --git a/absl/synchronization/internal/waiter.cc b/absl/synchronization/internal/waiter.cc
new file mode 100644
index 0000000..cd16c78
--- /dev/null
+++ b/absl/synchronization/internal/waiter.cc
@@ -0,0 +1,394 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/waiter.h"
+
+#include "absl/base/config.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#endif
+
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#include <semaphore.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+
+#include <atomic>
+#include <cassert>
+
+#include "absl/base/internal/malloc_extension.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+namespace synchronization_internal {
+
+static void MaybeBecomeIdle() {
+ base_internal::ThreadIdentity *identity =
+ base_internal::CurrentThreadIdentityIfPresent();
+ assert(identity != nullptr);
+ const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
+ const int ticker = identity->ticker.load(std::memory_order_relaxed);
+ const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
+ if (!is_idle && ticker - wait_start > Waiter::kIdlePeriods) {
+ identity->is_idle.store(true, std::memory_order_relaxed);
+ base_internal::MallocExtension::instance()->MarkThreadIdle();
+ }
+}
+
+#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_WAIT_BITSET
+#define FUTEX_WAIT_BITSET 9
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#ifndef FUTEX_CLOCK_REALTIME
+#define FUTEX_CLOCK_REALTIME 256
+#endif
+#ifndef FUTEX_BITSET_MATCH_ANY
+#define FUTEX_BITSET_MATCH_ANY 0xFFFFFFFF
+#endif
+#endif
+
+void Waiter::Init() {
+ futex_.store(0, std::memory_order_relaxed);
+}
+
+bool Waiter::Wait(KernelTimeout t) {
+ // Loop until we can atomically decrement futex from a positive
+ // value, waiting on a futex while we believe it is zero.
+ while (true) {
+ int x = futex_.load(std::memory_order_relaxed);
+ if (x != 0) {
+ if (!futex_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ return true; // Consumed a wakeup, we are done.
+ }
+
+ int err = 0;
+ if (t.has_timeout()) {
+ // https://locklessinc.com/articles/futex_cheat_sheet/
+ // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
+ struct timespec abs_timeout = t.MakeAbsTimespec();
+ // Atomically check that the futex value is still 0, and if it
+ // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
+ err = syscall(
+ SYS_futex, reinterpret_cast<int *>(&futex_),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, 0,
+ &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
+ } else {
+ // Atomically check that the futex value is still 0, and if it
+ // is, sleep until woken by FUTEX_WAKE.
+ err = syscall(SYS_futex, reinterpret_cast<int *>(&futex_),
+ FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, nullptr);
+ }
+ if (err != 0) {
+ if (errno == EINTR || errno == EWOULDBLOCK) {
+ // Do nothing, the loop will retry.
+ } else if (errno == ETIMEDOUT) {
+ return false; // Timeout.
+ } else {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with errno %d\n", errno);
+ }
+ }
+
+ MaybeBecomeIdle();
+ }
+}
+
+void Waiter::Post() {
+ if (futex_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waker.
+ Poke();
+ }
+}
+
+void Waiter::Poke() {
+ // Wake one thread waiting on the futex.
+ int err = syscall(SYS_futex, reinterpret_cast<int *>(&futex_),
+ FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
+ if (err < 0) {
+ ABSL_RAW_LOG(FATAL, "FUTEX_WAKE failed with errno %d\n", errno);
+ }
+}
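+
+// Protocol sketch (not part of the original commit): the futex word counts
+// available wakeups. Post() publishes a permit and wakes a sleeper only on
+// the 0 -> 1 transition; Wait() loops on a CAS to consume a permit and parks
+// in the kernel while the word reads 0.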
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
+
+class PthreadMutexHolder {
+ public:
+ explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
+ const int err = pthread_mutex_lock(mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
+ }
+ }
+
+ PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
+ PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
+
+ ~PthreadMutexHolder() {
+ const int err = pthread_mutex_unlock(mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
+ }
+ }
+
+ private:
+ pthread_mutex_t *mu_;
+};
+
+void Waiter::Init() {
+ const int err = pthread_mutex_init(&mu_, 0);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
+ }
+
+ const int err2 = pthread_cond_init(&cv_, 0);
+ if (err2 != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
+ }
+
+ waiter_count_.store(0, std::memory_order_relaxed);
+ wakeup_count_.store(0, std::memory_order_relaxed);
+}
+
+bool Waiter::Wait(KernelTimeout t) {
+ struct timespec abs_timeout;
+ if (t.has_timeout()) {
+ abs_timeout = t.MakeAbsTimespec();
+ }
+
+ PthreadMutexHolder h(&mu_);
+ waiter_count_.fetch_add(1, std::memory_order_relaxed);
+ // Loop until we find a wakeup to consume or timeout.
+ while (true) {
+ int x = wakeup_count_.load(std::memory_order_relaxed);
+ if (x != 0) {
+ if (!wakeup_count_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ // Successfully consumed a wakeup, we're done.
+ waiter_count_.fetch_sub(1, std::memory_order_relaxed);
+ return true;
+ }
+
+ // No wakeups available, time to wait.
+ if (!t.has_timeout()) {
+ const int err = pthread_cond_wait(&cv_, &mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
+ }
+ } else {
+ const int err = pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
+ if (err == ETIMEDOUT) {
+ waiter_count_.fetch_sub(1, std::memory_order_relaxed);
+ return false;
+ }
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
+ }
+ }
+ MaybeBecomeIdle();
+ }
+}
+
+void Waiter::Post() {
+ wakeup_count_.fetch_add(1, std::memory_order_release);
+ Poke();
+}
+
+void Waiter::Poke() {
+ if (waiter_count_.load(std::memory_order_relaxed) == 0) {
+ return;
+ }
+  // Potentially a waiter. Take the lock and check again.
+ PthreadMutexHolder h(&mu_);
+ if (waiter_count_.load(std::memory_order_relaxed) == 0) {
+ return;
+ }
+ const int err = pthread_cond_signal(&cv_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
+ }
+}
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
+
+void Waiter::Init() {
+ if (sem_init(&sem_, 0, 0) != 0) {
+ ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
+ }
+ wakeups_.store(0, std::memory_order_relaxed);
+}
+
+bool Waiter::Wait(KernelTimeout t) {
+ struct timespec abs_timeout;
+ if (t.has_timeout()) {
+ abs_timeout = t.MakeAbsTimespec();
+ }
+
+ // Loop until we timeout or consume a wakeup.
+ while (true) {
+ int x = wakeups_.load(std::memory_order_relaxed);
+ if (x != 0) {
+ if (!wakeups_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ // Successfully consumed a wakeup, we're done.
+ return true;
+ }
+
+ // Nothing to consume, wait (looping on EINTR).
+ while (true) {
+ if (!t.has_timeout()) {
+ if (sem_wait(&sem_) == 0) break;
+ if (errno == EINTR) continue;
+ ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
+ } else {
+ if (sem_timedwait(&sem_, &abs_timeout) == 0) break;
+ if (errno == EINTR) continue;
+ if (errno == ETIMEDOUT) return false;
+ ABSL_RAW_LOG(FATAL, "sem_timedwait failed: %d", errno);
+ }
+ }
+ MaybeBecomeIdle();
+ }
+}
+
+void Waiter::Post() {
+ wakeups_.fetch_add(1, std::memory_order_release); // Post a wakeup.
+ Poke();
+}
+
+void Waiter::Poke() {
+ if (sem_post(&sem_) != 0) { // Wake any semaphore waiter.
+ ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
+ }
+}
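+
+// Illustrative note (exposition only): Poke() posts the semaphore without
+// touching wakeups_, so a blocked Wait() returns from sem_wait(), finds
+// wakeups_ still 0, calls MaybeBecomeIdle(), and resumes waiting. That is
+// the intent: a deliberate spurious wakeup, distinct from the wakeup
+// granted by Post().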
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
+
+class LockHolder {
+ public:
+ explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
+ AcquireSRWLockExclusive(mu_);
+ }
+
+ LockHolder(const LockHolder&) = delete;
+ LockHolder& operator=(const LockHolder&) = delete;
+
+ ~LockHolder() {
+ ReleaseSRWLockExclusive(mu_);
+ }
+
+ private:
+ SRWLOCK* mu_;
+};
+
+void Waiter::Init() {
+ InitializeSRWLock(&mu_);
+ InitializeConditionVariable(&cv_);
+ waiter_count_.store(0, std::memory_order_relaxed);
+ wakeup_count_.store(0, std::memory_order_relaxed);
+}
+
+bool Waiter::Wait(KernelTimeout t) {
+ LockHolder h(&mu_);
+ waiter_count_.fetch_add(1, std::memory_order_relaxed);
+
+ // Loop until we find a wakeup to consume or timeout.
+ while (true) {
+ int x = wakeup_count_.load(std::memory_order_relaxed);
+ if (x != 0) {
+ if (!wakeup_count_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ // Successfully consumed a wakeup, we're done.
+ waiter_count_.fetch_sub(1, std::memory_order_relaxed);
+ return true;
+ }
+
+ // No wakeups available, time to wait.
+ if (!SleepConditionVariableSRW(
+ &cv_, &mu_, t.InMillisecondsFromNow(), 0)) {
+ // GetLastError() returns a Win32 DWORD, but we assign to
+ // unsigned long to simplify the ABSL_RAW_LOG case below. The uniform
+ // initialization guarantees this is not a narrowing conversion.
+ const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
+ if (err == ERROR_TIMEOUT) {
+ waiter_count_.fetch_sub(1, std::memory_order_relaxed);
+ return false;
+ } else {
+ ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
+ }
+ }
+
+ MaybeBecomeIdle();
+ }
+}
+
+void Waiter::Post() {
+ wakeup_count_.fetch_add(1, std::memory_order_release);
+ Poke();
+}
+
+void Waiter::Poke() {
+ if (waiter_count_.load(std::memory_order_relaxed) == 0) {
+ return;
+ }
+  // Potentially a waiter. Take the lock and check again.
+ LockHolder h(&mu_);
+ if (waiter_count_.load(std::memory_order_relaxed) == 0) {
+ return;
+ }
+ WakeConditionVariable(&cv_);
+}
+
+#else
+#error Unknown ABSL_WAITER_MODE
+#endif
+
+} // namespace synchronization_internal
+} // namespace absl
diff --git a/absl/synchronization/internal/waiter.h b/absl/synchronization/internal/waiter.h
new file mode 100644
index 0000000..025ace4
--- /dev/null
+++ b/absl/synchronization/internal/waiter.h
@@ -0,0 +1,138 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
+
+#include "absl/base/config.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <pthread.h>
+#endif
+
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#include <semaphore.h>
+#endif
+
+#include <atomic>
+
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
+#define ABSL_WAITER_MODE_FUTEX 0
+#define ABSL_WAITER_MODE_SEM 1
+#define ABSL_WAITER_MODE_CONDVAR 2
+#define ABSL_WAITER_MODE_WIN32 3
+
+#if defined(ABSL_FORCE_WAITER_MODE)
+#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
+#elif defined(_WIN32)
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
+#elif defined(__linux__)
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
+#elif defined(ABSL_HAVE_SEMAPHORE_H)
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
+#else
+#define ABSL_WAITER_MODE ABSL_WAITER_MODE_CONDVAR
+#endif
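+
+// For example (an illustrative build flag, not required by this header),
+// a Linux build could force the condvar implementation in place of the
+// futex default with:
+//   -DABSL_FORCE_WAITER_MODE=2
+// which selects ABSL_WAITER_MODE_CONDVAR above.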
+
+namespace absl {
+namespace synchronization_internal {
+
+// Waiter is an OS-specific semaphore.
+class Waiter {
+ public:
+ // No constructor, instances use the reserved space in ThreadIdentity.
+ // All initialization logic belongs in `Init()`.
+ Waiter() = delete;
+ Waiter(const Waiter&) = delete;
+ Waiter& operator=(const Waiter&) = delete;
+
+ // Prepare any data to track waits.
+ void Init();
+
+  // Blocks the calling thread until a matching call to `Post()` occurs
+  // or the deadline `t` passes. Returns `true` if woken (`Post()` was
+  // called), `false` on timeout.
+ bool Wait(KernelTimeout t);
+
+ // Restart the caller of `Wait()` as with a normal semaphore.
+ void Post();
+
+ // If anyone is waiting, wake them up temporarily and cause them to
+ // call `MaybeBecomeIdle()`. They will then return to waiting for a
+ // `Post()` or timeout.
+ void Poke();
+
+ // Returns the Waiter associated with the identity.
+ static Waiter* GetWaiter(base_internal::ThreadIdentity* identity) {
+ static_assert(
+ sizeof(Waiter) <= sizeof(base_internal::ThreadIdentity::WaiterState),
+ "Insufficient space for Waiter");
+ return reinterpret_cast<Waiter*>(identity->waiter_state.data);
+ }
+
+ // How many periods to remain idle before releasing resources
+#ifndef THREAD_SANITIZER
+ static const int kIdlePeriods = 60;
+#else
+ // Memory consumption under ThreadSanitizer is a serious concern,
+ // so we release resources sooner. The value of 1 leads to 1 to 2 second
+ // delay before marking a thread as idle.
+ static const int kIdlePeriods = 1;
+#endif
+
+ private:
+#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
+ // Futexes are defined by specification to be ints.
+  // Thus std::atomic<int> must be just an int with lock-free methods.
+ std::atomic<int> futex_;
+ static_assert(sizeof(int) == sizeof(futex_), "Wrong size for futex");
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
+ pthread_mutex_t mu_;
+ pthread_cond_t cv_;
+ std::atomic<int> waiter_count_;
+ std::atomic<int> wakeup_count_; // Unclaimed wakeups, written under lock.
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
+ sem_t sem_;
+ // This seems superfluous, but for Poke() we need to cause spurious
+ // wakeups on the semaphore. Hence we can't actually use the
+ // semaphore's count.
+ std::atomic<int> wakeups_;
+
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
+ // The Windows API has lots of choices for synchronization
+  // primitives. We are using SRWLOCK and CONDITION_VARIABLE
+ // because they don't require a destructor to release system
+ // resources.
+ SRWLOCK mu_;
+ CONDITION_VARIABLE cv_;
+ std::atomic<int> waiter_count_;
+ std::atomic<int> wakeup_count_;
+
+#else
+ #error Unknown ABSL_WAITER_MODE
+#endif
+};
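+
+// A minimal usage sketch (illustrative only; within this commit the sole
+// client is PerThreadSem). `identity` is assumed to be a valid
+// base_internal::ThreadIdentity*:
+//
+//   Waiter *w = Waiter::GetWaiter(identity);
+//   w->Init();                              // once, before first use
+//   ...
+//   if (w->Wait(KernelTimeout::Never())) {  // blocks until Post()
+//     // consumed one wakeup
+//   }
+//   ...
+//   w->Post();                              // from another thread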
+
+} // namespace synchronization_internal
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
new file mode 100644
index 0000000..cb0a3a1
--- /dev/null
+++ b/absl/synchronization/mutex.cc
@@ -0,0 +1,2680 @@
+#include "absl/synchronization/mutex.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#ifdef ERROR
+#undef ERROR
+#endif
+#else
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/time.h>
+#endif
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cinttypes>
+#include <thread> // NOLINT(build/c++11)
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/low_level_alloc.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/port.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/synchronization/internal/graphcycles.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+#include "absl/time/time.h"
+
+using absl::base_internal::CurrentThreadIdentityIfPresent;
+using absl::base_internal::PerThreadSynch;
+using absl::base_internal::ThreadIdentity;
+using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
+using absl::synchronization_internal::GraphCycles;
+using absl::synchronization_internal::GraphId;
+using absl::synchronization_internal::InvalidGraphId;
+using absl::synchronization_internal::KernelTimeout;
+using absl::synchronization_internal::PerThreadSem;
+
+extern "C" {
+ABSL_ATTRIBUTE_WEAK void AbslInternalMutexYield() { std::this_thread::yield(); }
+} // extern "C"
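+
+// Because the symbol is weak, an embedder could substitute its own yield
+// strategy (an illustrative sketch, not part of this commit):
+//
+//   extern "C" void AbslInternalMutexYield() { sched_yield(); }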
+
+namespace absl {
+
+namespace {
+
+#if defined(THREAD_SANITIZER)
+constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kIgnore;
+#else
+constexpr OnDeadlockCycle kDeadlockDetectionDefault = OnDeadlockCycle::kAbort;
+#endif
+
+ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
+ kDeadlockDetectionDefault);
+ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
+
+// ------------------------------------------ spinlock support
+
+// Make sure read-only globals used in the Mutex code are contained on the
+// same cacheline and cacheline aligned to eliminate any false sharing with
+// other globals from this and other modules.
+static struct MutexGlobals {
+ MutexGlobals() {
+ // Find machine-specific data needed for Delay() and
+ // TryAcquireWithSpinning(). This runs in the global constructor
+ // sequence, and before that zeros are safe values.
+ num_cpus = absl::base_internal::NumCPUs();
+ spinloop_iterations = num_cpus > 1 ? 1500 : 0;
+ }
+ int num_cpus;
+ int spinloop_iterations;
+ // Pad this struct to a full cacheline to prevent false sharing.
+ char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)];
+} ABSL_CACHELINE_ALIGNED mutex_globals;
+static_assert(
+ sizeof(MutexGlobals) == ABSL_CACHELINE_SIZE,
+ "MutexGlobals must occupy an entire cacheline to prevent false sharing");
+
+ABSL_CONST_INIT absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
+ submit_profile_data;
+ABSL_CONST_INIT absl::base_internal::AtomicHook<
+    void (*)(const char *msg, const void *obj, int64_t wait_cycles)>
+    mutex_tracer;
+ABSL_CONST_INIT absl::base_internal::AtomicHook<
+ void (*)(const char *msg, const void *cv)> cond_var_tracer;
+ABSL_CONST_INIT absl::base_internal::AtomicHook<
+ bool (*)(const void *pc, char *out, int out_size)> symbolizer;
+
+} // namespace
+
+void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
+ submit_profile_data.Store(fn);
+}
+
+void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+ int64_t wait_cycles)) {
+ mutex_tracer.Store(fn);
+}
+
+void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
+ cond_var_tracer.Store(fn);
+}
+
+void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
+ symbolizer.Store(fn);
+}
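+
+// A hypothetical registration sketch (the callback body is illustrative;
+// only RegisterMutexTracer itself is defined in this file):
+//
+//   void MyTracer(const char *msg, const void *obj, int64_t wait_cycles) {
+//     ABSL_RAW_LOG(INFO, "%s %p waited %lld cycles", msg, obj,
+//                  static_cast<long long>(wait_cycles));
+//   }
+//   ...
+//   absl::RegisterMutexTracer(&MyTracer);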
+
+// Spinlock delay on iteration c. Returns new c.
+namespace {
+enum DelayMode { AGGRESSIVE, GENTLE };
+}  // namespace
+static int Delay(int32_t c, DelayMode mode) {
+  // If this is a uniprocessor, only yield/sleep. Otherwise, if the mode is
+  // aggressive then spin many times before yielding. If the mode is
+  // gentle then spin only a few times before yielding. Aggressive spinning
+  // is used to ensure that an Unlock() call, which must get the spin lock
+  // for any thread to make progress, gets it without undue delay.
+ int32_t limit = (mutex_globals.num_cpus > 1) ?
+ ((mode == AGGRESSIVE) ? 5000 : 250) : 0;
+ if (c < limit) {
+ c++; // spin
+ } else {
+ ABSL_TSAN_MUTEX_PRE_DIVERT(0, 0);
+ if (c == limit) { // yield once
+ AbslInternalMutexYield();
+ c++;
+ } else { // then wait
+ absl::SleepFor(absl::Microseconds(10));
+ c = 0;
+ }
+ ABSL_TSAN_MUTEX_POST_DIVERT(0, 0);
+ }
+  return c;
+}
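+
+// Worked example (exposition only): on a multiprocessor in GENTLE mode,
+// limit == 250, so successive calls return c = 1, 2, ..., 250 while purely
+// spinning, yield once when c reaches the limit, then sleep 10us and reset
+// c to 0, repeating that spin/yield/sleep cycle until the caller stops.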
+
+// --------------------------Generic atomic ops
+// Ensure that "(*pv & bits) == bits" by doing an atomic update of "*pv" to
+// "*pv | bits" if necessary. Wait until (*pv & wait_until_clear)==0
+// before making any change.
+// This is used to set flags in mutex and condition variable words.
+static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
+ intptr_t wait_until_clear) {
+ intptr_t v;
+ do {
+ v = pv->load(std::memory_order_relaxed);
+ } while ((v & bits) != bits &&
+ ((v & wait_until_clear) != 0 ||
+ !pv->compare_exchange_weak(v, v | bits,
+ std::memory_order_release,
+ std::memory_order_relaxed)));
+}
+
+// Ensure that "(*pv & bits) == 0" by doing an atomic update of "*pv" to
+// "*pv & ~bits" if necessary. Wait until (*pv & wait_until_clear)==0
+// before making any change.
+// This is used to unset flags in mutex and condition variable words.
+static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
+ intptr_t wait_until_clear) {
+ intptr_t v;
+ do {
+ v = pv->load(std::memory_order_relaxed);
+ } while ((v & bits) != 0 &&
+ ((v & wait_until_clear) != 0 ||
+ !pv->compare_exchange_weak(v, v & ~bits,
+ std::memory_order_release,
+ std::memory_order_relaxed)));
+}
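+
+// Usage example drawn from below (see EnsureSynchEvent/ForgetSynchEvent):
+// setting or clearing the event flag on a mutex word first waits for the
+// queue spinlock bit to be clear, so the flag is never flipped while the
+// waiter list is being edited:
+//
+//   AtomicSetBits(&mu_, kMuEvent, kMuSpin);    // set kMuEvent
+//   AtomicClearBits(&mu_, kMuEvent, kMuSpin);  // clear kMuEvent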
+
+//------------------------------------------------------------------
+
+// Data for doing deadlock detection.
+static absl::base_internal::SpinLock deadlock_graph_mu(
+ absl::base_internal::kLinkerInitialized);
+
+// Graph used to detect deadlocks.
+static GraphCycles *deadlock_graph GUARDED_BY(deadlock_graph_mu)
+ PT_GUARDED_BY(deadlock_graph_mu);
+
+//------------------------------------------------------------------
+// An event mechanism for debugging mutex use.
+// It also allows mutexes to be given names for those who can't handle
+// addresses, and instead like to give their data structures names like
+// "Henry", "Fido", or "Rupert IV, King of Yondavia".
+
+namespace { // to prevent name pollution
+enum { // Mutex and CondVar events passed as "ev" to PostSynchEvent
+ // Mutex events
+ SYNCH_EV_TRYLOCK_SUCCESS,
+ SYNCH_EV_TRYLOCK_FAILED,
+ SYNCH_EV_READERTRYLOCK_SUCCESS,
+ SYNCH_EV_READERTRYLOCK_FAILED,
+ SYNCH_EV_LOCK,
+ SYNCH_EV_LOCK_RETURNING,
+ SYNCH_EV_READERLOCK,
+ SYNCH_EV_READERLOCK_RETURNING,
+ SYNCH_EV_UNLOCK,
+ SYNCH_EV_READERUNLOCK,
+
+ // CondVar events
+ SYNCH_EV_WAIT,
+ SYNCH_EV_WAIT_RETURNING,
+ SYNCH_EV_SIGNAL,
+ SYNCH_EV_SIGNALALL,
+};
+
+enum { // Event flags
+ SYNCH_F_R = 0x01, // reader event
+ SYNCH_F_LCK = 0x02, // PostSynchEvent called with mutex held
+ SYNCH_F_ACQ = 0x04, // event is an acquire
+
+ SYNCH_F_LCK_W = SYNCH_F_LCK,
+ SYNCH_F_LCK_R = SYNCH_F_LCK | SYNCH_F_R,
+ SYNCH_F_ACQ_W = SYNCH_F_ACQ,
+ SYNCH_F_ACQ_R = SYNCH_F_ACQ | SYNCH_F_R,
+};
+} // anonymous namespace
+
+// Properties of the events.
+static const struct {
+ int flags;
+ const char *msg;
+} event_properties[] = {
+ { SYNCH_F_LCK_W|SYNCH_F_ACQ_W, "TryLock succeeded " },
+ { 0, "TryLock failed " },
+ { SYNCH_F_LCK_R|SYNCH_F_ACQ_R, "ReaderTryLock succeeded " },
+ { 0, "ReaderTryLock failed " },
+ { SYNCH_F_ACQ_W, "Lock blocking " },
+ { SYNCH_F_LCK_W, "Lock returning " },
+ { SYNCH_F_ACQ_R, "ReaderLock blocking " },
+ { SYNCH_F_LCK_R, "ReaderLock returning " },
+ { SYNCH_F_LCK_W, "Unlock " },
+ { SYNCH_F_LCK_R, "ReaderUnlock " },
+ { 0, "Wait on " },
+ { 0, "Wait unblocked " },
+ { 0, "Signal on " },
+ { 0, "SignalAll on " },
+};
+
+static absl::base_internal::SpinLock synch_event_mu(
+ absl::base_internal::kLinkerInitialized);
+// Protects the synch_event hash table below.
+
+// Hash table size; should be prime > 2.
+// Can't be too small, as it's used for deadlock detection information.
+static const uint32_t kNSynchEvent = 1031;
+
+// We need to hide Mutexes (or other deadlock detection's pointers)
+// from the leak detector.
+static const uintptr_t kHideMask = static_cast<uintptr_t>(0xF03A5F7BF03A5F7BLL);
+static uintptr_t MaskMu(const void *mu) {
+ return reinterpret_cast<uintptr_t>(mu) ^ kHideMask;
+}
+
+static struct SynchEvent { // this is a trivial hash table for the events
+ // struct is freed when refcount reaches 0
+ int refcount GUARDED_BY(synch_event_mu);
+
+ // buckets have linear, 0-terminated chains
+ SynchEvent *next GUARDED_BY(synch_event_mu);
+
+ // Constant after initialization
+ uintptr_t masked_addr; // object at this address is called "name"
+
+ // No explicit synchronization used. Instead we assume that the
+ // client who enables/disables invariants/logging on a Mutex does so
+ // while the Mutex is not being concurrently accessed by others.
+ void (*invariant)(void *arg); // called on each event
+ void *arg; // first arg to (*invariant)()
+ bool log; // logging turned on
+
+ // Constant after initialization
+  char name[1];  // actually longer---null-terminated string
+} *synch_event[kNSynchEvent] GUARDED_BY(synch_event_mu);
+
+// Ensure that the object at "addr" has a SynchEvent struct associated with it,
+// set "bits" in the word there (waiting until lockbit is clear before doing
+// so), and return a refcounted reference that will remain valid until
+// UnrefSynchEvent() is called. If a new SynchEvent is allocated,
+// the string name is copied into it.
+// When used with a mutex, the caller should also ensure that kMuEvent
+// is set in the mutex word, and similarly for condition variables and kCVEvent.
+static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
+ const char *name, intptr_t bits,
+ intptr_t lockbit) {
+ uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
+ SynchEvent *e;
+  // First look for an existing SynchEvent struct.
+ synch_event_mu.Lock();
+ for (e = synch_event[h]; e != nullptr && e->masked_addr != MaskMu(addr);
+ e = e->next) {
+ }
+ if (e == nullptr) { // no SynchEvent struct found; make one.
+ if (name == nullptr) {
+ name = "";
+ }
+ size_t l = strlen(name);
+ e = reinterpret_cast<SynchEvent *>(
+ base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
+ e->refcount = 2; // one for return value, one for linked list
+ e->masked_addr = MaskMu(addr);
+ e->invariant = nullptr;
+ e->arg = nullptr;
+ e->log = false;
+ strcpy(e->name, name); // NOLINT(runtime/printf)
+ e->next = synch_event[h];
+ AtomicSetBits(addr, bits, lockbit);
+ synch_event[h] = e;
+ } else {
+ e->refcount++; // for return value
+ }
+ synch_event_mu.Unlock();
+ return e;
+}
+
+// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
+static void DeleteSynchEvent(SynchEvent *e) {
+ base_internal::LowLevelAlloc::Free(e);
+}
+
+// Decrement the reference count of *e, or do nothing if e==null.
+static void UnrefSynchEvent(SynchEvent *e) {
+ if (e != nullptr) {
+ synch_event_mu.Lock();
+ bool del = (--(e->refcount) == 0);
+ synch_event_mu.Unlock();
+ if (del) {
+ DeleteSynchEvent(e);
+ }
+ }
+}
+
+// Forget the mapping from the object (Mutex or CondVar) at address addr
+// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
+// is clear before doing so).
+static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
+ intptr_t lockbit) {
+ uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
+ SynchEvent **pe;
+ SynchEvent *e;
+ synch_event_mu.Lock();
+ for (pe = &synch_event[h];
+ (e = *pe) != nullptr && e->masked_addr != MaskMu(addr); pe = &e->next) {
+ }
+ bool del = false;
+ if (e != nullptr) {
+ *pe = e->next;
+ del = (--(e->refcount) == 0);
+ }
+ AtomicClearBits(addr, bits, lockbit);
+ synch_event_mu.Unlock();
+ if (del) {
+ DeleteSynchEvent(e);
+ }
+}
+
+// Return a refcounted reference to the SynchEvent of the object at address
+// "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
+// called.
+static SynchEvent *GetSynchEvent(const void *addr) {
+ uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
+ SynchEvent *e;
+ synch_event_mu.Lock();
+ for (e = synch_event[h]; e != nullptr && e->masked_addr != MaskMu(addr);
+ e = e->next) {
+ }
+ if (e != nullptr) {
+ e->refcount++;
+ }
+ synch_event_mu.Unlock();
+ return e;
+}
+
+// Called when an event "ev" occurs on a Mutex or CondVar "obj",
+// if event recording is on.
+static void PostSynchEvent(void *obj, int ev) {
+ SynchEvent *e = GetSynchEvent(obj);
+ // logging is on if event recording is on and either there's no event struct,
+ // or it explicitly says to log
+ if (e == nullptr || e->log) {
+ void *pcs[40];
+ int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
+ // A buffer with enough space for the ASCII for all the PCs, even on a
+ // 64-bit machine.
+ char buffer[ABSL_ARRAYSIZE(pcs) * 24];
+ int pos = snprintf(buffer, sizeof (buffer), " @");
+ for (int i = 0; i != n; i++) {
+ pos += snprintf(&buffer[pos], sizeof (buffer) - pos, " %p", pcs[i]);
+ }
+ ABSL_RAW_LOG(INFO, "%s%p %s %s", event_properties[ev].msg, obj,
+ (e == nullptr ? "" : e->name), buffer);
+ }
+ if ((event_properties[ev].flags & SYNCH_F_LCK) != 0 && e != nullptr &&
+ e->invariant != nullptr) {
+ (*e->invariant)(e->arg);
+ }
+ UnrefSynchEvent(e);
+}
+
+//------------------------------------------------------------------
+
+// The SynchWaitParams struct encapsulates the way in which a thread is waiting:
+// whether it has a timeout, the condition, exclusive/shared, and whether a
+// condition variable wait has an associated Mutex (as opposed to another
+// type of lock). It also points to the PerThreadSynch struct of its thread.
+// cv_word tells Enqueue() to enqueue on a CondVar using CondVarEnqueue().
+//
+// This structure is held on the stack rather than directly in
+// PerThreadSynch because a thread can be waiting on multiple Mutexes if,
+// while waiting on one Mutex, the implementation calls a client callback
+// (such as a Condition function) that acquires another Mutex. We don't
+// strictly need to allow this, but programmers become confused if we do not
+// allow them to use functions such as LOG() within Condition functions. The
+// PerThreadSynch struct points at the most recent SynchWaitParams struct when
+// the thread is on a Mutex's waiter queue.
+struct SynchWaitParams {
+ SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
+ KernelTimeout timeout_arg, Mutex *cvmu_arg,
+ PerThreadSynch *thread_arg,
+ std::atomic<intptr_t> *cv_word_arg)
+ : how(how_arg),
+ cond(cond_arg),
+ timeout(timeout_arg),
+ cvmu(cvmu_arg),
+ thread(thread_arg),
+ cv_word(cv_word_arg),
+ contention_start_cycles(base_internal::CycleClock::Now()) {}
+
+ const Mutex::MuHow how; // How this thread needs to wait.
+  const Condition *cond;  // The condition that this thread is waiting for.
+                          // In Mutex, this field is cleared (set to nullptr)
+                          // when a timeout expires.
+  KernelTimeout timeout;  // timeout expiry---absolute time.
+                          // In Mutex, this field is reset to Never() when a
+                          // timeout expires.
+ Mutex *const cvmu; // used for transfer from cond var to mutex
+ PerThreadSynch *const thread; // thread that is waiting
+
+ // If not null, thread should be enqueued on the CondVar whose state
+ // word is cv_word instead of queueing normally on the Mutex.
+ std::atomic<intptr_t> *cv_word;
+
+ int64_t contention_start_cycles; // Time (in cycles) when this thread started
+ // to contend for the mutex.
+};
+
+struct SynchLocksHeld {
+ int n; // number of valid entries in locks[]
+ bool overflow; // true iff we overflowed the array at some point
+ struct {
+ Mutex *mu; // lock acquired
+ int32_t count; // times acquired
+ GraphId id; // deadlock_graph id of acquired lock
+ } locks[40];
+ // If a thread overfills the array during deadlock detection, we
+ // continue, discarding information as needed. If no overflow has
+ // taken place, we can provide more error checking, such as
+ // detecting when a thread releases a lock it does not hold.
+};
+
+// A sentinel value in lists that is not 0.
+// A 0 value is used to mean "not on a list".
+static PerThreadSynch *const kPerThreadSynchNull =
+ reinterpret_cast<PerThreadSynch *>(1);
+
+static SynchLocksHeld *LocksHeldAlloc() {
+ SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
+ base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
+ ret->n = 0;
+ ret->overflow = false;
+ return ret;
+}
+
+// Return the PerThreadSynch-struct for this thread.
+static PerThreadSynch *Synch_GetPerThread() {
+ ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
+ return &identity->per_thread_synch;
+}
+
+static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
+ if (mu) {
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
+ }
+ PerThreadSynch *w = Synch_GetPerThread();
+ if (mu) {
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ }
+ return w;
+}
+
+static SynchLocksHeld *Synch_GetAllLocks() {
+ PerThreadSynch *s = Synch_GetPerThread();
+ if (s->all_locks == nullptr) {
+ s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
+ }
+ return s->all_locks;
+}
+
+// Post on "w"'s associated PerThreadSem.
+inline void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+ if (mu) {
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
+ }
+ PerThreadSem::Post(w->thread_identity());
+ if (mu) {
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ }
+}
+
+// Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
+bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
+ if (mu) {
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
+ }
+ assert(w == Synch_GetPerThread());
+ static_cast<void>(w);
+ bool res = PerThreadSem::Wait(t);
+ if (mu) {
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ }
+ return res;
+}
+
+// We're in a fatal signal handler that hopes to use Mutex and to get
+// lucky by not deadlocking. We try to improve its chances of success
+// by effectively disabling some of the consistency checks. This will
+// prevent certain ABSL_RAW_CHECK() statements from being triggered when
+// re-rentry is detected. The ABSL_RAW_CHECK() statements are those in the
+// Mutex code checking that the "waitp" field has not been reused.
+void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
+ // Fix the per-thread state only if it exists.
+ ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
+ if (identity != nullptr) {
+ identity->per_thread_synch.suppress_fatal_errors = true;
+ }
+ // Don't do deadlock detection when we are already failing.
+ synch_deadlock_detection.store(OnDeadlockCycle::kIgnore,
+ std::memory_order_release);
+}
+
+// --------------------------time support
+
+// Return the current time plus the timeout. Use the same clock as
+// PerThreadSem::Wait() for consistency. Unfortunately, we don't have
+// such a choice when a deadline is given directly.
+static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
+#ifndef _WIN32
+ struct timeval tv;
+ gettimeofday(&tv, nullptr);
+ return absl::TimeFromTimeval(tv) + timeout;
+#else
+ return absl::Now() + timeout;
+#endif
+}
+
+// --------------------------Mutexes
+
+// In the layout below, the msb of the bottom byte is currently unused. Also,
+// the following constraints were considered in choosing the layout:
+// o Both the debug allocator's "uninitialized" and "freed" patterns (0xab and
+// 0xcd) are illegal: reader and writer lock both held.
+// o kMuWriter and kMuEvent should exceed kMuDesig and kMuWait, to enable the
+// bit-twiddling trick in Mutex::Unlock().
+// o kMuWriter / kMuReader == kMuWrWait / kMuWait,
+// to enable the bit-twiddling trick in CheckForMutexCorruption().
+static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
+static const intptr_t kMuDesig = 0x0002L; // there's a designated waker
+static const intptr_t kMuWait = 0x0004L; // threads are waiting
+static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
+static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
+// INVARIANT1: there's a thread that was blocked on the mutex, is
+// no longer, yet has not yet acquired the mutex. If there's a
+// designated waker, all threads can avoid taking the slow path in
+// unlock because the designated waker will subsequently acquire
+// the lock and wake someone. To maintain INVARIANT1 the bit is
+// set when a thread is unblocked (INV1a), and threads that were
+// unblocked reset the bit when they either acquire or re-block
+// (INV1b).
+static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting
+ // for a reader
+static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
+static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
+static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
+
+// Hack to make constant values available to gdb pretty printer
+enum {
+ kGdbMuSpin = kMuSpin,
+ kGdbMuEvent = kMuEvent,
+ kGdbMuWait = kMuWait,
+ kGdbMuWriter = kMuWriter,
+ kGdbMuDesig = kMuDesig,
+ kGdbMuWrWait = kMuWrWait,
+ kGdbMuReader = kMuReader,
+ kGdbMuLow = kMuLow,
+};
+
+// kMuWrWait implies kMuWait.
+// kMuReader and kMuWriter are mutually exclusive.
+// If kMuReader is zero, there are no readers.
+// Otherwise, if kMuWait is zero, the high order bits contain a count of the
+// number of readers. Otherwise, the reader count is held in
+// PerThreadSynch::readers of the most recently queued waiter, again in the
+// bits above kMuLow.
+static const intptr_t kMuOne = 0x0100; // a count of one reader
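+
+// Worked encoding example (exposition only): a mutex held by two readers
+// with no waiters and no events stores
+//   mu_ == kMuReader | 2 * kMuOne == 0x201,
+// i.e. the reader count of 2 lives in the bits above kMuLow. Once kMuWait
+// is set, those high bits hold a PerThreadSynch pointer instead and the
+// count moves into the most recently queued waiter's `readers` field.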
+
+// flags passed to Enqueue and LockSlow{,WithTimeout,Loop}
+static const int kMuHasBlocked = 0x01; // already blocked (MUST == 1)
+static const int kMuIsCond = 0x02; // conditional waiter (CV or Condition)
+
+static_assert(PerThreadSynch::kAlignment > kMuLow,
+ "PerThreadSynch::kAlignment must be greater than kMuLow");
+
+// This struct contains various bitmasks to be used in
+// acquiring and releasing a mutex in a particular mode.
+struct MuHowS {
+  // If all the bits in fast_need_zero are zero, the lock can be acquired by
+  // adding fast_add and OR-ing in fast_or. The bit kMuDesig should be reset
+  // iff this is the designated waker.
+ intptr_t fast_need_zero;
+ intptr_t fast_or;
+ intptr_t fast_add;
+
+ intptr_t slow_need_zero; // fast_need_zero with events (e.g. logging)
+
+  intptr_t slow_inc_need_zero;  // if all the bits in slow_inc_need_zero are
+                                // zero, a reader can acquire a read share by
+                                // setting the reader bit and incrementing
+                                // the reader count (in the last waiter since
+                                // we're now slow-path).  kMuWrWait may be
+                                // ignored if we already waited once.
+};
+
+static const MuHowS kSharedS = {
+ // shared or read lock
+ kMuWriter | kMuWait | kMuEvent, // fast_need_zero
+ kMuReader, // fast_or
+ kMuOne, // fast_add
+ kMuWriter | kMuWait, // slow_need_zero
+ kMuSpin | kMuWriter | kMuWrWait, // slow_inc_need_zero
+};
+static const MuHowS kExclusiveS = {
+ // exclusive or write lock
+ kMuWriter | kMuReader | kMuEvent, // fast_need_zero
+ kMuWriter, // fast_or
+ 0, // fast_add
+ kMuWriter | kMuReader, // slow_need_zero
+ ~static_cast<intptr_t>(0), // slow_inc_need_zero
+};
+static const Mutex::MuHow kShared = &kSharedS; // shared lock
+static const Mutex::MuHow kExclusive = &kExclusiveS; // exclusive lock
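+
+// A sketch of the fast-path acquire these masks encode (exposition only;
+// the real paths live in the Lock()/ReaderLock() fast paths and in
+// LockSlowLoop() later in this file):
+//
+//   intptr_t v = mu_.load(std::memory_order_relaxed);
+//   if ((v & how->fast_need_zero) == 0 &&
+//       mu_.compare_exchange_strong(v, (v | how->fast_or) + how->fast_add,
+//                                   std::memory_order_acquire,
+//                                   std::memory_order_relaxed)) {
+//     // Lock acquired: for kShared this set kMuReader and added kMuOne to
+//     // the reader count; for kExclusive it set kMuWriter.
+//   }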
+
+#ifdef NDEBUG
+static constexpr bool kDebugMode = false;
+#else
+static constexpr bool kDebugMode = true;
+#endif
+
+#ifdef THREAD_SANITIZER
+static unsigned TsanFlags(Mutex::MuHow how) {
+ return how == kShared ? __tsan_mutex_read_lock : 0;
+}
+#endif
+
+Mutex::Mutex() : mu_(0) {
+ ABSL_TSAN_MUTEX_CREATE(this, 0);
+}
+
+static bool DebugOnlyIsExiting() {
+ return false;
+}
+
+Mutex::~Mutex() {
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ if ((v & kMuEvent) != 0 && !DebugOnlyIsExiting()) {
+ ForgetSynchEvent(&this->mu_, kMuEvent, kMuSpin);
+ }
+ if (kDebugMode) {
+ this->ForgetDeadlockInfo();
+ }
+ ABSL_TSAN_MUTEX_DESTROY(this, 0);
+}
+
+void Mutex::EnableDebugLog(const char *name) {
+ SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
+ e->log = true;
+ UnrefSynchEvent(e);
+}
+
+void EnableMutexInvariantDebugging(bool enabled) {
+ synch_check_invariants.store(enabled, std::memory_order_release);
+}
+
+void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
+ void *arg) {
+ if (synch_check_invariants.load(std::memory_order_acquire) &&
+ invariant != nullptr) {
+ SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
+ e->invariant = invariant;
+ e->arg = arg;
+ UnrefSynchEvent(e);
+ }
+}
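+
+// Illustrative usage (the Counter type and names here are hypothetical):
+//
+//   struct Counter { absl::Mutex mu; int value = 0; };
+//   static void CheckNonNegative(void *arg) {
+//     auto *c = static_cast<Counter *>(arg);
+//     ABSL_RAW_CHECK(c->value >= 0, "Counter invariant violated");
+//   }
+//   ...
+//   absl::EnableMutexInvariantDebugging(true);   // enable first
+//   counter.mu.EnableInvariantDebugging(&CheckNonNegative, &counter);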
+
+void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
+ synch_deadlock_detection.store(mode, std::memory_order_release);
+}
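+
+// Illustrative call (exposition only): a test that intentionally forms a
+// lock cycle could silence the detector for its duration with
+//
+//   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kIgnore);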
+
+// Return true iff threads x and y are waiting on the same condition for the
+// same type of lock. Requires that x and y be waiting on the same Mutex
+// queue.
+static bool MuSameCondition(PerThreadSynch *x, PerThreadSynch *y) {
+ return x->waitp->how == y->waitp->how &&
+ Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
+}
+
+// Given the contents of a mutex word containing a PerThreadSynch pointer,
+// return the pointer.
+static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
+ return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
+}
+
+// The next several routines maintain the per-thread next and skip fields
+// used in the Mutex waiter queue.
+// The queue is a circular singly-linked list, of which the "head" is the
+// last element, and head->next is the first element.
+// The skip field has the invariant:
+// For thread x, x->skip is one of:
+// - invalid (iff x is not in a Mutex wait queue),
+// - null, or
+// - a pointer to a distinct thread waiting later in the same Mutex queue
+// such that all threads in [x, x->skip] have the same condition and
+// lock type (MuSameCondition() is true for all pairs in [x, x->skip]).
+// In addition, if x->skip is valid, (x->may_skip || x->skip == null)
+//
+// By the spec of MuSameCondition(), it is not necessary when removing the
+// first runnable thread y from the front of a Mutex queue to adjust the skip
+// field of another thread x because if x->skip==y, x->skip must (have) become
+// invalid before y is removed. The function TryRemove can remove a specified
+// thread from an arbitrary position in the queue whether runnable or not, so
+// it fixes up skip fields that would otherwise be left dangling.
+// The statement
+// if (x->may_skip && MuSameCondition(x, x->next)) { x->skip = x->next; }
+// maintains the invariant, provided x is not the last waiter in a Mutex queue.
+// The statement
+// if (x->skip != null) { x->skip = x->skip->skip; }
+// maintains the invariant.
+
+// Returns the last thread y in a mutex waiter queue such that all threads in
+// [x, y] inclusive share the same condition. Sets skip fields of some threads
+// in that range to optimize future evaluation of Skip() on x values in
+// the range. Requires thread x is in a mutex waiter queue.
+// The locking is unusual. Skip() is called under these conditions:
+// - spinlock is held in call from Enqueue(), with maybe_unlocking == false
+// - Mutex is held in call from UnlockSlow() by last unlocker, with
+// maybe_unlocking == true
+// - both Mutex and spinlock are held in call from DequeueAllWakeable() (from
+// UnlockSlow()) and TryRemove()
+// These cases are mutually exclusive, so Skip() never runs concurrently
+// with itself on the same Mutex. The skip chain is used in these other places
+// that cannot occur concurrently:
+// - FixSkip() (from TryRemove()) (with spinlock and Mutex held)
+// - Dequeue() (with spinlock and Mutex held)
+// - UnlockSlow() (with spinlock and Mutex held)
+// A more complex case is Enqueue()
+// - Enqueue() (with spinlock held and maybe_unlocking == false)
+// This is the first case in which Skip is called, above.
+// - Enqueue() (without spinlock held; but queue is empty and being freshly
+// formed)
+// - Enqueue() (with spinlock held and maybe_unlocking == true)
+// The first case has mutual exclusion, and the second isolation through
+// working on an otherwise unreachable data structure.
+// In the last case, Enqueue() is required to change no skip/next pointers
+// except those in the added node and the former "head" node. This implies
+// that the new node is added after head, and so must be the new head or the
+// new front of the queue.
+static PerThreadSynch *Skip(PerThreadSynch *x) {
+ PerThreadSynch *x0 = nullptr;
+ PerThreadSynch *x1 = x;
+ PerThreadSynch *x2 = x->skip;
+ if (x2 != nullptr) {
+ // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
+ // such that x1 == x0->skip && x2 == x1->skip
+ while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
+ x0->skip = x2; // short-circuit skip from x0 to x2
+ }
+ x->skip = x1; // short-circuit skip from x to result
+ }
+ return x1;
+}
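+
+// Worked example (exposition only): suppose waiters A, B, C, D share one
+// condition and the chain entering Skip(A) is A->skip == B, B->skip == C,
+// C->skip == D, D->skip == null. The loop path-compresses as it walks, so
+// Skip(A) returns D and leaves A->skip == B->skip == C->skip == D; future
+// calls on any of A, B, C then reach D in a single step.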
+
+// "ancestor" appears before "to_be_removed" in the same Mutex waiter queue.
+// The latter is going to be removed out of order, because of a timeout.
+// Check whether "ancestor" has a skip field pointing to "to_be_removed",
+// and fix it if it does.
+static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
+ if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
+ if (to_be_removed->skip != nullptr) {
+ ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
+ } else if (ancestor->next != to_be_removed) { // they are not adjacent
+ ancestor->skip = ancestor->next; // can skip one past ancestor
+ } else {
+ ancestor->skip = nullptr; // can't skip at all
+ }
+ }
+}
+
+static void CondVarEnqueue(SynchWaitParams *waitp);
+
+// Enqueue thread "waitp->thread" on a waiter queue.
+// Called with mutex spinlock held if head != nullptr
+// If head==nullptr and waitp->cv_word==nullptr, then Enqueue() is
+// idempotent; it alters no state associated with the existing (empty)
+// queue.
+//
+// If waitp->cv_word == nullptr, queue the thread at either the front or
+// the end (according to its priority) of the circular mutex waiter queue whose
+// head is "head", and return the new head. mu is the previous mutex state,
+// which contains the reader count (perhaps adjusted for the operation in
+// progress) if the list was empty and a read lock held, and the holder hint if
+// the list was empty and a write lock held. (flags & kMuIsCond) indicates
+// whether this thread was transferred from a CondVar or is waiting for a
+// non-trivial condition. In this case, Enqueue() never returns nullptr
+//
+// If waitp->cv_word != nullptr, CondVarEnqueue() is called, and "head" is
+// returned. This mechanism is used by CondVar to queue a thread on the
+// condition variable queue instead of the mutex queue in implementing Wait().
+// In this case, Enqueue() can return nullptr (if head==nullptr).
+static PerThreadSynch *Enqueue(PerThreadSynch *head,
+ SynchWaitParams *waitp, intptr_t mu, int flags) {
+ // If we have been given a cv_word, call CondVarEnqueue() and return
+ // the previous head of the Mutex waiter queue.
+ if (waitp->cv_word != nullptr) {
+ CondVarEnqueue(waitp);
+ return head;
+ }
+
+ PerThreadSynch *s = waitp->thread;
+ ABSL_RAW_CHECK(
+ s->waitp == nullptr || // normal case
+ s->waitp == waitp || // Fer()---transfer from condition variable
+ s->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ s->waitp = waitp;
+ s->skip = nullptr; // maintain skip invariant (see above)
+ s->may_skip = true; // always true on entering queue
+ s->wake = false; // not being woken
+ s->cond_waiter = ((flags & kMuIsCond) != 0);
+ if (head == nullptr) { // s is the only waiter
+ s->next = s; // it's the only entry in the cycle
+ s->readers = mu; // reader count is from mu word
+ s->maybe_unlocking = false; // no one is searching an empty list
+ head = s; // s is new head
+ } else {
+ PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+ int64_t now_cycles = base_internal::CycleClock::Now();
+ if (s->next_priority_read_cycles < now_cycles) {
+ // Every so often, update our idea of the thread's priority.
+ // pthread_getschedparam() is 5% of the block/wakeup time;
+ // base_internal::CycleClock::Now() is 0.5%.
+ int policy;
+ struct sched_param param;
+ pthread_getschedparam(pthread_self(), &policy, &param);
+ s->priority = param.sched_priority;
+ s->next_priority_read_cycles =
+ now_cycles +
+ static_cast<int64_t>(base_internal::CycleClock::Frequency());
+ }
+ if (s->priority > head->priority) { // s's priority is above head's
+ // try to put s in priority-fifo order, or failing that at the front.
+ if (!head->maybe_unlocking) {
+ // No unlocker can be scanning the queue, so we can insert between
+ // skip-chains, and within a skip-chain if it has the same condition as
+ // s. We insert in priority-fifo order, examining the end of every
+ // skip-chain, plus every element with the same condition as s.
+ PerThreadSynch *advance_to = head; // next value of enqueue_after
+ PerThreadSynch *cur; // successor of enqueue_after
+ do {
+ enqueue_after = advance_to;
+ cur = enqueue_after->next; // this advance ensures progress
+ advance_to = Skip(cur); // normally, advance to end of skip chain
+ // (side-effect: optimizes skip chain)
+ if (advance_to != cur && s->priority > advance_to->priority &&
+ MuSameCondition(s, cur)) {
+ // but this skip chain is not a singleton, s has higher priority
+ // than its tail and has the same condition as the chain,
+ // so we can insert within the skip-chain
+ advance_to = cur; // advance by just one
+ }
+ } while (s->priority <= advance_to->priority);
+ // termination guaranteed because s->priority > head->priority
+ // and head is the end of a skip chain
+ } else if (waitp->how == kExclusive &&
+ Condition::GuaranteedEqual(waitp->cond, nullptr)) {
+ // An unlocker could be scanning the queue, but we know it will recheck
+ // the queue front for writers that have no condition, which is what s
+ // is, so an insert at front is safe.
+ enqueue_after = head; // add after head, at front
+ }
+ }
+#endif
+ if (enqueue_after != nullptr) {
+ s->next = enqueue_after->next;
+ enqueue_after->next = s;
+
+ // enqueue_after can be: head, Skip(...), or cur.
+ // The first two imply enqueue_after->skip == nullptr, and
+ // the last is used only if MuSameCondition(s, cur).
+ // We require this because clearing enqueue_after->skip
+ // is impossible; enqueue_after's predecessors might also
+ // incorrectly skip over s if we were to allow other
+ // insertion points.
+ ABSL_RAW_CHECK(
+ enqueue_after->skip == nullptr || MuSameCondition(enqueue_after, s),
+ "Mutex Enqueue failure");
+
+ if (enqueue_after != head && enqueue_after->may_skip &&
+ MuSameCondition(enqueue_after, enqueue_after->next)) {
+ // enqueue_after can skip to its new successor, s
+ enqueue_after->skip = enqueue_after->next;
+ }
+ if (MuSameCondition(s, s->next)) { // s->may_skip is known to be true
+ s->skip = s->next; // s may skip to its successor
+ }
+ } else { // enqueue not done any other way, so
+ // we're inserting s at the back
+ // s will become new head; copy data from head into it
+ s->next = head->next; // add s after head
+ head->next = s;
+ s->readers = head->readers; // reader count is from previous head
+ s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
+ if (head->may_skip && MuSameCondition(head, s)) {
+ // head now has successor; may skip
+ head->skip = s;
+ }
+ head = s; // s is new head
+ }
+ }
+ s->state.store(PerThreadSynch::kQueued, std::memory_order_relaxed);
+ return head;
+}
+
+// Dequeue the successor pw->next of thread pw from the Mutex waiter queue
+// whose last element is head. The new head element is returned, or null
+// if the list is made empty.
+// Dequeue is called with both spinlock and Mutex held.
+static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
+ PerThreadSynch *w = pw->next;
+ pw->next = w->next; // snip w out of list
+ if (head == w) { // we removed the head
+ head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
+ } else if (pw != head && MuSameCondition(pw, pw->next)) {
+ // pw can skip to its new successor
+ if (pw->next->skip !=
+        nullptr) {  // either skip to its successor's skip target
+ pw->skip = pw->next->skip;
+ } else { // or to pw's successor
+ pw->skip = pw->next;
+ }
+ }
+ return head;
+}
+
+// Traverse the elements [ pw->next, h] of the circular list whose last element
+// is head.
+// Remove all elements with wake==true and place them in the
+// singly-linked list wake_list in the order found. Assumes that
+// there is only one such element if the element has how == kExclusive.
+// Return the new head.
+static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
+ PerThreadSynch *pw,
+ PerThreadSynch **wake_tail) {
+ PerThreadSynch *orig_h = head;
+ PerThreadSynch *w = pw->next;
+ bool skipped = false;
+ do {
+ if (w->wake) { // remove this element
+ ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
+ // we're removing pw's successor so either pw->skip is zero or we should
+ // already have removed pw since if pw->skip!=null, pw has the same
+ // condition as w.
+ head = Dequeue(head, pw);
+ w->next = *wake_tail; // keep list terminated
+ *wake_tail = w; // add w to wake_list;
+ wake_tail = &w->next; // next addition to end
+ if (w->waitp->how == kExclusive) { // wake at most 1 writer
+ break;
+ }
+ } else { // not waking this one; skip
+ pw = Skip(w); // skip as much as possible
+ skipped = true;
+ }
+ w = pw->next;
+ // We want to stop processing after we've considered the original head,
+ // orig_h. We can't test for w==orig_h in the loop because w may skip over
+ // it; we are guaranteed only that w's predecessor will not skip over
+ // orig_h. When we've considered orig_h, either we've processed it and
+ // removed it (so orig_h != head), or we considered it and skipped it (so
+ // skipped==true && pw == head because skipping from head always skips by
+ // just one, leaving pw pointing at head). So we want to
+ // continue the loop with the negation of that expression.
+ } while (orig_h == head && (pw != head || !skipped));
+ return head;
+}
+
+// Try to remove thread s from the list of waiters on this mutex.
+// Does nothing if s is not on the waiter list.
+void Mutex::TryRemove(PerThreadSynch *s) {
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // acquire spinlock & lock
+ if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
+ mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ if (h != nullptr) {
+ PerThreadSynch *pw = h; // pw is w's predecessor
+ PerThreadSynch *w;
+ if ((w = pw->next) != s) { // search for thread,
+ do { // processing at least one element
+ if (!MuSameCondition(s, w)) { // seeking different condition
+ pw = Skip(w); // so skip all that won't match
+ // we don't have to worry about dangling skip fields
+ // in the threads we skipped; none can point to s
+ // because their condition differs from s
+ } else { // seeking same condition
+ FixSkip(w, s); // fix up any skip pointer from w to s
+ pw = w;
+ }
+ // don't search further if we found the thread, or we're about to
+ // process the first thread again.
+ } while ((w = pw->next) != s && pw != h);
+ }
+ if (w == s) { // found thread; remove it
+ // pw->skip may be non-zero here; the loop above ensured that
+ // no ancestor of s can skip to s, so removal is safe anyway.
+ h = Dequeue(h, pw);
+ s->next = nullptr;
+ s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ }
+ }
+ intptr_t nv;
+ do { // release spinlock and lock
+ v = mu_.load(std::memory_order_relaxed);
+ nv = v & (kMuDesig | kMuEvent);
+ if (h != nullptr) {
+ nv |= kMuWait | reinterpret_cast<intptr_t>(h);
+ h->readers = 0; // we hold writer lock
+ h->maybe_unlocking = false; // finished unlocking
+ }
+ } while (!mu_.compare_exchange_weak(v, nv,
+ std::memory_order_release,
+ std::memory_order_relaxed));
+ }
+}
+
+// Wait until thread "s", which must be the current thread, is removed from the
+// this mutex's waiter queue. If "s->waitp->timeout" has a timeout, wake up
+// if the wait extends past the absolute time specified, even if "s" is still
+// on the mutex queue. In this case, remove "s" from the queue and return
+// true, otherwise return false.
+void Mutex::Block(PerThreadSynch *s) {
+ while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
+ if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
+ // After a timeout, we go into a spin loop until we remove ourselves
+ // from the queue, or someone else removes us. We can't be sure to be
+ // able to remove ourselves in a single lock acquisition because this
+ // mutex may be held, and the holder has the right to read the centre
+ // of the waiter queue without holding the spinlock.
+ this->TryRemove(s);
+ int c = 0;
+ while (s->next != nullptr) {
+ c = Delay(c, GENTLE);
+ this->TryRemove(s);
+ }
+ if (kDebugMode) {
+ // This ensures that we test the case that TryRemove() is called when s
+ // is not on the queue.
+ this->TryRemove(s);
+ }
+ s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
+ s->waitp->cond = nullptr; // condition no longer relevant for wakeups
+ }
+ }
+ ABSL_RAW_CHECK(s->waitp != nullptr || s->suppress_fatal_errors,
+ "detected illegal recursion in Mutex code");
+ s->waitp = nullptr;
+}
+
+// Wake thread w, and return the next thread in the list.
+PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
+ PerThreadSynch *next = w->next;
+ w->next = nullptr;
+ w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ IncrementSynchSem(this, w);
+
+ return next;
+}
+
+static GraphId GetGraphIdLocked(Mutex *mu)
+ EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
+ if (!deadlock_graph) { // (re)create the deadlock graph.
+ deadlock_graph =
+ new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
+ GraphCycles;
+ }
+ return deadlock_graph->GetId(mu);
+}
+
+static GraphId GetGraphId(Mutex *mu) LOCKS_EXCLUDED(deadlock_graph_mu) {
+ deadlock_graph_mu.Lock();
+ GraphId id = GetGraphIdLocked(mu);
+ deadlock_graph_mu.Unlock();
+ return id;
+}
+
+// Record a lock acquisition. This is used in debug mode for deadlock
+// detection. The held_locks pointer points to the relevant data
+// structure for each case.
+static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+ int n = held_locks->n;
+ int i = 0;
+ while (i != n && held_locks->locks[i].id != id) {
+ i++;
+ }
+ if (i == n) {
+ if (n == ABSL_ARRAYSIZE(held_locks->locks)) {
+ held_locks->overflow = true; // lost some data
+ } else { // we have room for lock
+ held_locks->locks[i].mu = mu;
+ held_locks->locks[i].count = 1;
+ held_locks->locks[i].id = id;
+ held_locks->n = n + 1;
+ }
+ } else {
+ held_locks->locks[i].count++;
+ }
+}
+
+// Record a lock release. Each call to LockEnter(mu, id, x) should be
+// eventually followed by a call to LockLeave(mu, id, x) by the same thread.
+// The event is not processed if it is not needed, i.e., when deadlock
+// detection is disabled.
+static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+ int n = held_locks->n;
+ int i = 0;
+ while (i != n && held_locks->locks[i].id != id) {
+ i++;
+ }
+ if (i == n) {
+ if (!held_locks->overflow) {
+ // The deadlock id may have been reassigned after ForgetDeadlockInfo,
+ // but in that case mu should still be present.
+ i = 0;
+ while (i != n && held_locks->locks[i].mu != mu) {
+ i++;
+ }
+ if (i == n) { // mu missing means releasing unheld lock
+ SynchEvent *mu_events = GetSynchEvent(mu);
+        ABSL_RAW_LOG(FATAL,
+                     "thread releasing lock it does not hold: %p %s",
+                     static_cast<void *>(mu),
+                     mu_events == nullptr ? "" : mu_events->name);
+ }
+ }
+ } else if (held_locks->locks[i].count == 1) {
+ held_locks->n = n - 1;
+ held_locks->locks[i] = held_locks->locks[n - 1];
+ held_locks->locks[n - 1].id = InvalidGraphId();
+ held_locks->locks[n - 1].mu =
+ nullptr; // clear mu to please the leak detector.
+ } else {
+ assert(held_locks->locks[i].count > 0);
+ held_locks->locks[i].count--;
+ }
+}
+
+// Call LockEnter() if in debug mode and deadlock detection is enabled.
+static inline void DebugOnlyLockEnter(Mutex *mu) {
+ if (kDebugMode) {
+ if (synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ LockEnter(mu, GetGraphId(mu), Synch_GetAllLocks());
+ }
+ }
+}
+
+// Call LockEnter() if in debug mode and deadlock detection is enabled.
+static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
+ if (kDebugMode) {
+ if (synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ LockEnter(mu, id, Synch_GetAllLocks());
+ }
+ }
+}
+
+// Call LockLeave() if in debug mode and deadlock detection is enabled.
+static inline void DebugOnlyLockLeave(Mutex *mu) {
+ if (kDebugMode) {
+ if (synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ LockLeave(mu, GetGraphId(mu), Synch_GetAllLocks());
+ }
+ }
+}
+
+static char *StackString(void **pcs, int n, char *buf, int maxlen,
+ bool symbolize) {
+ static const int kSymLen = 200;
+ char sym[kSymLen];
+ int len = 0;
+ for (int i = 0; i != n; i++) {
+ if (symbolize) {
+ if (!symbolizer(pcs[i], sym, kSymLen)) {
+ sym[0] = '\0';
+ }
+ snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
+ (i == 0 ? "\n" : ""),
+ pcs[i], sym);
+ } else {
+ snprintf(buf + len, maxlen - len, " %p", pcs[i]);
+ }
+ len += strlen(&buf[len]);
+ }
+ return buf;
+}
+
+static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
+ void *pcs[40];
+ return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
+ maxlen, symbolize);
+}
+
+namespace {
+enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle;
+ // a path this long would be remarkable
+// Buffers required to report a deadlock.
+// We do not allocate them on stack to avoid large stack frame.
+struct DeadlockReportBuffers {
+ char buf[6100];
+ GraphId path[kMaxDeadlockPathLen];
+};
+
+struct ScopedDeadlockReportBuffers {
+ ScopedDeadlockReportBuffers() {
+ b = reinterpret_cast<DeadlockReportBuffers *>(
+ base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
+ }
+ ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
+ DeadlockReportBuffers *b;
+};
+
+// Helper to pass to GraphCycles::UpdateStackTrace.
+int GetStack(void** stack, int max_depth) {
+ return absl::GetStackTrace(stack, max_depth, 3);
+}
+} // anonymous namespace
+
+// Called in debug mode when a thread is about to acquire a lock in a way that
+// may block.
+static GraphId DeadlockCheck(Mutex *mu) {
+ if (synch_deadlock_detection.load(std::memory_order_acquire) ==
+ OnDeadlockCycle::kIgnore) {
+ return InvalidGraphId();
+ }
+
+ SynchLocksHeld *all_locks = Synch_GetAllLocks();
+
+ absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
+ const GraphId mu_id = GetGraphIdLocked(mu);
+
+ if (all_locks->n == 0) {
+ // There are no other locks held. Return now so that we don't need to
+ // call GetSynchEvent(). This way we do not record the stack trace
+    // for this Mutex. It's ok: if this Mutex is involved in a deadlock cycle,
+    // it cannot be the first lock acquired in every thread of the cycle, so
+    // some other thread will still record an edge involving it.
+ return mu_id;
+ }
+
+ // We prefer to keep stack traces that show a thread holding and acquiring
+ // as many locks as possible. This increases the chances that a given edge
+ // in the acquires-before graph will be represented in the stack traces
+ // recorded for the locks.
+ deadlock_graph->UpdateStackTrace(mu_id, all_locks->n + 1, GetStack);
+
+ // For each other mutex already held by this thread:
+ for (int i = 0; i != all_locks->n; i++) {
+ const GraphId other_node_id = all_locks->locks[i].id;
+ const Mutex *other =
+ static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
+ if (other == nullptr) {
+ // Ignore stale lock
+ continue;
+ }
+
+ // Add the acquired-before edge to the graph.
+ if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
+ ScopedDeadlockReportBuffers scoped_buffers;
+ DeadlockReportBuffers *b = scoped_buffers.b;
+ static int number_of_reported_deadlocks = 0;
+ number_of_reported_deadlocks++;
+      // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
+ bool symbolize = number_of_reported_deadlocks <= 2;
+ ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
+ CurrentStackString(b->buf, sizeof (b->buf), symbolize));
+ int len = 0;
+ for (int j = 0; j != all_locks->n; j++) {
+ void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
+ if (pr != nullptr) {
+ snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
+ len += static_cast<int>(strlen(&b->buf[len]));
+ }
+ }
+ ABSL_RAW_LOG(ERROR, "Acquiring %p Mutexes held: %s",
+ static_cast<void *>(mu), b->buf);
+ ABSL_RAW_LOG(ERROR, "Cycle: ");
+ int path_len = deadlock_graph->FindPath(
+ mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
+ for (int j = 0; j != path_len; j++) {
+ GraphId id = b->path[j];
+ Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
+ if (path_mu == nullptr) continue;
+ void** stack;
+ int depth = deadlock_graph->GetStackTrace(id, &stack);
+ snprintf(b->buf, sizeof(b->buf),
+ "mutex@%p stack: ", static_cast<void *>(path_mu));
+ StackString(stack, depth, b->buf + strlen(b->buf),
+ static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
+ symbolize);
+ ABSL_RAW_LOG(ERROR, "%s", b->buf);
+ }
+ if (synch_deadlock_detection.load(std::memory_order_acquire) ==
+ OnDeadlockCycle::kAbort) {
+ deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
+ ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
+ return mu_id;
+ }
+ break; // report at most one potential deadlock per acquisition
+ }
+ }
+
+ return mu_id;
+}
+
+// Invoke DeadlockCheck() iff we're in debug mode and
+// deadlock checking has been enabled.
+static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
+ if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ return DeadlockCheck(mu);
+ } else {
+ return InvalidGraphId();
+ }
+}
+
+void Mutex::ForgetDeadlockInfo() {
+ if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ deadlock_graph_mu.Lock();
+ if (deadlock_graph != nullptr) {
+ deadlock_graph->RemoveNode(this);
+ }
+ deadlock_graph_mu.Unlock();
+ }
+}
+
+void Mutex::AssertNotHeld() const {
+ // We have the data to allow this check only if in debug mode and deadlock
+ // detection is enabled.
+ if (kDebugMode &&
+ (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
+ synch_deadlock_detection.load(std::memory_order_acquire) !=
+ OnDeadlockCycle::kIgnore) {
+ GraphId id = GetGraphId(const_cast<Mutex *>(this));
+ SynchLocksHeld *locks = Synch_GetAllLocks();
+ for (int i = 0; i != locks->n; i++) {
+ if (locks->locks[i].id == id) {
+ SynchEvent *mu_events = GetSynchEvent(this);
+ ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
+ static_cast<const void *>(this),
+ (mu_events == nullptr ? "" : mu_events->name));
+ }
+ }
+ }
+}
+
+// Attempt to acquire *mu, and return whether successful. The implementation
+// may spin for a short while if the lock cannot be acquired immediately.
+static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
+ int c = mutex_globals.spinloop_iterations;
+ int result = -1; // result of operation: 0=false, 1=true, -1=unknown
+
+ do { // do/while somewhat faster on AMD
+ intptr_t v = mu->load(std::memory_order_relaxed);
+ if ((v & (kMuReader|kMuEvent)) != 0) { // a reader or tracing -> give up
+ result = 0;
+ } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
+ mu->compare_exchange_strong(v, kMuWriter | v,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ result = 1;
+ }
+ } while (result == -1 && --c > 0);
+ return result == 1;
+}
+
+ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // try fast acquire, then spin loop
+ if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
+ !mu_.compare_exchange_strong(v, kMuWriter | v,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ // try spin acquire, then slow loop
+ if (!TryAcquireWithSpinning(&this->mu_)) {
+ this->LockSlow(kExclusive, nullptr, 0);
+ }
+ }
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+}
+
+ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // try fast acquire, then slow loop
+ if ((v & (kMuWriter | kMuWait | kMuEvent)) != 0 ||
+ !mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ this->LockSlow(kShared, nullptr, 0);
+ }
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+}
+
+void Mutex::LockWhen(const Condition &cond) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ this->LockSlow(kExclusive, &cond, 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+}
+
+bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
+ return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+}
+
+bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kExclusive, &cond,
+ KernelTimeout(deadline), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+ return res;
+}
+
+void Mutex::ReaderLockWhen(const Condition &cond) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ this->LockSlow(kShared, &cond, 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+}
+
+bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
+ absl::Duration timeout) {
+ return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+}
+
+bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
+ absl::Time deadline) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(deadline), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+ return res;
+}
+
+void Mutex::Await(const Condition &cond) {
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ } else { // normal case
+ ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
+ "condition untrue on return from Await");
+ }
+}
+
+bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
+ return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
+}
+
+bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ return true;
+ }
+
+ KernelTimeout t{deadline};
+ bool res = this->AwaitCommon(cond, t);
+ ABSL_RAW_CHECK(res || t.has_timeout(),
+ "condition untrue on return from Await");
+ return res;
+}
+
+bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
+ this->AssertReaderHeld();
+ MuHow how =
+ (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
+ SynchWaitParams waitp(
+ how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
+ nullptr /*no cv_word*/);
+ int flags = kMuHasBlocked;
+ if (!Condition::GuaranteedEqual(&cond, nullptr)) {
+ flags |= kMuIsCond;
+ }
+ this->UnlockSlow(&waitp);
+ this->Block(waitp.thread);
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, TsanFlags(how));
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, TsanFlags(how));
+ this->LockSlowLoop(&waitp, flags);
+ bool res = waitp.cond != nullptr || // => cond known true from LockSlowLoop
+ cond.Eval();
+ ABSL_TSAN_MUTEX_POST_LOCK(this, TsanFlags(how), 0);
+ return res;
+}
+
+ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
+ mu_.compare_exchange_strong(v, kMuWriter | v,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ DebugOnlyLockEnter(this);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
+ return true;
+ }
+ if ((v & kMuEvent) != 0) { // we're recording events
+ if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire
+ mu_.compare_exchange_strong(
+ v, (kExclusive->fast_or | v) + kExclusive->fast_add,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ DebugOnlyLockEnter(this);
+ PostSynchEvent(this, SYNCH_EV_TRYLOCK_SUCCESS);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
+ return true;
+ } else {
+ PostSynchEvent(this, SYNCH_EV_TRYLOCK_FAILED);
+ }
+ }
+ ABSL_TSAN_MUTEX_POST_LOCK(
+ this, __tsan_mutex_try_lock | __tsan_mutex_try_lock_failed, 0);
+ return false;
+}
+
+ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this,
+ __tsan_mutex_read_lock | __tsan_mutex_try_lock);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ // The while-loops (here and below) iterate only if the mutex word keeps
+ // changing (typically because the reader count changes) under the CAS. We
+ // limit the number of attempts to avoid having to think about livelock.
+ int loop_limit = 5;
+ while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
+ if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ DebugOnlyLockEnter(this);
+ ABSL_TSAN_MUTEX_POST_LOCK(
+ this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
+ return true;
+ }
+ loop_limit--;
+ v = mu_.load(std::memory_order_relaxed);
+ }
+ if ((v & kMuEvent) != 0) { // we're recording events
+ loop_limit = 5;
+ while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
+ if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ DebugOnlyLockEnter(this);
+ PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_SUCCESS);
+ ABSL_TSAN_MUTEX_POST_LOCK(
+ this, __tsan_mutex_read_lock | __tsan_mutex_try_lock, 0);
+ return true;
+ }
+ loop_limit--;
+ v = mu_.load(std::memory_order_relaxed);
+ }
+ if ((v & kMuEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_READERTRYLOCK_FAILED);
+ }
+ }
+ ABSL_TSAN_MUTEX_POST_LOCK(this,
+ __tsan_mutex_read_lock | __tsan_mutex_try_lock |
+ __tsan_mutex_try_lock_failed,
+ 0);
+ return false;
+}
+
+ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() {
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+ DebugOnlyLockLeave(this);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+
+ if (kDebugMode && ((v & (kMuWriter | kMuReader)) != kMuWriter)) {
+ ABSL_RAW_LOG(FATAL, "Mutex unlocked when destroyed or not locked: v=0x%x",
+ static_cast<unsigned>(v));
+ }
+
+ // should_try_cas is whether we'll try a compare-and-swap immediately.
+ // NOTE: optimized out when kDebugMode is false.
+ bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
+ (v & (kMuWait | kMuDesig)) != kMuWait);
+  // But we can use an alternate computation of it that compilers
+  // currently don't find on their own. When that changes, this function
+  // can be simplified.
+ intptr_t x = (v ^ (kMuWriter | kMuWait)) & (kMuWriter | kMuEvent);
+ intptr_t y = (v ^ (kMuWriter | kMuWait)) & (kMuWait | kMuDesig);
+  // Claim: "x == 0 && y > 0" is equivalent to should_try_cas.
+ // Also, because kMuWriter and kMuEvent exceed kMuDesig and kMuWait,
+ // all possible non-zero values for x exceed all possible values for y.
+ // Therefore, (x == 0 && y > 0) == (x < y).
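+  // For example, in the common case v == kMuWriter (lock held exclusively
+  // with no waiters and no designated waker): x == kMuWait & (kMuWriter |
+  // kMuEvent) == 0, and y == kMuWait & (kMuWait | kMuDesig) == kMuWait > 0,
+  // so x < y and the fast-path CAS below is attempted.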
+ if (kDebugMode && should_try_cas != (x < y)) {
+    // We would usually use PRIdPTR here, but it is not correctly implemented
+    // in the Android toolchain.
+ ABSL_RAW_LOG(FATAL, "internal logic error %llx %llx %llx\n",
+ static_cast<long long>(v), static_cast<long long>(x),
+ static_cast<long long>(y));
+ }
+ if (x < y &&
+ mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ // fast writer release (writer with no waiters or with designated waker)
+ } else {
+ this->UnlockSlow(nullptr /*no waitp*/); // take slow path
+ }
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+}
+
+// Requires v to represent a reader-locked state.
+static bool ExactlyOneReader(intptr_t v) {
+ assert((v & (kMuWriter|kMuReader)) == kMuReader);
+ assert((v & kMuHigh) != 0);
+ // The more straightforward "(v & kMuHigh) == kMuOne" also works, but
+ // on some architectures the following generates slightly smaller code.
+ // It may be faster too.
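+  // Worked example: kMuHigh ^ kMuOne is the reader-count mask with its
+  // lowest count bit cleared, so the test below is zero exactly when the
+  // only count bit set is kMuOne, i.e. when the reader count is one.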
+ constexpr intptr_t kMuMultipleWaitersMask = kMuHigh ^ kMuOne;
+ return (v & kMuMultipleWaitersMask) == 0;
+}
+
+ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
+ DebugOnlyLockLeave(this);
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ assert((v & (kMuWriter|kMuReader)) == kMuReader);
+ if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
+ // fast reader release (reader with no waiters)
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
+ if (mu_.compare_exchange_strong(v, v - clear,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
+ return;
+ }
+ }
+ this->UnlockSlow(nullptr /*no waitp*/); // take slow path
+ ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
+}
+
+// The zap_desig_waker bitmask is used to clear the designated waker flag in
+// the mutex if this thread has blocked, and therefore may be the designated
+// waker.
+static const intptr_t zap_desig_waker[] = {
+ ~static_cast<intptr_t>(0), // not blocked
+ ~static_cast<intptr_t>(
+ kMuDesig) // blocked; turn off the designated waker bit
+};
+
+// The ignore_waiting_writers bitmask is used to ignore the existence
+// of waiting writers if a reader that has already blocked once
+// wakes up.
+static const intptr_t ignore_waiting_writers[] = {
+ ~static_cast<intptr_t>(0), // not blocked
+ ~static_cast<intptr_t>(
+ kMuWrWait) // blocked; pretend there are no waiting writers
+};
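+
+// Both arrays above are indexed by (flags & kMuHasBlocked), which is 0 if the
+// thread has not yet blocked and 1 if it has (assuming kMuHasBlocked is the
+// 0x1 bit, per its definition earlier in this file); this lets callers pick
+// the appropriate mask without a branch.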
+
+// Internal version of LockWhen(). See LockSlowWithDeadline()
+void Mutex::LockSlow(MuHow how, const Condition *cond, int flags) {
+ ABSL_RAW_CHECK(
+ this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
+ "condition untrue on return from LockSlow");
+}
+
+// Compute cond->Eval() and tell race detectors that we do it under mutex mu.
+static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+ bool locking, Mutex::MuHow how) {
+ // Delicate annotation dance.
+ // We are currently inside of read/write lock/unlock operation.
+  // All memory accesses are ignored inside of mutex operations; in addition,
+  // for an unlock operation tsan considers that we've already released the
+  // mutex.
+ bool res = false;
+ if (locking) {
+ // For lock we pretend that we have finished the operation,
+ // evaluate the predicate, then unlock the mutex and start locking it again
+ // to match the annotation at the end of outer lock operation.
+ // Note: we can't simply do POST_LOCK, Eval, PRE_LOCK, because then tsan
+ // will think the lock acquisition is recursive which will trigger
+ // deadlock detector.
+ ABSL_TSAN_MUTEX_POST_LOCK(mu, TsanFlags(how), 0);
+ res = cond->Eval();
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, TsanFlags(how));
+ ABSL_TSAN_MUTEX_POST_UNLOCK(mu, TsanFlags(how));
+ ABSL_TSAN_MUTEX_PRE_LOCK(mu, TsanFlags(how));
+ } else {
+ // Similarly, for unlock we pretend that we have unlocked the mutex,
+ // lock the mutex, evaluate the predicate, and start unlocking it again
+ // to match the annotation at the end of outer unlock operation.
+ ABSL_TSAN_MUTEX_POST_UNLOCK(mu, TsanFlags(how));
+ ABSL_TSAN_MUTEX_PRE_LOCK(mu, TsanFlags(how));
+ ABSL_TSAN_MUTEX_POST_LOCK(mu, TsanFlags(how), 0);
+ res = cond->Eval();
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(mu, TsanFlags(how));
+ }
+ // Prevent unused param warnings in non-TSAN builds.
+ static_cast<void>(mu);
+ static_cast<void>(how);
+ return res;
+}
+
+// Compute cond->Eval() hiding it from race detectors.
+// We are hiding it because inside of UnlockSlow we can evaluate a predicate
+// that was just added by a concurrent Lock operation; Lock adds the predicate
+// to the internal Mutex list without actually acquiring the Mutex
+// (it only acquires the internal spinlock, which is rightfully invisible for
+// tsan). As a result there is no tsan-visible synchronization between the
+// addition and this thread. So if we were to enable race detection here,
+// it would race with the predicate initialization.
+static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
+ // Memory accesses are already ignored inside of lock/unlock operations,
+ // but synchronization operations are also ignored. When we evaluate the
+ // predicate we must ignore only memory accesses but not synchronization,
+ // because missed synchronization can lead to false reports later.
+ // So we "divert" (which un-ignores both memory accesses and synchronization)
+ // and then separately turn on ignores of memory accesses.
+ ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
+ ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+ bool res = cond->Eval();
+ ANNOTATE_IGNORE_READS_AND_WRITES_END();
+ ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
+ return res;
+}
+
+// Internal equivalent of *LockWhenWithDeadline(), where
+// "t" represents the absolute timeout; !t.has_timeout() means "forever".
+// "how" is "kShared" (for ReaderLockWhen) or "kExclusive" (for LockWhen)
+// In flags, bits are OR-ed together:
+// - kMuHasBlocked indicates that the client has already blocked on the call so
+// the designated waker bit must be cleared and waiting writers should not
+// obstruct this call
+// - kMuIsCond indicates that this is a conditional acquire (condition variable,
+// Await, LockWhen) so contention profiling should be suppressed.
+bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
+ KernelTimeout t, int flags) {
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ bool unlock = false;
+ if ((v & how->fast_need_zero) == 0 && // try fast acquire
+ mu_.compare_exchange_strong(
+ v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
+ how->fast_add,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ if (cond == nullptr || EvalConditionAnnotated(cond, this, true, how)) {
+ return true;
+ }
+ unlock = true;
+ }
+ SynchWaitParams waitp(
+ how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
+ nullptr /*no cv_word*/);
+ if (!Condition::GuaranteedEqual(cond, nullptr)) {
+ flags |= kMuIsCond;
+ }
+ if (unlock) {
+ this->UnlockSlow(&waitp);
+ this->Block(waitp.thread);
+ flags |= kMuHasBlocked;
+ }
+ this->LockSlowLoop(&waitp, flags);
+ return waitp.cond != nullptr || // => cond known true from LockSlowLoop
+ cond == nullptr || EvalConditionAnnotated(cond, this, true, how);
+}
+
+// RAW_CHECK_FMT() takes a condition, a printf-style format string, and
+// the printf-style argument list. The format string must be a literal.
+// Arguments after the first are not evaluated unless the condition is true.
+#define RAW_CHECK_FMT(cond, ...) \
+ do { \
+ if (ABSL_PREDICT_FALSE(!(cond))) { \
+ ABSL_RAW_LOG(FATAL, "Check " #cond " failed: " __VA_ARGS__); \
+ } \
+ } while (0)
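+
+// Example use (a hypothetical check; `w` and `mu` are illustrative names):
+//   RAW_CHECK_FMT(w != nullptr, "waiter missing for mutex %p",
+//                 static_cast<void *>(mu));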
+
+static void CheckForMutexCorruption(intptr_t v, const char* label) {
+ // Test for either of two situations that should not occur in v:
+ // kMuWriter and kMuReader
+ // kMuWrWait and !kMuWait
+ const intptr_t w = v ^ kMuWait;
+ // By flipping that bit, we can now test for:
+ // kMuWriter and kMuReader in w
+ // kMuWrWait and kMuWait in w
+ // We've chosen these two pairs of values to be so that they will overlap,
+ // respectively, when the word is left shifted by three. This allows us to
+ // save a branch in the common (correct) case of them not being coincident.
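+  // For example, if v erroneously has both kMuReader and kMuWriter set, then
+  // w retains both bits (flipping kMuWait touches neither), and (w << 3)
+  // has kMuWriter set wherever w has kMuReader set, so the fast test below
+  // is nonzero and the RAW_CHECK_FMTs identify which invariant failed.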
+ static_assert(kMuReader << 3 == kMuWriter, "must match");
+ static_assert(kMuWait << 3 == kMuWrWait, "must match");
+ if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
+ RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
+ "%s: Mutex corrupt: both reader and writer lock held: %p",
+ label, reinterpret_cast<void *>(v));
+ RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
+ "%s: Mutex corrupt: waiting writer with no waiters: %p",
+ label, reinterpret_cast<void *>(v));
+ assert(false);
+}
+
+void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
+ int c = 0;
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ if ((v & kMuEvent) != 0) {
+ PostSynchEvent(this,
+ waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
+ }
+ ABSL_RAW_CHECK(
+ waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ for (;;) {
+ v = mu_.load(std::memory_order_relaxed);
+ CheckForMutexCorruption(v, "Lock");
+ if ((v & waitp->how->slow_need_zero) == 0) {
+ if (mu_.compare_exchange_strong(
+ v, (waitp->how->fast_or |
+ (v & zap_desig_waker[flags & kMuHasBlocked])) +
+ waitp->how->fast_add,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ if (waitp->cond == nullptr ||
+ EvalConditionAnnotated(waitp->cond, this, true, waitp->how)) {
+ break; // we timed out, or condition true, so return
+ }
+ this->UnlockSlow(waitp); // got lock but condition false
+ this->Block(waitp->thread);
+ flags |= kMuHasBlocked;
+ c = 0;
+ }
+ } else { // need to access waiter list
+ bool dowait = false;
+ if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ // This thread tries to become the one and only waiter.
+ PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
+ intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
+ kMuWait;
+ ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
+ if (waitp->how == kExclusive && (v & kMuReader) != 0) {
+ nv |= kMuWrWait;
+ }
+ if (mu_.compare_exchange_strong(
+ v, reinterpret_cast<intptr_t>(new_h) | nv,
+ std::memory_order_release, std::memory_order_relaxed)) {
+ dowait = true;
+ } else { // attempted Enqueue() failed
+ // zero out the waitp field set by Enqueue()
+ waitp->thread->waitp = nullptr;
+ }
+ } else if ((v & waitp->how->slow_inc_need_zero &
+ ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
+ // This is a reader that needs to increment the reader count,
+ // but the count is currently held in the last waiter.
+ if (mu_.compare_exchange_strong(
+ v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
+ kMuReader,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ h->readers += kMuOne; // inc reader count in waiter
+ do { // release spinlock
+ v = mu_.load(std::memory_order_relaxed);
+ } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
+ std::memory_order_release,
+ std::memory_order_relaxed));
+ if (waitp->cond == nullptr ||
+ EvalConditionAnnotated(waitp->cond, this, true, waitp->how)) {
+ break; // we timed out, or condition true, so return
+ }
+ this->UnlockSlow(waitp); // got lock but condition false
+ this->Block(waitp->thread);
+ flags |= kMuHasBlocked;
+ c = 0;
+ }
+ } else if ((v & kMuSpin) == 0 && // attempt to queue ourselves
+ mu_.compare_exchange_strong(
+ v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
+ kMuWait,
+ std::memory_order_acquire, std::memory_order_relaxed)) {
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
+ intptr_t wr_wait = 0;
+ ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
+ if (waitp->how == kExclusive && (v & kMuReader) != 0) {
+ wr_wait = kMuWrWait; // give priority to a waiting writer
+ }
+ do { // release spinlock
+ v = mu_.load(std::memory_order_relaxed);
+ } while (!mu_.compare_exchange_weak(
+ v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
+ reinterpret_cast<intptr_t>(new_h),
+ std::memory_order_release, std::memory_order_relaxed));
+ dowait = true;
+ }
+ if (dowait) {
+ this->Block(waitp->thread); // wait until removed from list or timeout
+ flags |= kMuHasBlocked;
+ c = 0;
+ }
+ }
+ ABSL_RAW_CHECK(
+ waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ c = Delay(c, GENTLE); // delay, then try again
+ }
+ ABSL_RAW_CHECK(
+ waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+ if ((v & kMuEvent) != 0) {
+ PostSynchEvent(this,
+ waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
+ SYNCH_EV_READERLOCK_RETURNING);
+ }
+}
+
+// Unlock this mutex, which is held by the current thread.
+// If waitp is non-null, it must be the wait parameters for the current thread
+// which holds the lock but is not runnable because its condition is false
+// or it is in the process of blocking on a condition variable; it must requeue
+// itself on the mutex/condvar to wait for its condition to become true.
+void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+ this->AssertReaderHeld();
+ CheckForMutexCorruption(v, "Unlock");
+ if ((v & kMuEvent) != 0) {
+ PostSynchEvent(this,
+ (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
+ }
+ int c = 0;
+ // the waiter under consideration to wake, or zero
+ PerThreadSynch *w = nullptr;
+ // the predecessor to w or zero
+ PerThreadSynch *pw = nullptr;
+ // head of the list searched previously, or zero
+ PerThreadSynch *old_h = nullptr;
+ // a condition that's known to be false.
+ const Condition *known_false = nullptr;
+ PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake
+ intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
+ // later writer could have acquired the lock
+ // (starvation avoidance)
+ ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
+ waitp->thread->suppress_fatal_errors,
+ "detected illegal recursion into Mutex code");
+  // This loop finds threads to wake (collecting them in wake_list), if any,
+  // and removes them from the list of waiters. In addition, it places
+  // waitp.thread on the queue of waiters if waitp is non-null.
+ for (;;) {
+ v = mu_.load(std::memory_order_relaxed);
+ if ((v & kMuWriter) != 0 && (v & (kMuWait | kMuDesig)) != kMuWait &&
+ waitp == nullptr) {
+ // fast writer release (writer with no waiters or with designated waker)
+ if (mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ return;
+ }
+ } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
+ // fast reader release (reader with no waiters)
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
+ if (mu_.compare_exchange_strong(v, v - clear,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ return;
+ }
+ } else if ((v & kMuSpin) == 0 && // attempt to get spinlock
+ mu_.compare_exchange_strong(v, v | kMuSpin,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ if ((v & kMuWait) == 0) { // no one to wake
+ intptr_t nv;
+ bool do_enqueue = true; // always Enqueue() the first time
+ ABSL_RAW_CHECK(waitp != nullptr,
+ "UnlockSlow is confused"); // about to sleep
+ do { // must loop to release spinlock as reader count may change
+ v = mu_.load(std::memory_order_relaxed);
+ // decrement reader count if there are readers
+ intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
+ PerThreadSynch *new_h = nullptr;
+ if (do_enqueue) {
+ // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
+ // we must not retry here. The initial attempt will always have
+ // succeeded, further attempts would enqueue us against *this due to
+ // Fer() handling.
+ do_enqueue = (waitp->cv_word == nullptr);
+ new_h = Enqueue(nullptr, waitp, new_readers, kMuIsCond);
+ }
+ intptr_t clear = kMuWrWait | kMuWriter; // by default clear write bit
+ if ((v & kMuWriter) == 0 && ExactlyOneReader(v)) { // last reader
+ clear = kMuWrWait | kMuReader; // clear read bit
+ }
+ nv = (v & kMuLow & ~clear & ~kMuSpin);
+ if (new_h != nullptr) {
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
+ } else { // new_h could be nullptr if we queued ourselves on a
+ // CondVar
+ // In that case, we must place the reader count back in the mutex
+ // word, as Enqueue() did not store it in the new waiter.
+ nv |= new_readers & kMuHigh;
+ }
+ // release spinlock & our lock; retry if reader-count changed
+ // (writer count cannot change since we hold lock)
+ } while (!mu_.compare_exchange_weak(v, nv,
+ std::memory_order_release,
+ std::memory_order_relaxed));
+ break;
+ }
+
+ // There are waiters.
+ // Set h to the head of the circular waiter list.
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
+ // a reader but not the last
+ h->readers -= kMuOne; // release our lock
+ intptr_t nv = v; // normally just release spinlock
+ if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+ ABSL_RAW_CHECK(new_h != nullptr,
+ "waiters disappeared during Enqueue()!");
+ nv &= kMuLow;
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
+ }
+ mu_.store(nv, std::memory_order_release); // release spinlock
+ // can release with a store because there were waiters
+ break;
+ }
+
+ // Either we didn't search before, or we marked the queue
+ // as "maybe_unlocking" and no one else should have changed it.
+ ABSL_RAW_CHECK(old_h == nullptr || h->maybe_unlocking,
+ "Mutex queue changed beneath us");
+
+ // The lock is becoming free, and there's a waiter
+ if (old_h != nullptr &&
+ !old_h->may_skip) { // we used old_h as a terminator
+ old_h->may_skip = true; // allow old_h to skip once more
+ ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
+ if (h != old_h && MuSameCondition(old_h, old_h->next)) {
+ old_h->skip = old_h->next; // old_h not head & can skip to successor
+ }
+ }
+ if (h->next->waitp->how == kExclusive &&
+ Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
+ // easy case: writer with no condition; no need to search
+ pw = h; // wake w, the successor of h (=pw)
+ w = h->next;
+ w->wake = true;
+        // We are waking up a writer. This writer may be racing against an
+        // already awake reader for the lock. We want the writer to usually
+        // win this race, because if it doesn't, we could keep waking readers
+        // perpetually and writers would starve. Worse than that, this can
+        // also starve other readers if kMuWrWait gets set later.
+ wr_wait = kMuWrWait;
+ } else if (w != nullptr && (w->waitp->how == kExclusive || h == old_h)) {
+ // we found a waiter w to wake on a previous iteration and either it's
+ // a writer, or we've searched the entire list so we have all the
+ // readers.
+ if (pw == nullptr) { // if w's predecessor is unknown, it must be h
+ pw = h;
+ }
+ } else {
+ // At this point we don't know all the waiters to wake, and the first
+ // waiter has a condition or is a reader. We avoid searching over
+ // waiters we've searched on previous iterations by starting at
+ // old_h if it's set. If old_h==h, there's no one to wakeup at all.
+ if (old_h == h) { // we've searched before, and nothing's new
+ // so there's no one to wake.
+ intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
+ h->readers = 0;
+ h->maybe_unlocking = false; // finished unlocking
+ if (waitp != nullptr) { // we must queue ourselves and sleep
+ PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+ nv &= kMuLow;
+ if (new_h != nullptr) {
+ nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
+ } // else new_h could be nullptr if we queued ourselves on a
+ // CondVar
+ }
+ // release spinlock & lock
+ // can release with a store because there were waiters
+ mu_.store(nv, std::memory_order_release);
+ break;
+ }
+
+ // set up to walk the list
+ PerThreadSynch *w_walk; // current waiter during list walk
+ PerThreadSynch *pw_walk; // previous waiter during list walk
+ if (old_h != nullptr) { // we've searched up to old_h before
+ pw_walk = old_h;
+ w_walk = old_h->next;
+ } else { // no prior search, start at beginning
+ pw_walk =
+ nullptr; // h->next's predecessor may change; don't record it
+ w_walk = h->next;
+ }
+
+ h->may_skip = false; // ensure we never skip past h in future searches
+ // even if other waiters are queued after it.
+ ABSL_RAW_CHECK(h->skip == nullptr, "illegal skip from head");
+
+ h->maybe_unlocking = true; // we're about to scan the waiter list
+ // without the spinlock held.
+ // Enqueue must be conservative about
+ // priority queuing.
+
+ // We must release the spinlock to evaluate the conditions.
+ mu_.store(v, std::memory_order_release); // release just spinlock
+ // can release with a store because there were waiters
+
+ // h is the last waiter queued, and w_walk the first unsearched waiter.
+ // Without the spinlock, the locations mu_ and h->next may now change
+ // underneath us, but since we hold the lock itself, the only legal
+ // change is to add waiters between h and w_walk. Therefore, it's safe
+ // to walk the path from w_walk to h inclusive. (TryRemove() can remove
+ // a waiter anywhere, but it acquires both the spinlock and the Mutex)
+
+ old_h = h; // remember we searched to here
+
+          // Walk the path up to and including h looking for waiters we can wake.
+ while (pw_walk != h) {
+ w_walk->wake = false;
+ if (w_walk->waitp->cond ==
+ nullptr || // no condition => vacuously true OR
+ (w_walk->waitp->cond != known_false &&
+ // this thread's condition is not known false, AND
+ // is in fact true
+ EvalConditionIgnored(this, w_walk->waitp->cond))) {
+ if (w == nullptr) {
+ w_walk->wake = true; // can wake this waiter
+ w = w_walk;
+ pw = pw_walk;
+ if (w_walk->waitp->how == kExclusive) {
+ wr_wait = kMuWrWait;
+ break; // bail if waking this writer
+ }
+ } else if (w_walk->waitp->how == kShared) { // wake if a reader
+ w_walk->wake = true;
+ } else { // writer with true condition
+ wr_wait = kMuWrWait;
+ }
+ } else { // can't wake; condition false
+ known_false = w_walk->waitp->cond; // remember last false condition
+ }
+ if (w_walk->wake) { // we're waking reader w_walk
+ pw_walk = w_walk; // don't skip similar waiters
+ } else { // not waking; skip as much as possible
+ pw_walk = Skip(w_walk);
+ }
+ // If pw_walk == h, then load of pw_walk->next can race with
+ // concurrent write in Enqueue(). However, at the same time
+ // we do not need to do the load, because we will bail out
+ // from the loop anyway.
+ if (pw_walk != h) {
+ w_walk = pw_walk->next;
+ }
+ }
+
+ continue; // restart for(;;)-loop to wakeup w or to find more waiters
+ }
+ ABSL_RAW_CHECK(pw->next == w, "pw not w's predecessor");
+ // The first (and perhaps only) waiter we've chosen to wake is w, whose
+ // predecessor is pw. If w is a reader, we must wake all the other
+ // waiters with wake==true as well. We may also need to queue
+ // ourselves if waitp != null. The spinlock and the lock are still
+ // held.
+
+ // This traverses the list in [ pw->next, h ], where h is the head,
+ // removing all elements with wake==true and placing them in the
+ // singly-linked list wake_list. Returns the new head.
+ h = DequeueAllWakeable(h, pw, &wake_list);
+
+ intptr_t nv = (v & kMuEvent) | kMuDesig;
+ // assume no waiters left,
+ // set kMuDesig for INV1a
+
+ if (waitp != nullptr) { // we must queue ourselves and sleep
+ h = Enqueue(h, waitp, v, kMuIsCond);
+ // h is new last waiter; could be null if we queued ourselves on a
+ // CondVar
+ }
+
+ ABSL_RAW_CHECK(wake_list != kPerThreadSynchNull,
+ "unexpected empty wake list");
+
+ if (h != nullptr) { // there are waiters left
+ h->readers = 0;
+ h->maybe_unlocking = false; // finished unlocking
+ nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
+ }
+
+ // release both spinlock & lock
+ // can release with a store because there were waiters
+ mu_.store(nv, std::memory_order_release);
+ break; // out of for(;;)-loop
+ }
+ c = Delay(c, AGGRESSIVE); // aggressive here; no one can proceed till we do
+ } // end of for(;;)-loop
+
+ if (wake_list != kPerThreadSynchNull) {
+ int64_t enqueue_timestamp = wake_list->waitp->contention_start_cycles;
+ bool cond_waiter = wake_list->cond_waiter;
+ do {
+ wake_list = Wakeup(wake_list); // wake waiters
+ } while (wake_list != kPerThreadSynchNull);
+ if (!cond_waiter) {
+ // Sample lock contention events only if the (first) waiter was trying to
+ // acquire the lock, not waiting on a condition variable or Condition.
+ int64_t wait_cycles = base_internal::CycleClock::Now() - enqueue_timestamp;
+ mutex_tracer("slow release", this, wait_cycles);
+ ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+ submit_profile_data(enqueue_timestamp);
+ ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+ }
+ }
+}
+
+// Used by CondVar implementation to reacquire mutex after waking from
+// condition variable. This routine is used instead of Lock() because the
+// waiting thread may have been moved from the condition variable queue to the
+// mutex queue without a wakeup, by Trans(). In that case, when the thread is
+// finally woken, the woken thread will believe it has been woken from the
+// condition variable (i.e., its PC will be in the CondVar code), when
+// in fact it has just been woken from the mutex. Thus, it must enter the slow
+// path of the mutex in the same state as if it had just woken from the mutex.
+// That is, it must be sure to clear kMuDesig (INV1b).
+void Mutex::Trans(MuHow how) {
+ this->LockSlow(how, nullptr, kMuHasBlocked | kMuIsCond);
+}
+
+// Used by CondVar implementation to effectively wake thread w from the
+// condition variable. If this mutex is free, we simply wake the thread.
+// It will later acquire the mutex with high probability. Otherwise, we
+// enqueue thread w on this mutex.
+void Mutex::Fer(PerThreadSynch *w) {
+ int c = 0;
+ ABSL_RAW_CHECK(w->waitp->cond == nullptr,
+ "Mutex::Fer while waiting on Condition");
+ ABSL_RAW_CHECK(!w->waitp->timeout.has_timeout(),
+ "Mutex::Fer while in timed wait");
+ ABSL_RAW_CHECK(w->waitp->cv_word == nullptr,
+ "Mutex::Fer with pending CondVar queueing");
+ for (;;) {
+ intptr_t v = mu_.load(std::memory_order_relaxed);
+    // Note: must not queue if the mutex is unlocked (nobody will wake it).
+    // For example, v can be unlocked while still containing kMuWait (a
+    // conditional waiter) or kMuWait|kMuWrWait.
+ // conflicting != 0 implies that the waking thread cannot currently take
+ // the mutex, which in turn implies that someone else has it and can wake
+ // us if we queue.
+ const intptr_t conflicting =
+ kMuWriter | (w->waitp->how == kShared ? 0 : kMuReader);
+ if ((v & conflicting) == 0) {
+ w->next = nullptr;
+ w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ IncrementSynchSem(this, w);
+ return;
+ } else {
+ if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ // This thread tries to become the one and only waiter.
+ PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
+ ABSL_RAW_CHECK(new_h != nullptr,
+ "Enqueue failed"); // we must queue ourselves
+ if (mu_.compare_exchange_strong(
+ v, reinterpret_cast<intptr_t>(new_h) | (v & kMuLow) | kMuWait,
+ std::memory_order_release, std::memory_order_relaxed)) {
+ return;
+ }
+ } else if ((v & kMuSpin) == 0 &&
+ mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
+ PerThreadSynch *h = GetPerThreadSynch(v);
+ PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
+ ABSL_RAW_CHECK(new_h != nullptr,
+ "Enqueue failed"); // we must queue ourselves
+ do {
+ v = mu_.load(std::memory_order_relaxed);
+ } while (!mu_.compare_exchange_weak(
+ v,
+ (v & kMuLow & ~kMuSpin) | kMuWait |
+ reinterpret_cast<intptr_t>(new_h),
+ std::memory_order_release, std::memory_order_relaxed));
+ return;
+ }
+ }
+ c = Delay(c, GENTLE);
+ }
+}
+
+void Mutex::AssertHeld() const {
+ if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
+ SynchEvent *e = GetSynchEvent(this);
+ ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
+ static_cast<const void *>(this),
+ (e == nullptr ? "" : e->name));
+ }
+}
+
+void Mutex::AssertReaderHeld() const {
+ if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
+ SynchEvent *e = GetSynchEvent(this);
+ ABSL_RAW_LOG(
+ FATAL, "thread should hold at least a read lock on Mutex %p %s",
+ static_cast<const void *>(this), (e == nullptr ? "" : e->name));
+ }
+}
+
+// -------------------------------- condition variables
+static const intptr_t kCvSpin = 0x0001L; // spinlock protects waiter list
+static const intptr_t kCvEvent = 0x0002L; // record events
+
+static const intptr_t kCvLow = 0x0003L; // low order bits of CV
+
+// Hack to make constant values available to gdb pretty printer
+enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
+
+static_assert(PerThreadSynch::kAlignment > kCvLow,
+ "PerThreadSynch::kAlignment must be greater than kCvLow");
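+
+// A CondVar's cv_ word packs the PerThreadSynch* head of the waiter list
+// into the pointer bits and kCvSpin/kCvEvent into the two low bits; the
+// static_assert above guarantees that the pointer's low bits are zero, so
+// (v & ~kCvLow) recovers the list head exactly.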
+
+void CondVar::EnableDebugLog(const char *name) {
+ SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
+ e->log = true;
+ UnrefSynchEvent(e);
+}
+
+CondVar::~CondVar() {
+ if ((cv_.load(std::memory_order_relaxed) & kCvEvent) != 0) {
+ ForgetSynchEvent(&this->cv_, kCvEvent, kCvSpin);
+ }
+}
+
+// Remove thread s from the list of waiters on this condition variable.
+void CondVar::Remove(PerThreadSynch *s) {
+ intptr_t v;
+ int c = 0;
+ for (v = cv_.load(std::memory_order_relaxed);;
+ v = cv_.load(std::memory_order_relaxed)) {
+ if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
+ cv_.compare_exchange_strong(v, v | kCvSpin,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ if (h != nullptr) {
+ PerThreadSynch *w = h;
+ while (w->next != s && w->next != h) { // search for thread
+ w = w->next;
+ }
+ if (w->next == s) { // found thread; remove it
+ w->next = s->next;
+ if (h == s) {
+ h = (w == s) ? nullptr : w;
+ }
+ s->next = nullptr;
+ s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ }
+ }
+ // release spinlock
+ cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
+ std::memory_order_release);
+ return;
+ } else {
+ c = Delay(c, GENTLE); // try again after a delay
+ }
+ }
+}
+
+// Queue thread waitp->thread on condition variable word cv_word using
+// wait parameters waitp.
+// We split this into a separate routine, rather than simply doing it as part
+// of WaitCommon(). If we were to queue ourselves on the condition variable
+// before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
+// the logging code, or via a Condition function) and might potentially attempt
+// to block this thread. That would be a problem if the thread were already on
+// the condition variable waiter queue. Thus, we use the waitp->cv_word
+// to tell the unlock code to call CondVarEnqueue() to queue the thread on the
+// condition variable queue just before the mutex is to be unlocked, and (most
+// importantly) after any call to an external routine that might re-enter the
+// mutex code.
+static void CondVarEnqueue(SynchWaitParams *waitp) {
+ // This thread might be transferred to the Mutex queue by Fer() when
+ // we are woken. To make sure that is what happens, Enqueue() doesn't
+ // call CondVarEnqueue() again but instead uses its normal code. We
+ // must do this before we queue ourselves so that cv_word will be null
+ // when seen by the dequeuer, who may wish immediately to requeue
+ // this thread on another queue.
+ std::atomic<intptr_t> *cv_word = waitp->cv_word;
+ waitp->cv_word = nullptr;
+
+ intptr_t v = cv_word->load(std::memory_order_relaxed);
+ int c = 0;
+ while ((v & kCvSpin) != 0 || // acquire spinlock
+ !cv_word->compare_exchange_weak(v, v | kCvSpin,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ c = Delay(c, GENTLE);
+ v = cv_word->load(std::memory_order_relaxed);
+ }
+ ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
+ waitp->thread->waitp = waitp; // prepare ourselves for waiting
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ if (h == nullptr) { // add this thread to waiter list
+ waitp->thread->next = waitp->thread;
+ } else {
+ waitp->thread->next = h->next;
+ h->next = waitp->thread;
+ }
+ waitp->thread->state.store(PerThreadSynch::kQueued,
+ std::memory_order_relaxed);
+ cv_word->store((v & kCvEvent) | reinterpret_cast<intptr_t>(waitp->thread),
+ std::memory_order_release);
+}
+
+bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
+  bool rc = false;          // return value; true iff we timed out
+
+ intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
+ Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
+ ABSL_TSAN_MUTEX_PRE_UNLOCK(mutex, TsanFlags(mutex_how));
+
+ // maybe trace this call
+ intptr_t v = cv_.load(std::memory_order_relaxed);
+ cond_var_tracer("Wait", this);
+ if ((v & kCvEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_WAIT);
+ }
+
+ // Release mu and wait on condition variable.
+ SynchWaitParams waitp(mutex_how, nullptr, t, mutex,
+ Synch_GetPerThreadAnnotated(mutex), &cv_);
+ // UnlockSlow() will call CondVarEnqueue() just before releasing the
+ // Mutex, thus queuing this thread on the condition variable. See
+ // CondVarEnqueue() for the reasons.
+ mutex->UnlockSlow(&waitp);
+
+ // wait for signal
+ while (waitp.thread->state.load(std::memory_order_acquire) ==
+ PerThreadSynch::kQueued) {
+ if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
+ this->Remove(waitp.thread);
+ rc = true;
+ }
+ }
+
+ ABSL_RAW_CHECK(waitp.thread->waitp != nullptr, "not waiting when should be");
+ waitp.thread->waitp = nullptr; // cleanup
+
+ // maybe trace this call
+ cond_var_tracer("Unwait", this);
+ if ((v & kCvEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_WAIT_RETURNING);
+ }
+
+  // From a synchronization point of view, Wait is an unlock of the mutex
+  // followed by a lock of the mutex. We've annotated the start of unlock in
+  // the beginning of the function. Now, finish unlock and annotate lock of
+  // the mutex. (Trans is effectively lock).
+ ABSL_TSAN_MUTEX_POST_UNLOCK(mutex, TsanFlags(mutex_how));
+ ABSL_TSAN_MUTEX_PRE_LOCK(mutex, TsanFlags(mutex_how));
+ mutex->Trans(mutex_how); // Reacquire mutex
+ ABSL_TSAN_MUTEX_POST_LOCK(mutex, TsanFlags(mutex_how), 0);
+ return rc;
+}
+
+bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
+ return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+}
+
+bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
+ return WaitCommon(mu, KernelTimeout(deadline));
+}
+
+void CondVar::Wait(Mutex *mu) {
+ WaitCommon(mu, KernelTimeout::Never());
+}
+
+// Wake thread w.
+// If it was a timed wait, or there is no Mutex associated with the wait
+// (w->waitp->cvmu == nullptr), w is woken by posting its per-thread semaphore.
+// Otherwise, w is transferred to the Mutex w->waitp->cvmu via Mutex::Fer().
+void CondVar::Wakeup(PerThreadSynch *w) {
+ if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
+    // The waiting thread only needs to observe "w->state == kAvailable" to be
+    // released, so we must cache "cvmu" before clearing "next".
+ Mutex *mu = w->waitp->cvmu;
+ w->next = nullptr;
+ w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
+ Mutex::IncrementSynchSem(mu, w);
+ } else {
+ w->waitp->cvmu->Fer(w);
+ }
+}
+
+void CondVar::Signal() {
+ ABSL_TSAN_MUTEX_PRE_SIGNAL(0, 0);
+ intptr_t v;
+ int c = 0;
+ for (v = cv_.load(std::memory_order_relaxed); v != 0;
+ v = cv_.load(std::memory_order_relaxed)) {
+ if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
+ cv_.compare_exchange_strong(v, v | kCvSpin,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ PerThreadSynch *w = nullptr;
+ if (h != nullptr) { // remove first waiter
+ w = h->next;
+ if (w == h) {
+ h = nullptr;
+ } else {
+ h->next = w->next;
+ }
+ }
+ // release spinlock
+ cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
+ std::memory_order_release);
+ if (w != nullptr) {
+ CondVar::Wakeup(w); // wake waiter, if there was one
+ cond_var_tracer("Signal wakeup", this);
+ }
+ if ((v & kCvEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_SIGNAL);
+ }
+ ABSL_TSAN_MUTEX_POST_SIGNAL(0, 0);
+ return;
+ } else {
+ c = Delay(c, GENTLE);
+ }
+ }
+ ABSL_TSAN_MUTEX_POST_SIGNAL(0, 0);
+}
+
+void CondVar::SignalAll() {
+ ABSL_TSAN_MUTEX_PRE_SIGNAL(0, 0);
+ intptr_t v;
+ int c = 0;
+ for (v = cv_.load(std::memory_order_relaxed); v != 0;
+ v = cv_.load(std::memory_order_relaxed)) {
+ // empty the list if spinlock free
+ // We do this by simply setting the list to empty using
+ // compare and swap. We then have the entire list in our hands,
+ // which cannot be changing since we grabbed it while no one
+ // held the lock.
+ if ((v & kCvSpin) == 0 &&
+ cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ if (h != nullptr) {
+ PerThreadSynch *w;
+ PerThreadSynch *n = h->next;
+ do { // for every thread, wake it up
+ w = n;
+ n = n->next;
+ CondVar::Wakeup(w);
+ } while (w != h);
+ cond_var_tracer("SignalAll wakeup", this);
+ }
+ if ((v & kCvEvent) != 0) {
+ PostSynchEvent(this, SYNCH_EV_SIGNALALL);
+ }
+ ABSL_TSAN_MUTEX_POST_SIGNAL(0, 0);
+ return;
+ } else {
+ c = Delay(c, GENTLE); // try again after a delay
+ }
+ }
+ ABSL_TSAN_MUTEX_POST_SIGNAL(0, 0);
+}
+
+void ReleasableMutexLock::Release() {
+ ABSL_RAW_CHECK(this->mu_ != nullptr,
+ "ReleasableMutexLock::Release may only be called once");
+ this->mu_->Unlock();
+ this->mu_ = nullptr;
+}
+
+#ifdef THREAD_SANITIZER
+extern "C" void __tsan_read1(void *addr);
+#else
+#define __tsan_read1(addr) // do nothing if TSan not enabled
+#endif
+
+// A function that just returns its argument, dereferenced
+static bool Dereference(void *arg) {
+ // ThreadSanitizer does not instrument this file for memory accesses.
+ // This function dereferences a user variable that can participate
+ // in a data race, so we need to manually tell TSan about this memory access.
+ __tsan_read1(arg);
+ return *(static_cast<bool *>(arg));
+}
+
+Condition::Condition() {} // null constructor, used for kTrue only
+const Condition Condition::kTrue;
+
+Condition::Condition(bool (*func)(void *), void *arg)
+ : eval_(&CallVoidPtrFunction),
+ function_(func),
+ method_(nullptr),
+ arg_(arg) {}
+
+bool Condition::CallVoidPtrFunction(const Condition *c) {
+ return (*c->function_)(c->arg_);
+}
+
+Condition::Condition(const bool *cond)
+ : eval_(CallVoidPtrFunction),
+ function_(Dereference),
+ method_(nullptr),
+ // const_cast is safe since Dereference does not modify arg
+ arg_(const_cast<bool *>(cond)) {}
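+
+// For illustration, a minimal sketch of how Condition combines with Mutex
+// (the names `mu` and `ready` are hypothetical):
+//
+//   absl::Mutex mu;
+//   bool ready = false;  // guarded by mu
+//   ...
+//   mu.LockWhen(absl::Condition(&ready));  // blocks until ready == true
+//   // ready is now true, and mu is held
+//   mu.Unlock();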
+
+bool Condition::Eval() const {
+ // eval_ == null for kTrue
+ return (this->eval_ == nullptr) || (*this->eval_)(this);
+}
+
+bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
+ if (a == nullptr) {
+ return b == nullptr || b->eval_ == nullptr;
+ }
+ if (b == nullptr || b->eval_ == nullptr) {
+ return a->eval_ == nullptr;
+ }
+ return a->eval_ == b->eval_ && a->function_ == b->function_ &&
+ a->arg_ == b->arg_ && a->method_ == b->method_;
+}
+
+} // namespace absl
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
new file mode 100644
index 0000000..a417802
--- /dev/null
+++ b/absl/synchronization/mutex.h
@@ -0,0 +1,1013 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// mutex.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `Mutex` -- a mutually exclusive lock -- the
+// most common type of synchronization primitive for facilitating locks on
+// shared resources. A mutex is used to prevent multiple threads from accessing
+// and/or writing to a shared resource concurrently.
+//
+// Unlike a `std::mutex`, the Abseil `Mutex` provides the following additional
+// features:
+// * Conditional predicates intrinsic to the `Mutex` object
+// * Reader/writer locks, in addition to standard exclusive/writer locks
+// * Deadlock detection and debug support.
+//
+// The following helper classes are also defined within this file:
+//
+// MutexLock - An RAII wrapper to acquire and release a `Mutex` for exclusive/
+// write access within the current scope.
+// ReaderMutexLock
+// - An RAII wrapper to acquire and release a `Mutex` for shared/read
+// access within the current scope.
+//
+// WriterMutexLock
+// - Alias for `MutexLock` above, designed for use in distinguishing
+// reader and writer locks within code.
+//
+// In addition to simple mutex locks, this file also defines ways to perform
+// locking under certain conditions.
+//
+// Condition - (Preferred) Used to wait for a particular predicate that
+// depends on state protected by the `Mutex` to become true.
+// CondVar - A lower-level variant of `Condition` that relies on
+// application code to explicitly signal the `CondVar` when
+// a condition has been met.
+//
+// See below for more information on using `Condition` or `CondVar`.
+//
+// Mutexes and mutex behavior can be quite complicated. The information within
+// this header file is limited, as a result. Please consult the Mutex guide for
+// more complete information and examples.
+
+#ifndef ABSL_SYNCHRONIZATION_MUTEX_H_
+#define ABSL_SYNCHRONIZATION_MUTEX_H_
+
+#include <atomic>
+#include <cstdint>
+#include <string>
+
+#include "absl/base/internal/identity.h"
+#include "absl/base/internal/low_level_alloc.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/port.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+#include "absl/time/time.h"
+
+// Decide if we should use the non-production implementation because
+// the production implementation hasn't been fully ported yet.
+#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
+#error ABSL_INTERNAL_USE_NONPROD_MUTEX cannot be directly set
+#elif defined(ABSL_LOW_LEVEL_ALLOC_MISSING)
+#define ABSL_INTERNAL_USE_NONPROD_MUTEX 1
+#include "absl/synchronization/internal/mutex_nonprod.inc"
+#endif
+
+namespace absl {
+
+struct SynchWaitParams;
+class Condition;
+
+// -----------------------------------------------------------------------------
+// Mutex
+// -----------------------------------------------------------------------------
+//
+// A `Mutex` is a non-reentrant (aka non-recursive) Mutually Exclusive lock
+// on some resource, typically a variable or data structure with associated
+// invariants. Proper usage of mutexes prevents concurrent access by different
+// threads to the same resource.
+//
+// A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
+// The `Lock()` operation *acquires* a `Mutex` (in a state known as an
+// *exclusive* -- or write -- lock), while the `Unlock()` operation *releases* a
+// Mutex. During the span of time between the Lock() and Unlock() operations,
+// a mutex is said to be *held*. By design all mutexes support exclusive/write
+// locks, as this is the most common way to use a mutex.
+//
+// The `Mutex` state machine for basic lock/unlock operations is quite simple:
+//
+// | | Lock() | Unlock() |
+// |----------------+------------+----------|
+// | Free | Exclusive | invalid |
+// | Exclusive | blocks | Free |
+//
+// Attempts to `Unlock()` must originate from the thread that performed the
+// corresponding `Lock()` operation.
+//
+// An "invalid" operation is disallowed by the API. The `Mutex` implementation
+// is allowed to do anything on an invalid call, including but not limited to
+// crashing with a useful error message, silently succeeding, or corrupting
+// data structures. In debug mode, the implementation attempts to crash with a
+// useful error message.
+//
+// `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
+// is, however, approximately fair over long periods, and starvation-free for
+// threads at the same priority.
+//
+// The lock/unlock primitives are annotated with lock annotations defined in
+// `absl/base/thread_annotations.h`. When writing multi-threaded code, you
+// should use lock annotations whenever possible to document your lock
+// synchronization policy. Besides acting as documentation, these annotations
+// also help compilers and static analysis tools to identify and warn about
+// issues that could potentially result in race conditions and deadlocks.
+//
+// For more information about the lock annotations, please see
+// [Thread Safety Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
+// in the Clang documentation.
+//
+// See also `MutexLock`, below, for scoped `Mutex` acquisition.
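+//
+// A minimal usage sketch (illustrative only; `count` and its guarding
+// `Mutex` are assumptions, not declarations from this header):
+//
+//   absl::Mutex mu;
+//   int count = 0;  // Guarded by mu.
+//
+//   void Increment() {
+//     mu.Lock();
+//     count++;
+//     mu.Unlock();
+//   }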
+
+class LOCKABLE Mutex {
+ public:
+ Mutex();
+ ~Mutex();
+
+ // Mutex::Lock()
+ //
+ // Blocks the calling thread, if necessary, until this `Mutex` is free, and
+ // then acquires it exclusively. (This lock is also known as a "write lock.")
+ void Lock() EXCLUSIVE_LOCK_FUNCTION();
+
+ // Mutex::Unlock()
+ //
+ // Releases this `Mutex` and returns it from the exclusive/write state to the
+ // free state. Caller must hold the `Mutex` exclusively.
+ void Unlock() UNLOCK_FUNCTION();
+
+ // Mutex::TryLock()
+ //
+ // If the mutex can be acquired without blocking, does so exclusively and
+ // returns `true`. Otherwise, returns `false`. Returns `true` with high
+ // probability if the `Mutex` was free.
+ bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true);
+
+ // Mutex::AssertHeld()
+ //
+  // Returns immediately if this thread holds the `Mutex` exclusively (in
+  // write mode). Otherwise, may report an error (typically by crashing with a
+  // diagnostic), or may return immediately.
+ void AssertHeld() const ASSERT_EXCLUSIVE_LOCK();
+
+ // ---------------------------------------------------------------------------
+ // Reader-Writer Locking
+ // ---------------------------------------------------------------------------
+
+ // A Mutex can also be used as a starvation-free reader-writer lock.
+ // Neither read-locks nor write-locks are reentrant/recursive to avoid
+ // potential client programming errors.
+ //
+ // The Mutex API provides `Writer*()` aliases for the existing `Lock()`,
+ // `Unlock()` and `TryLock()` methods for use within applications mixing
+ // reader/writer locks. Using `Reader*()` and `Writer*()` operations in this
+ // manner can make locking behavior clearer when mixing read and write modes.
+ //
+ // Introducing reader locks necessarily complicates the `Mutex` state
+ // machine somewhat. The table below illustrates the allowed state transitions
+ // of a mutex in such cases. Note that ReaderLock() may block even if the lock
+ // is held in shared mode; this occurs when another thread is blocked on a
+ // call to WriterLock().
+ //
+ // ---------------------------------------------------------------------------
+ // Operation: WriterLock() Unlock() ReaderLock() ReaderUnlock()
+ // ---------------------------------------------------------------------------
+ // State
+ // ---------------------------------------------------------------------------
+ // Free Exclusive invalid Shared(1) invalid
+ // Shared(1) blocks invalid Shared(2) or blocks Free
+ // Shared(n) n>1 blocks invalid Shared(n+1) or blocks Shared(n-1)
+ // Exclusive blocks Free blocks invalid
+ // ---------------------------------------------------------------------------
+ //
+ // In comments below, "shared" refers to a state of Shared(n) for any n > 0.
+
+ // Mutex::ReaderLock()
+ //
+ // Blocks the calling thread, if necessary, until this `Mutex` is either free,
+ // or in shared mode, and then acquires a share of it. Note that
+ // `ReaderLock()` will block if some other thread has an exclusive/writer lock
+ // on the mutex.
+  void ReaderLock() SHARED_LOCK_FUNCTION();
+
+ // Mutex::ReaderUnlock()
+ //
+ // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to
+ // the free state if this thread holds the last reader lock on the mutex. Note
+ // that you cannot call `ReaderUnlock()` on a mutex held in write mode.
+ void ReaderUnlock() UNLOCK_FUNCTION();
+
+ // Mutex::ReaderTryLock()
+ //
+ // If the mutex can be acquired without blocking, acquires this mutex for
+ // shared access and returns `true`. Otherwise, returns `false`. Returns
+ // `true` with high probability if the `Mutex` was free or shared.
+ bool ReaderTryLock() SHARED_TRYLOCK_FUNCTION(true);
+
+ // Mutex::AssertReaderHeld()
+ //
+ // Returns immediately if this thread holds the `Mutex` in at least shared
+ // mode (read mode). Otherwise, may report an error (typically by
+ // crashing with a diagnostic), or may return immediately.
+ void AssertReaderHeld() const ASSERT_SHARED_LOCK();
+
+ // Mutex::WriterLock()
+ // Mutex::WriterUnlock()
+ // Mutex::WriterTryLock()
+ //
+ // Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
+ //
+ // Use the `Writer*()` versions of these method names when using complementary
+  // `Reader*()` methods to distinguish simple exclusive `Mutex` usage (`Lock()`,
+ // etc.) from reader/writer lock usage.
+ void WriterLock() EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
+
+ void WriterUnlock() UNLOCK_FUNCTION() { this->Unlock(); }
+
+ bool WriterTryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+ return this->TryLock();
+ }
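+
+  // An illustrative sketch of mixed reader/writer usage (`data_` and `mu_`
+  // are assumed members of some enclosing class, not part of this header):
+  //
+  //   void Read() {
+  //     mu_.ReaderLock();
+  //     // ... read data_ ...
+  //     mu_.ReaderUnlock();
+  //   }
+  //
+  //   void Write() {
+  //     mu_.WriterLock();
+  //     // ... mutate data_ ...
+  //     mu_.WriterUnlock();
+  //   }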
+
+ // ---------------------------------------------------------------------------
+ // Conditional Critical Regions
+ // ---------------------------------------------------------------------------
+
+ // Conditional usage of a `Mutex` can occur using two distinct paradigms:
+ //
+ // * Use of `Mutex` member functions with `Condition` objects.
+ // * Use of the separate `CondVar` abstraction.
+ //
+ // In general, prefer use of `Condition` and the `Mutex` member functions
+ // listed below over `CondVar`. When there are multiple threads waiting on
+ // distinctly different conditions, however, a battery of `CondVar`s may be
+ // more efficient. This section discusses use of `Condition` objects.
+ //
+  // `Mutex` contains member functions for performing lock operations only
+  // under certain conditions, expressed as objects of class `Condition`. For
+  // correctness, the `Condition` must return a boolean that is a pure function
+  // only of state protected by the `Mutex`. The condition must be invariant
+  // w.r.t. environmental state such as thread, cpu id, or time, and must be
+  // `noexcept`. The condition will always be invoked with the mutex held in at
+  // least read mode, so it should not block for long periods or sleep on a
+  // timer.
+ //
+ // Since a condition must not depend directly on the current time, use
+ // `*WithTimeout()` member function variants to make your condition
+ // effectively true after a given duration, or `*WithDeadline()` variants to
+ // make your condition effectively true after a given time.
+ //
+ // The condition function should have no side-effects aside from debug
+ // logging; as a special exception, the function may acquire other mutexes
+ // provided it releases all those that it acquires. (This exception was
+ // required to allow logging.)
+
+ // Mutex::Await()
+ //
+ // Unlocks this `Mutex` and blocks until simultaneously both `cond` is `true`
+ // and this `Mutex` can be reacquired, then reacquires this `Mutex` in the
+ // same mode in which it was previously held. If the condition is initially
+ // `true`, `Await()` *may* skip the release/re-acquire step.
+ //
+ // `Await()` requires that this thread holds this `Mutex` in some mode.
+ void Await(const Condition &cond);
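+
+  // A sketch of `Await()` in use (`done_` is an assumed bool guarded by this
+  // `Mutex`):
+  //
+  //   mu_.Lock();
+  //   mu_.Await(Condition(&done_));  // Sleeps until done_ becomes true.
+  //   // done_ is true and mu_ is still held, in its original mode.
+  //   mu_.Unlock();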
+
+ // Mutex::LockWhen()
+ // Mutex::ReaderLockWhen()
+ // Mutex::WriterLockWhen()
+ //
+  // Blocks until simultaneously both `cond` is `true` and this `Mutex` can
+ // be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
+ // logically equivalent to `*Lock(); Await();` though they may have different
+ // performance characteristics.
+ void LockWhen(const Condition &cond) EXCLUSIVE_LOCK_FUNCTION();
+
+ void ReaderLockWhen(const Condition &cond) SHARED_LOCK_FUNCTION();
+
+ void WriterLockWhen(const Condition &cond) EXCLUSIVE_LOCK_FUNCTION() {
+ this->LockWhen(cond);
+ }
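+
+  // A sketch of `LockWhen()` (`queue_nonempty_` is an assumed bool protected
+  // by this `Mutex`):
+  //
+  //   mu_.LockWhen(Condition(&queue_nonempty_));
+  //   // mu_ is held exclusively and queue_nonempty_ is true.
+  //   mu_.Unlock();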
+
+ // ---------------------------------------------------------------------------
+ // Mutex Variants with Timeouts/Deadlines
+ // ---------------------------------------------------------------------------
+
+ // Mutex::AwaitWithTimeout()
+ // Mutex::AwaitWithDeadline()
+ //
+ // If `cond` is initially true, do nothing, or act as though `cond` is
+ // initially false.
+ //
+ // If `cond` is initially false, unlock this `Mutex` and block until
+ // simultaneously:
+ // - either `cond` is true or the {timeout has expired, deadline has passed}
+ // and
+ // - this `Mutex` can be reacquired,
+ // then reacquire this `Mutex` in the same mode in which it was previously
+ // held, returning `true` iff `cond` is `true` on return.
+ //
+ // Deadlines in the past are equivalent to an immediate deadline.
+ // Negative timeouts are equivalent to a zero timeout.
+ //
+ // This method requires that this thread holds this `Mutex` in some mode.
+ bool AwaitWithTimeout(const Condition &cond, absl::Duration timeout);
+
+ bool AwaitWithDeadline(const Condition &cond, absl::Time deadline);
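+
+  // A sketch of a timed wait (`work_done_` is an assumed bool guarded by this
+  // `Mutex`):
+  //
+  //   mu_.Lock();
+  //   if (!mu_.AwaitWithTimeout(Condition(&work_done_), absl::Seconds(5))) {
+  //     // Timed out; work_done_ is still false. mu_ is held either way.
+  //   }
+  //   mu_.Unlock();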
+
+ // Mutex::LockWhenWithTimeout()
+ // Mutex::ReaderLockWhenWithTimeout()
+ // Mutex::WriterLockWhenWithTimeout()
+ //
+ // Blocks until simultaneously both:
+ // - either `cond` is `true` or the timeout has expired, and
+ // - this `Mutex` can be acquired,
+ // then atomically acquires this `Mutex`, returning `true` iff `cond` is
+ // `true` on return.
+ //
+ // Negative timeouts are equivalent to a zero timeout.
+ bool LockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+ EXCLUSIVE_LOCK_FUNCTION();
+ bool ReaderLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+ SHARED_LOCK_FUNCTION();
+ bool WriterLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+ EXCLUSIVE_LOCK_FUNCTION() {
+ return this->LockWhenWithTimeout(cond, timeout);
+ }
+
+ // Mutex::LockWhenWithDeadline()
+ // Mutex::ReaderLockWhenWithDeadline()
+ // Mutex::WriterLockWhenWithDeadline()
+ //
+ // Blocks until simultaneously both:
+ // - either `cond` is `true` or the deadline has been passed, and
+ // - this `Mutex` can be acquired,
+ // then atomically acquires this Mutex, returning `true` iff `cond` is `true`
+ // on return.
+ //
+ // Deadlines in the past are equivalent to an immediate deadline.
+ bool LockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+ EXCLUSIVE_LOCK_FUNCTION();
+ bool ReaderLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+ SHARED_LOCK_FUNCTION();
+ bool WriterLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+ EXCLUSIVE_LOCK_FUNCTION() {
+ return this->LockWhenWithDeadline(cond, deadline);
+ }
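+
+  // A sketch of a deadline-based conditional acquisition (`ready_` is an
+  // assumed bool protected by this `Mutex`):
+  //
+  //   if (mu_.LockWhenWithDeadline(Condition(&ready_),
+  //                                absl::Now() + absl::Seconds(10))) {
+  //     // ready_ became true before the deadline.
+  //   } else {
+  //     // The deadline passed with ready_ still false.
+  //   }
+  //   mu_.Unlock();  // mu_ is held in either case.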
+
+ // ---------------------------------------------------------------------------
+ // Debug Support: Invariant Checking, Deadlock Detection, Logging.
+ // ---------------------------------------------------------------------------
+
+ // Mutex::EnableInvariantDebugging()
+ //
+  // If `invariant` != null, and if invariant debugging has been enabled
+  // globally, causes `(*invariant)(arg)` to be called at moments when the
+  // invariant for this `Mutex` should hold (for example: just after acquire,
+  // just before release).
+ //
+ // The routine `invariant` should have no side-effects since it is not
+ // guaranteed how many times it will be called; it should check the invariant
+ // and crash if it does not hold. Enabling global invariant debugging may
+ // substantially reduce `Mutex` performance; it should be set only for
+ // non-production runs. Optimization options may also disable invariant
+ // checks.
+ void EnableInvariantDebugging(void (*invariant)(void *), void *arg);
+
+ // Mutex::EnableDebugLog()
+ //
+  // Causes all subsequent uses of this `Mutex` to be logged via
+ // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if no previous
+ // call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
+ //
+ // Note: This method substantially reduces `Mutex` performance.
+ void EnableDebugLog(const char *name);
+
+ // Deadlock detection
+
+ // Mutex::ForgetDeadlockInfo()
+ //
+  // Forgets any deadlock-detection information previously gathered
+ // about this `Mutex`. Call this method in debug mode when the lock ordering
+ // of a `Mutex` changes.
+ void ForgetDeadlockInfo();
+
+ // Mutex::AssertNotHeld()
+ //
+  // Returns immediately if this thread does not hold this `Mutex` in any
+  // mode; otherwise, may report an error (typically by crashing with a
+  // diagnostic), or may return immediately.
+  //
+  // Currently this check is performed only if all of the following are true:
+  //   - the build is in debug mode
+  //   - `SetMutexDeadlockDetectionMode()` has been set to `kReport` or
+  //     `kAbort`
+  //   - the number of locks concurrently held by this thread is not large.
+ void AssertNotHeld() const;
+
+ // Special cases.
+
+ // A `MuHow` is a constant that indicates how a lock should be acquired.
+ // Internal implementation detail. Clients should ignore.
+ typedef const struct MuHowS *MuHow;
+
+ // Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
+ //
+ // Causes the `Mutex` implementation to prepare itself for re-entry caused by
+ // future use of `Mutex` within a fatal signal handler. This method is
+ // intended for use only for last-ditch attempts to log crash information.
+ // It does not guarantee that attempts to use Mutexes within the handler will
+ // not deadlock; it merely makes other faults less likely.
+ //
+ // WARNING: This routine must be invoked from a signal handler, and the
+ // signal handler must either loop forever or terminate the process.
+ // Attempts to return from (or `longjmp` out of) the signal handler once this
+ // call has been made may cause arbitrary program behaviour including
+ // crashes and deadlocks.
+ static void InternalAttemptToUseMutexInFatalSignalHandler();
+
+ private:
+#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
+ friend class CondVar;
+
+ synchronization_internal::MutexImpl *impl() { return impl_.get(); }
+
+ synchronization_internal::SynchronizationStorage<
+ synchronization_internal::MutexImpl>
+ impl_;
+#else
+ std::atomic<intptr_t> mu_; // The Mutex state.
+
+ // Post()/Wait() versus associated PerThreadSem; in class for required
+ // friendship with PerThreadSem.
+ static inline void IncrementSynchSem(Mutex *mu,
+ base_internal::PerThreadSynch *w);
+ static inline bool DecrementSynchSem(
+ Mutex *mu, base_internal::PerThreadSynch *w,
+ synchronization_internal::KernelTimeout t);
+
+ // slow path acquire
+ void LockSlowLoop(SynchWaitParams *waitp, int flags);
+ // wrappers around LockSlowLoop()
+ bool LockSlowWithDeadline(MuHow how, const Condition *cond,
+ synchronization_internal::KernelTimeout t,
+ int flags);
+ void LockSlow(MuHow how, const Condition *cond,
+ int flags) ABSL_ATTRIBUTE_COLD;
+ // slow path release
+ void UnlockSlow(SynchWaitParams *waitp) ABSL_ATTRIBUTE_COLD;
+ // Common code between Await() and AwaitWithTimeout/Deadline()
+ bool AwaitCommon(const Condition &cond,
+ synchronization_internal::KernelTimeout t);
+ // Attempt to remove thread s from queue.
+ void TryRemove(base_internal::PerThreadSynch *s);
+ // Block a thread on mutex.
+ void Block(base_internal::PerThreadSynch *s);
+ // Wake a thread; return successor.
+ base_internal::PerThreadSynch *Wakeup(base_internal::PerThreadSynch *w);
+
+ friend class CondVar; // for access to Trans()/Fer().
+ void Trans(MuHow how); // used for CondVar->Mutex transfer
+ void Fer(
+ base_internal::PerThreadSynch *w); // used for CondVar->Mutex transfer
+#endif
+
+ // Catch the error of writing Mutex when intending MutexLock.
+ Mutex(const volatile Mutex * /*ignored*/) {} // NOLINT(runtime/explicit)
+
+ Mutex(const Mutex&) = delete;
+ Mutex& operator=(const Mutex&) = delete;
+};
+
+// -----------------------------------------------------------------------------
+// Mutex RAII Wrappers
+// -----------------------------------------------------------------------------
+
+// MutexLock
+//
+// `MutexLock` is a helper class, which acquires and releases a `Mutex` via
+// RAII.
+//
+// Example:
+//
+//   class Foo {
+//
+// Foo::Bar* Baz() {
+// MutexLock l(&lock_);
+// ...
+// return bar;
+// }
+//
+// private:
+// Mutex lock_;
+// };
+class SCOPED_LOCKABLE MutexLock {
+ public:
+ explicit MutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
+ this->mu_->Lock();
+ }
+ ~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); }
+ private:
+ Mutex *const mu_;
+ MutexLock(const MutexLock &) = delete; // NOLINT(runtime/mutex)
+ MutexLock& operator=(const MutexLock&) = delete;
+};
+
+// ReaderMutexLock
+//
+// The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
+// releases a shared lock on a `Mutex` via RAII.
+class SCOPED_LOCKABLE ReaderMutexLock {
+ public:
+ explicit ReaderMutexLock(Mutex *mu) SHARED_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ mu->ReaderLock();
+ }
+ ~ReaderMutexLock() UNLOCK_FUNCTION() {
+ this->mu_->ReaderUnlock();
+ }
+ private:
+ Mutex *const mu_;
+ ReaderMutexLock(const ReaderMutexLock&) = delete;
+ ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
+};
+
+// WriterMutexLock
+//
+// The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
+// releases a write (exclusive) lock on a `Mutex` via RAII.
+class SCOPED_LOCKABLE WriterMutexLock {
+ public:
+ explicit WriterMutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ mu->WriterLock();
+ }
+ ~WriterMutexLock() UNLOCK_FUNCTION() {
+ this->mu_->WriterUnlock();
+ }
+ private:
+ Mutex *const mu_;
+ WriterMutexLock(const WriterMutexLock&) = delete;
+ WriterMutexLock& operator=(const WriterMutexLock&) = delete;
+};
+
+// -----------------------------------------------------------------------------
+// Condition
+// -----------------------------------------------------------------------------
+//
+// As noted above, `Mutex` contains a number of member functions which take a
+// `Condition` as an argument; clients can wait for conditions to become `true`
+// before attempting to acquire the mutex. These sections are known as
+// "condition critical" sections. To use a `Condition`, you simply need to
+// construct it and use it within an appropriate `Mutex` member function;
+// everything else in the `Condition` class is an implementation detail.
+//
+// A `Condition` is specified as a function pointer which returns a boolean.
+// `Condition` functions should be pure functions -- their results should depend
+// only on passed arguments, should not consult any external state (such as
+// clocks), and should have no side-effects, aside from debug logging. Any
+// objects that the function may access should be limited to those which are
+// constant while the mutex is blocked on the condition (e.g. a stack variable),
+// or objects of state protected explicitly by the mutex.
+//
+// No matter which construction is used for `Condition`, the underlying
+// function pointer / functor / callable must not throw any
+// exceptions. Correctness of `Mutex` / `Condition` is not guaranteed in
+// the face of a throwing `Condition`. (When Abseil is allowed to depend
+// on C++17, these function pointers will be explicitly marked
+// `noexcept`; until then this requirement cannot be enforced in the
+// type system.)
+//
+// Note: to use a `Condition`, you need only construct it and pass it within the
+// appropriate `Mutex` member function, such as `Mutex::Await()`.
+//
+// Example:
+//
+//   // assume count_ is not an internal reference count
+// int count_ GUARDED_BY(mu_);
+//
+// mu_.LockWhen(Condition(+[](const int* count) { return *count == 0; },
+// &count_));
+//
+// When multiple threads are waiting on exactly the same condition, make sure
+// that they are constructed with the same parameters (same pointer to function
+// + arg, or same pointer to object + method), so that the mutex implementation
+// can avoid redundantly evaluating the same condition for each thread.
+class Condition {
+ public:
+ // A Condition that returns the result of "(*func)(arg)"
+ Condition(bool (*func)(void *), void *arg);
+
+ // Templated version for people who are averse to casts.
+ //
+ // To use a lambda, prepend it with unary plus, which converts the lambda
+ // into a function pointer:
+ // Condition(+[](T* t) { return ...; }, arg).
+ //
+ // Note: lambdas in this case must contain no bound variables.
+ //
+ // See class comment for performance advice.
+ template<typename T>
+ Condition(bool (*func)(T *), T *arg);
+
+ // Templated version for invoking a method that returns a `bool`.
+ //
+ // `Condition(object, &Class::Method)` constructs a `Condition` that evaluates
+ // `object->Method()`.
+ //
+ // Implementation Note: `absl::internal::identity` is used to allow methods to
+ // come from base classes. A simpler signature like
+ // `Condition(T*, bool (T::*)())` does not suffice.
+ template<typename T>
+ Condition(T *object, bool (absl::internal::identity<T>::type::* method)());
+
+ // Same as above, for const members
+ template<typename T>
+ Condition(const T *object,
+ bool (absl::internal::identity<T>::type::* method)() const);
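+
+  // A sketch of the method form (`Queue` and its `bool Empty() const` member
+  // are hypothetical):
+  //
+  //   Queue *q = ...;
+  //   mu_.Await(Condition(q, &Queue::Empty));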
+
+ // A Condition that returns the value of `*cond`
+ explicit Condition(const bool *cond);
+
+ // Templated version for invoking a functor that returns a `bool`.
+ // This approach accepts pointers to non-mutable lambdas, `std::function`,
+  // the result of `std::bind`, and user-defined functors that define
+ // `bool F::operator()() const`.
+ //
+ // Example:
+ //
+ // auto reached = [this, current]() {
+ // mu_.AssertReaderHeld(); // For annotalysis.
+ // return processed_ >= current;
+ // };
+ // mu_.Await(Condition(&reached));
+
+ // See class comment for performance advice. In particular, if there
+ // might be more than one waiter for the same condition, make sure
+ // that all waiters construct the condition with the same pointers.
+
+ // Implementation note: The second template parameter ensures that this
+ // constructor doesn't participate in overload resolution if T doesn't have
+ // `bool operator() const`.
+ template <typename T, typename E = decltype(
+ static_cast<bool (T::*)() const>(&T::operator()))>
+ explicit Condition(const T *obj)
+ : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
+
+ // A Condition that always returns `true`.
+ static const Condition kTrue;
+
+ // Evaluates the condition.
+ bool Eval() const;
+
+ // Returns `true` if the two conditions are guaranteed to return the same
+ // value if evaluated at the same time, `false` if the evaluation *may* return
+ // different results.
+ //
+ // Two `Condition` values are guaranteed equal if both their `func` and `arg`
+ // components are the same. A null pointer is equivalent to a `true`
+ // condition.
+ static bool GuaranteedEqual(const Condition *a, const Condition *b);
+
+ private:
+ typedef bool (*InternalFunctionType)(void * arg);
+ typedef bool (Condition::*InternalMethodType)();
+ typedef bool (*InternalMethodCallerType)(void * arg,
+ InternalMethodType internal_method);
+
+ bool (*eval_)(const Condition*); // Actual evaluator
+ InternalFunctionType function_; // function taking pointer returning bool
+ InternalMethodType method_; // method returning bool
+ void *arg_; // arg of function_ or object of method_
+
+ Condition(); // null constructor used only to create kTrue
+
+ // Various functions eval_ can point to:
+ static bool CallVoidPtrFunction(const Condition*);
+ template <typename T> static bool CastAndCallFunction(const Condition* c);
+ template <typename T> static bool CastAndCallMethod(const Condition* c);
+};
+
+// -----------------------------------------------------------------------------
+// CondVar
+// -----------------------------------------------------------------------------
+//
+// A condition variable, reflecting state evaluated separately outside of the
+// `Mutex` object, which can be signaled to wake callers.
+// This class is not normally needed; use `Mutex` member functions such as
+// `Mutex::Await()` and intrinsic `Condition` abstractions. In rare cases
+// with many threads and many conditions, `CondVar` may be faster.
+//
+// The implementation may deliver signals to any condition variable at
+// any time, even when no call to `Signal()` or `SignalAll()` is made; as a
+// result, upon being awoken, you must check the logical condition you have
+// been waiting upon. The implementation wakes waiters in FIFO order.
+//
+// Examples:
+//
+// Usage for a thread T waiting for some condition C protected by mutex mu:
+//       mu.Lock();
+//       while (!C) { cv.Wait(&mu); }        // releases and reacquires mu
+//       // C holds; process data
+//       mu.Unlock();
+//
+// Usage for a thread waking T:
+//       mu.Lock();
+//       // process data, possibly establishing C
+//       if (C) { cv.Signal(); }
+//       mu.Unlock();
+//
+// If C may be useful to more than one waiter, use `SignalAll()` instead of
+// `Signal()`.
+//
+// With this implementation it is efficient to use `Signal()/SignalAll()` inside
+// the locked region; this usage can make reasoning about your program easier.
+//
+class CondVar {
+ public:
+ CondVar();
+ ~CondVar();
+
+ // CondVar::Wait()
+ //
+ // Atomically releases a `Mutex` and blocks on this condition variable. After
+ // blocking, the thread will unblock, reacquire the `Mutex`, and return if
+ // either:
+ // - this condition variable is signalled with `SignalAll()`, or
+ // - this condition variable is signalled in any manner and this thread
+ // was the most recently blocked thread that has not yet woken.
+ // Requires and ensures that the current thread holds the `Mutex`.
+ void Wait(Mutex *mu);
+
+ // CondVar::WaitWithTimeout()
+ //
+ // Atomically releases a `Mutex`, blocks on this condition variable, and
+ // attempts to reacquire the mutex upon being signalled, or upon reaching the
+ // timeout.
+ //
+ // After blocking, the thread will unblock, reacquire the `Mutex`, and return
+ // for any of the following:
+ // - this condition variable is signalled with `SignalAll()`
+ // - the timeout has expired
+ // - this condition variable is signalled in any manner and this thread
+ // was the most recently blocked thread that has not yet woken.
+ //
+ // Negative timeouts are equivalent to a zero timeout.
+ //
+ // Returns true if the timeout has expired without this `CondVar`
+ // being signalled in any manner. If both the timeout has expired
+ // and this `CondVar` has been signalled, the implementation is free
+ // to return `true` or `false`.
+ //
+ // Requires and ensures that the current thread holds the `Mutex`.
+ bool WaitWithTimeout(Mutex *mu, absl::Duration timeout);
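+
+  // A sketch of a timed wait loop (`cond` is an assumed bool protected by
+  // `mu`):
+  //
+  //   mu.Lock();
+  //   while (!cond &&
+  //          !cv.WaitWithTimeout(&mu, absl::Milliseconds(100))) {
+  //     // Woken without cond set and without timing out; wait again.
+  //   }
+  //   mu.Unlock();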
+
+ // CondVar::WaitWithDeadline()
+ //
+ // Atomically releases a `Mutex`, blocks on this condition variable, and
+ // attempts to reacquire the mutex within the provided deadline.
+ //
+ // After blocking, the thread will unblock, reacquire the `Mutex`, and return
+ // for any of the following:
+ // - this condition variable is signalled with `SignalAll()`
+ // - the deadline has passed
+ // - this condition variable is signalled in any manner and this thread
+ // was the most recently blocked thread that has not yet woken.
+ //
+ // Deadlines in the past are equivalent to an immediate deadline.
+ //
+ // Returns true if the deadline has passed without this `CondVar`
+ // being signalled in any manner. If both the deadline has passed
+ // and this `CondVar` has been signalled, the implementation is free
+ // to return `true` or `false`.
+ //
+ // Requires and ensures that the current thread holds the `Mutex`.
+ bool WaitWithDeadline(Mutex *mu, absl::Time deadline);
+
+ // CondVar::Signal()
+ //
+ // Signal this `CondVar`; wake at least one waiter if one exists.
+ void Signal();
+
+ // CondVar::SignalAll()
+ //
+ // Signal this `CondVar`; wake all waiters.
+ void SignalAll();
+
+ // CondVar::EnableDebugLog()
+ //
+ // Causes all subsequent uses of this `CondVar` to be logged via
+  // `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if
+  // `name != nullptr`.
+ // Note: this method substantially reduces `CondVar` performance.
+ void EnableDebugLog(const char *name);
+
+ private:
+#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
+ synchronization_internal::CondVarImpl *impl() { return impl_.get(); }
+ synchronization_internal::SynchronizationStorage<
+ synchronization_internal::CondVarImpl>
+ impl_;
+#else
+ bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t);
+ void Remove(base_internal::PerThreadSynch *s);
+ void Wakeup(base_internal::PerThreadSynch *w);
+ std::atomic<intptr_t> cv_; // Condition variable state.
+#endif
+ CondVar(const CondVar&) = delete;
+ CondVar& operator=(const CondVar&) = delete;
+};
+
+
+// Variants of MutexLock.
+//
+// If you find yourself using one of these, consider instead using
+// Mutex::Unlock() and/or if-statements for clarity.
+
+// MutexLockMaybe
+//
+// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
+class SCOPED_LOCKABLE MutexLockMaybe {
+ public:
+ explicit MutexLockMaybe(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) { if (this->mu_ != nullptr) { this->mu_->Lock(); } }
+ ~MutexLockMaybe() UNLOCK_FUNCTION() {
+ if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+ }
+ private:
+ Mutex *const mu_;
+ MutexLockMaybe(const MutexLockMaybe&) = delete;
+ MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
+};
+
+// ReleasableMutexLock
+//
+// ReleasableMutexLock is like MutexLock, but permits `Release()` of its
+// mutex before destruction. `Release()` may be called at most once.
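+//
+// A usage sketch (`CanFinishEarly()` and `SlowCleanup()` are hypothetical
+// helpers, not part of this header):
+//
+//   void Process(absl::Mutex *mu) {
+//     absl::ReleasableMutexLock l(mu);
+//     // ... work while holding mu ...
+//     if (CanFinishEarly()) {
+//       l.Release();   // Drop the lock before the remaining slow work.
+//       SlowCleanup();
+//     }
+//   }  // mu is unlocked here only if Release() was not called.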
+class SCOPED_LOCKABLE ReleasableMutexLock {
+ public:
+ explicit ReleasableMutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
+ : mu_(mu) {
+ this->mu_->Lock();
+ }
+ ~ReleasableMutexLock() UNLOCK_FUNCTION() {
+ if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+ }
+
+ void Release() UNLOCK_FUNCTION();
+
+ private:
+ Mutex *mu_;
+ ReleasableMutexLock(const ReleasableMutexLock&) = delete;
+ ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
+};
+
+#ifdef ABSL_INTERNAL_USE_NONPROD_MUTEX
+#else
+inline CondVar::CondVar() : cv_(0) {}
+#endif
+
+// static
+template <typename T>
+bool Condition::CastAndCallMethod(const Condition *c) {
+ typedef bool (T::*MemberType)();
+ MemberType rm = reinterpret_cast<MemberType>(c->method_);
+ T *x = static_cast<T *>(c->arg_);
+ return (x->*rm)();
+}
+
+// static
+template <typename T>
+bool Condition::CastAndCallFunction(const Condition *c) {
+ typedef bool (*FuncType)(T *);
+ FuncType fn = reinterpret_cast<FuncType>(c->function_);
+ T *x = static_cast<T *>(c->arg_);
+ return (*fn)(x);
+}
+
+template <typename T>
+inline Condition::Condition(bool (*func)(T *), T *arg)
+ : eval_(&CastAndCallFunction<T>),
+ function_(reinterpret_cast<InternalFunctionType>(func)),
+ method_(nullptr),
+ arg_(const_cast<void *>(static_cast<const void *>(arg))) {}
+
+template <typename T>
+inline Condition::Condition(T *object,
+ bool (absl::internal::identity<T>::type::*method)())
+ : eval_(&CastAndCallMethod<T>),
+ function_(nullptr),
+ method_(reinterpret_cast<InternalMethodType>(method)),
+ arg_(object) {}
+
+template <typename T>
+inline Condition::Condition(const T *object,
+ bool (absl::internal::identity<T>::type::*method)()
+ const)
+ : eval_(&CastAndCallMethod<T>),
+ function_(nullptr),
+ method_(reinterpret_cast<InternalMethodType>(method)),
+ arg_(reinterpret_cast<void *>(const_cast<T *>(object))) {}
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a mutex is
+// contended. The callback is given the //absl/base/internal/cycleclock.h
+// timestamp when waiting began.
+//
+// Calls to this function do not race or block, but there is no ordering
+// guaranteed between calls to this function and calls to the provided hook.
+// In particular, the previously registered hook may still be called for some
+// time after this function returns.
+void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp));
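+//
+// A registration sketch (`MyMutexProfiler` is a hypothetical callback):
+//
+//   void MyMutexProfiler(int64_t wait_timestamp) {
+//     // Record or export the timestamp at which waiting began.
+//   }
+//   ...
+//   absl::RegisterMutexProfiler(&MyMutexProfiler);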
+
+// Register a hook for Mutex tracing.
+//
+// The function pointer registered here will be called whenever a mutex is
+// contended. The callback is given an opaque handle to the contended mutex,
+// an event name, and the number of wait cycles (as measured by
+// //absl/base/internal/cycleclock.h, which may not correspond to real
+// "cycle" counts).
+//
+// The only event name currently sent is "slow release".
+//
+// This has the same memory ordering concerns as RegisterMutexProfiler() above.
+void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+ int64_t wait_cycles));
+
+// TODO(gfalcon): Combine RegisterMutexProfiler() and RegisterMutexTracer()
+// into a single interface, since they are only ever called in pairs.
+
+// Register a hook for CondVar tracing.
+//
+// The function pointer registered here will be called on various CondVar
+// events. The callback is given an opaque handle to the CondVar object and
+// a std::string identifying the event. This is thread-safe, but only a single
+// tracer can be registered.
+//
+// Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
+// "SignalAll wakeup".
+//
+// This has the same memory ordering concerns as RegisterMutexProfiler() above.
+void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
+
+// Register a hook for symbolizing stack traces in deadlock detector reports.
+//
+// 'pc' is the program counter being symbolized, 'out' is the buffer to write
+// into, and 'out_size' is the size of the buffer. The function should return
+// true if a null-terminated symbol was written to 'out', or false if
+// symbolization failed.
+//
+// This has the same memory ordering concerns as RegisterMutexProfiler() above.
+void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size));
+
+// EnableMutexInvariantDebugging()
+//
+// Enable or disable global support for Mutex invariant debugging. If enabled,
+// then invariant predicates can be registered per-Mutex for debug checking.
+// See Mutex::EnableInvariantDebugging().
+void EnableMutexInvariantDebugging(bool enabled);
+
+// When in debug mode, and when the feature has been enabled globally, the
+// implementation will keep track of lock ordering and complain (or optionally
+// crash) if a cycle is detected in the acquired-before graph.
+
+// Possible modes of operation for the deadlock detector in debug mode.
+enum class OnDeadlockCycle {
+ kIgnore, // Neither report on nor attempt to track cycles in lock ordering
+ kReport, // Report lock cycles to stderr when detected
+ kAbort, // Report lock cycles to stderr when detected, then abort
+};
+
+// SetMutexDeadlockDetectionMode()
+//
+// Enable or disable global support for detection of potential deadlocks
+// due to Mutex lock ordering inversions. When set to 'kIgnore', tracking of
+// lock ordering is disabled. Otherwise, in debug builds, a lock ordering graph
+// will be maintained internally, and detected cycles will be reported in
+// the manner chosen here.
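+//
+// For example, a debug build might request that detected lock cycles abort
+// the program:
+//
+//   absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);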
+void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode);
+
+} // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void AbslInternalMutexYield();
+} // extern "C"
+#endif // ABSL_SYNCHRONIZATION_MUTEX_H_
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
new file mode 100644
index 0000000..9cf34fc
--- /dev/null
+++ b/absl/synchronization/mutex_test.cc
@@ -0,0 +1,1538 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/mutex.h"
+
+#ifdef WIN32
+#include <windows.h>
+#endif
+
+#include <algorithm>
+#include <atomic>
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <memory>
+#include <random>
+#include <string>
+#include <thread> // NOLINT(build/c++11)
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/base/macros.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/memory/memory.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+namespace {
+
+// TODO(dmauro): Replace with a commandline flag.
+static constexpr bool kExtendedTest = false;
+
+std::unique_ptr<absl::synchronization_internal::ThreadPool> CreatePool(
+ int threads) {
+ return absl::make_unique<absl::synchronization_internal::ThreadPool>(threads);
+}
+
+std::unique_ptr<absl::synchronization_internal::ThreadPool>
+CreateDefaultPool() {
+ return CreatePool(kExtendedTest ? 32 : 10);
+}
+
+// Hack to schedule a function to run on a thread pool thread after a
+// duration has elapsed.
+static void ScheduleAfter(absl::synchronization_internal::ThreadPool *tp,
+ const std::function<void()> &func,
+ absl::Duration after) {
+ tp->Schedule([func, after] {
+ absl::SleepFor(after);
+ func();
+ });
+}
+
+struct TestContext {
+ int iterations;
+ int threads;
+ int g0; // global 0
+ int g1; // global 1
+ absl::Mutex mu;
+ absl::CondVar cv;
+};
+
+// Used to test whether the invariant check is actually invoked.
+static std::atomic<bool> invariant_checked;
+
+static bool GetInvariantChecked() {
+ return invariant_checked.load(std::memory_order_relaxed);
+}
+
+static void SetInvariantChecked(bool new_value) {
+ invariant_checked.store(new_value, std::memory_order_relaxed);
+}
+
+static void CheckSumG0G1(void *v) {
+ TestContext *cxt = static_cast<TestContext *>(v);
+ ABSL_RAW_CHECK(cxt->g0 == -cxt->g1, "Error in CheckSumG0G1");
+ SetInvariantChecked(true);
+}
+
+static void TestMu(TestContext *cxt, int c) {
+ SetInvariantChecked(false);
+ cxt->mu.EnableInvariantDebugging(CheckSumG0G1, cxt);
+ for (int i = 0; i != cxt->iterations; i++) {
+ absl::MutexLock l(&cxt->mu);
+ int a = cxt->g0 + 1;
+ cxt->g0 = a;
+ cxt->g1--;
+ }
+}
+
+static void TestTry(TestContext *cxt, int c) {
+ SetInvariantChecked(false);
+ cxt->mu.EnableInvariantDebugging(CheckSumG0G1, cxt);
+ for (int i = 0; i != cxt->iterations; i++) {
+ do {
+ std::this_thread::yield();
+ } while (!cxt->mu.TryLock());
+ int a = cxt->g0 + 1;
+ cxt->g0 = a;
+ cxt->g1--;
+ cxt->mu.Unlock();
+ }
+}
+
+static void TestR20ms(TestContext *cxt, int c) {
+ for (int i = 0; i != cxt->iterations; i++) {
+ absl::ReaderMutexLock l(&cxt->mu);
+ absl::SleepFor(absl::Milliseconds(20));
+ cxt->mu.AssertReaderHeld();
+ }
+}
+
+static void TestRW(TestContext *cxt, int c) {
+ SetInvariantChecked(false);
+ cxt->mu.EnableInvariantDebugging(CheckSumG0G1, cxt);
+ if ((c & 1) == 0) {
+ for (int i = 0; i != cxt->iterations; i++) {
+ absl::WriterMutexLock l(&cxt->mu);
+ cxt->g0++;
+ cxt->g1--;
+ cxt->mu.AssertHeld();
+ cxt->mu.AssertReaderHeld();
+ }
+ } else {
+ for (int i = 0; i != cxt->iterations; i++) {
+ absl::ReaderMutexLock l(&cxt->mu);
+ ABSL_RAW_CHECK(cxt->g0 == -cxt->g1, "Error in TestRW");
+ cxt->mu.AssertReaderHeld();
+ }
+ }
+}
+
+struct MyContext {
+ int target;
+ TestContext *cxt;
+ bool MyTurn();
+};
+
+bool MyContext::MyTurn() {
+ TestContext *cxt = this->cxt;
+ return cxt->g0 == this->target || cxt->g0 == cxt->iterations;
+}
+
+static void TestAwait(TestContext *cxt, int c) {
+ MyContext mc;
+ mc.target = c;
+ mc.cxt = cxt;
+ absl::MutexLock l(&cxt->mu);
+ cxt->mu.AssertHeld();
+ while (cxt->g0 < cxt->iterations) {
+ cxt->mu.Await(absl::Condition(&mc, &MyContext::MyTurn));
+ ABSL_RAW_CHECK(mc.MyTurn(), "Error in TestAwait");
+ cxt->mu.AssertHeld();
+ if (cxt->g0 < cxt->iterations) {
+ int a = cxt->g0 + 1;
+ cxt->g0 = a;
+ mc.target += cxt->threads;
+ }
+ }
+}
+
+static void TestSignalAll(TestContext *cxt, int c) {
+ int target = c;
+ absl::MutexLock l(&cxt->mu);
+ cxt->mu.AssertHeld();
+ while (cxt->g0 < cxt->iterations) {
+ while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
+ cxt->cv.Wait(&cxt->mu);
+ }
+ if (cxt->g0 < cxt->iterations) {
+ int a = cxt->g0 + 1;
+ cxt->g0 = a;
+ cxt->cv.SignalAll();
+ target += cxt->threads;
+ }
+ }
+}
+
+static void TestSignal(TestContext *cxt, int c) {
+ ABSL_RAW_CHECK(cxt->threads == 2, "TestSignal should use 2 threads");
+ int target = c;
+ absl::MutexLock l(&cxt->mu);
+ cxt->mu.AssertHeld();
+ while (cxt->g0 < cxt->iterations) {
+ while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
+ cxt->cv.Wait(&cxt->mu);
+ }
+ if (cxt->g0 < cxt->iterations) {
+ int a = cxt->g0 + 1;
+ cxt->g0 = a;
+ cxt->cv.Signal();
+ target += cxt->threads;
+ }
+ }
+}
+
+static void TestCVTimeout(TestContext *cxt, int c) {
+ int target = c;
+ absl::MutexLock l(&cxt->mu);
+ cxt->mu.AssertHeld();
+ while (cxt->g0 < cxt->iterations) {
+ while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
+ cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
+ }
+ if (cxt->g0 < cxt->iterations) {
+ int a = cxt->g0 + 1;
+ cxt->g0 = a;
+ cxt->cv.SignalAll();
+ target += cxt->threads;
+ }
+ }
+}
+
+static bool G0GE2(TestContext *cxt) { return cxt->g0 >= 2; }
+
+static void TestTime(TestContext *cxt, int c, bool use_cv) {
+ ABSL_RAW_CHECK(cxt->iterations == 1, "TestTime should only use 1 iteration");
+ ABSL_RAW_CHECK(cxt->threads > 2, "TestTime should use more than 2 threads");
+ const bool kFalse = false;
+ absl::Condition false_cond(&kFalse);
+ absl::Condition g0ge2(G0GE2, cxt);
+ if (c == 0) {
+ absl::MutexLock l(&cxt->mu);
+
+ absl::Time start = absl::Now();
+ if (use_cv) {
+ cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
+ } else {
+ ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
+ "TestTime failed");
+ }
+ absl::Duration elapsed = absl::Now() - start;
+ ABSL_RAW_CHECK(
+ absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
+ "TestTime failed");
+ ABSL_RAW_CHECK(cxt->g0 == 1, "TestTime failed");
+
+ start = absl::Now();
+ if (use_cv) {
+ cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
+ } else {
+ ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
+ "TestTime failed");
+ }
+ elapsed = absl::Now() - start;
+ ABSL_RAW_CHECK(
+ absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
+ "TestTime failed");
+ cxt->g0++;
+ if (use_cv) {
+ cxt->cv.Signal();
+ }
+
+ start = absl::Now();
+ if (use_cv) {
+ cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(4));
+ } else {
+ ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(4)),
+ "TestTime failed");
+ }
+ elapsed = absl::Now() - start;
+ ABSL_RAW_CHECK(
+ absl::Seconds(3.9) <= elapsed && elapsed <= absl::Seconds(6.0),
+ "TestTime failed");
+ ABSL_RAW_CHECK(cxt->g0 >= 3, "TestTime failed");
+
+ start = absl::Now();
+ if (use_cv) {
+ cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
+ } else {
+ ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
+ "TestTime failed");
+ }
+ elapsed = absl::Now() - start;
+ ABSL_RAW_CHECK(
+ absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
+ "TestTime failed");
+ if (use_cv) {
+ cxt->cv.SignalAll();
+ }
+
+ start = absl::Now();
+ if (use_cv) {
+ cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
+ } else {
+ ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
+ "TestTime failed");
+ }
+ elapsed = absl::Now() - start;
+ ABSL_RAW_CHECK(absl::Seconds(0.9) <= elapsed &&
+ elapsed <= absl::Seconds(2.0), "TestTime failed");
+ ABSL_RAW_CHECK(cxt->g0 == cxt->threads, "TestTime failed");
+
+ } else if (c == 1) {
+ absl::MutexLock l(&cxt->mu);
+ const absl::Time start = absl::Now();
+ if (use_cv) {
+ cxt->cv.WaitWithTimeout(&cxt->mu, absl::Milliseconds(500));
+ } else {
+ ABSL_RAW_CHECK(
+ !cxt->mu.AwaitWithTimeout(false_cond, absl::Milliseconds(500)),
+ "TestTime failed");
+ }
+ const absl::Duration elapsed = absl::Now() - start;
+ ABSL_RAW_CHECK(
+ absl::Seconds(0.4) <= elapsed && elapsed <= absl::Seconds(0.9),
+ "TestTime failed");
+ cxt->g0++;
+ } else if (c == 2) {
+ absl::MutexLock l(&cxt->mu);
+ if (use_cv) {
+ while (cxt->g0 < 2) {
+ cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
+ }
+ } else {
+ ABSL_RAW_CHECK(cxt->mu.AwaitWithTimeout(g0ge2, absl::Seconds(100)),
+ "TestTime failed");
+ }
+ cxt->g0++;
+ } else {
+ absl::MutexLock l(&cxt->mu);
+ if (use_cv) {
+ while (cxt->g0 < 2) {
+ cxt->cv.Wait(&cxt->mu);
+ }
+ } else {
+ cxt->mu.Await(g0ge2);
+ }
+ cxt->g0++;
+ }
+}
+
+static void TestMuTime(TestContext *cxt, int c) { TestTime(cxt, c, false); }
+
+static void TestCVTime(TestContext *cxt, int c) { TestTime(cxt, c, true); }
+
+static void EndTest(int *c0, int *c1, absl::Mutex *mu, absl::CondVar *cv,
+ const std::function<void(int)>& cb) {
+ mu->Lock();
+ int c = (*c0)++;
+ mu->Unlock();
+ cb(c);
+ absl::MutexLock l(mu);
+ (*c1)++;
+ cv->Signal();
+}
+
+// Basis for the parameterized tests configured below.
+static int RunTest(void (*test)(TestContext *cxt, int), int threads,
+ int iterations, int operations) {
+ TestContext cxt;
+ absl::Mutex mu2;
+ absl::CondVar cv2;
+ int c0;
+ int c1;
+
+ // run with large thread count for full test and to get timing
+
+#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+ absl::EnableMutexInvariantDebugging(false);
+#endif
+ c0 = 0;
+ c1 = 0;
+ cxt.g0 = 0;
+ cxt.g1 = 0;
+ cxt.iterations = iterations;
+ cxt.threads = threads;
+ absl::synchronization_internal::ThreadPool tp(threads);
+ for (int i = 0; i != threads; i++) {
+ tp.Schedule(std::bind(&EndTest, &c0, &c1, &mu2, &cv2,
+ std::function<void(int)>(
+ std::bind(test, &cxt, std::placeholders::_1))));
+ }
+ mu2.Lock();
+ while (c1 != threads) {
+ cv2.Wait(&mu2);
+ }
+ mu2.Unlock();
+ int saved_g0 = cxt.g0;
+
+ // run again with small number of iterations to test invariant checking
+
+#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+ absl::EnableMutexInvariantDebugging(true);
+#endif
+ SetInvariantChecked(true);
+ c0 = 0;
+ c1 = 0;
+ cxt.g0 = 0;
+ cxt.g1 = 0;
+ cxt.iterations = (iterations > 10 ? 10 : iterations);
+ cxt.threads = threads;
+ for (int i = 0; i != threads; i++) {
+ tp.Schedule(std::bind(&EndTest, &c0, &c1, &mu2, &cv2,
+ std::function<void(int)>(
+ std::bind(test, &cxt, std::placeholders::_1))));
+ }
+ mu2.Lock();
+ while (c1 != threads) {
+ cv2.Wait(&mu2);
+ }
+ mu2.Unlock();
+#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+ ABSL_RAW_CHECK(GetInvariantChecked(), "Invariant not checked");
+#endif
+
+ return saved_g0;
+}
+
+// --------------------------------------------------------
+// Test for fix of bug in TryRemove()
+struct TimeoutBugStruct {
+ absl::Mutex mu;
+ bool a;
+ int a_waiter_count;
+};
+
+static void WaitForA(TimeoutBugStruct *x) {
+ x->mu.LockWhen(absl::Condition(&x->a));
+ x->a_waiter_count--;
+ x->mu.Unlock();
+}
+
+static bool NoAWaiters(TimeoutBugStruct *x) { return x->a_waiter_count == 0; }
+
+// Test that a CondVar.Wait(&mutex) can un-block a call to mutex.Await() in
+// another thread.
+TEST(Mutex, CondVarWaitSignalsAwait) {
+ // Use a struct so the lock annotations apply.
+ struct {
+ absl::Mutex barrier_mu;
+ bool barrier GUARDED_BY(barrier_mu) = false;
+
+ absl::Mutex release_mu;
+ bool release GUARDED_BY(release_mu) = false;
+ absl::CondVar released_cv;
+ } state;
+
+ auto pool = CreateDefaultPool();
+
+ // Thread A. Sets barrier, waits for release using Mutex::Await, then
+ // signals released_cv.
+ pool->Schedule([&state] {
+ state.release_mu.Lock();
+
+ state.barrier_mu.Lock();
+ state.barrier = true;
+ state.barrier_mu.Unlock();
+
+ state.release_mu.Await(absl::Condition(&state.release));
+ state.released_cv.Signal();
+ state.release_mu.Unlock();
+ });
+
+ state.barrier_mu.LockWhen(absl::Condition(&state.barrier));
+ state.barrier_mu.Unlock();
+ state.release_mu.Lock();
+ // Thread A is now blocked on release by way of Mutex::Await().
+
+ // Set release. Calling released_cv.Wait() should un-block thread A,
+ // which will signal released_cv. If not, the test will hang.
+ state.release = true;
+ state.released_cv.Wait(&state.release_mu);
+ state.release_mu.Unlock();
+}
+
+// Test that a CondVar.WaitWithTimeout(&mutex) can un-block a call to
+// mutex.Await() in another thread.
+TEST(Mutex, CondVarWaitWithTimeoutSignalsAwait) {
+ // Use a struct so the lock annotations apply.
+ struct {
+ absl::Mutex barrier_mu;
+ bool barrier GUARDED_BY(barrier_mu) = false;
+
+ absl::Mutex release_mu;
+ bool release GUARDED_BY(release_mu) = false;
+ absl::CondVar released_cv;
+ } state;
+
+ auto pool = CreateDefaultPool();
+
+ // Thread A. Sets barrier, waits for release using Mutex::Await, then
+ // signals released_cv.
+ pool->Schedule([&state] {
+ state.release_mu.Lock();
+
+ state.barrier_mu.Lock();
+ state.barrier = true;
+ state.barrier_mu.Unlock();
+
+ state.release_mu.Await(absl::Condition(&state.release));
+ state.released_cv.Signal();
+ state.release_mu.Unlock();
+ });
+
+ state.barrier_mu.LockWhen(absl::Condition(&state.barrier));
+ state.barrier_mu.Unlock();
+ state.release_mu.Lock();
+ // Thread A is now blocked on release by way of Mutex::Await().
+
+ // Set release. Calling released_cv.Wait() should un-block thread A,
+ // which will signal released_cv. If not, the test will hang.
+ state.release = true;
+ EXPECT_TRUE(
+ !state.released_cv.WaitWithTimeout(&state.release_mu, absl::Seconds(10)))
+ << "; Unrecoverable test failure: CondVar::WaitWithTimeout did not "
+ "unblock the absl::Mutex::Await call in another thread.";
+
+ state.release_mu.Unlock();
+}
+
+// Test for regression of a bug in loop of TryRemove()
+TEST(Mutex, MutexTimeoutBug) {
+ auto tp = CreateDefaultPool();
+
+ TimeoutBugStruct x;
+ x.a = false;
+ x.a_waiter_count = 2;
+ tp->Schedule(std::bind(&WaitForA, &x));
+ tp->Schedule(std::bind(&WaitForA, &x));
+ absl::SleepFor(absl::Seconds(1)); // Allow first two threads to hang.
+ // The skip field of the second will point to the first because there are
+ // only two.
+
+  // Now cause a thread waiting on an always-false condition to time out.
+  // This would deadlock when the bug was present.
+ bool always_false = false;
+ x.mu.LockWhenWithTimeout(absl::Condition(&always_false),
+ absl::Milliseconds(500));
+
+  // If we get here, the bug is not present. Clean up the state.
+
+  x.a = true; // wake up the two waiters on A
+ x.mu.Await(absl::Condition(&NoAWaiters, &x)); // wait for them to exit
+ x.mu.Unlock();
+}
+
+struct CondVarWaitDeadlock : testing::TestWithParam<int> {
+ absl::Mutex mu;
+ absl::CondVar cv;
+ bool cond1 = false;
+ bool cond2 = false;
+ bool read_lock1;
+ bool read_lock2;
+ bool signal_unlocked;
+
+ CondVarWaitDeadlock() {
+ read_lock1 = GetParam() & (1 << 0);
+ read_lock2 = GetParam() & (1 << 1);
+ signal_unlocked = GetParam() & (1 << 2);
+ }
+
+ void Waiter1() {
+ if (read_lock1) {
+ mu.ReaderLock();
+ while (!cond1) {
+ cv.Wait(&mu);
+ }
+ mu.ReaderUnlock();
+ } else {
+ mu.Lock();
+ while (!cond1) {
+ cv.Wait(&mu);
+ }
+ mu.Unlock();
+ }
+ }
+
+ void Waiter2() {
+ if (read_lock2) {
+ mu.ReaderLockWhen(absl::Condition(&cond2));
+ mu.ReaderUnlock();
+ } else {
+ mu.LockWhen(absl::Condition(&cond2));
+ mu.Unlock();
+ }
+ }
+};
+
+// Test for a deadlock bug in Mutex::Fer().
+// The sequence of events that lead to the deadlock is:
+// 1. waiter1 blocks on cv in read mode (mu bits = 0).
+// 2. waiter2 blocks on mu in either mode (mu bits = kMuWait).
+// 3. main thread locks mu, sets cond1, unlocks mu (mu bits = kMuWait).
+// 4. main thread signals on cv and this eventually calls Mutex::Fer().
+// Currently Fer wakes waiter1 since mu bits = kMuWait (mutex is unlocked).
+// Before the bug fix Fer neither woke waiter1 nor queued it on mutex,
+// which resulted in deadlock.
+TEST_P(CondVarWaitDeadlock, Test) {
+ auto waiter1 = CreatePool(1);
+ auto waiter2 = CreatePool(1);
+ waiter1->Schedule([this] { this->Waiter1(); });
+ waiter2->Schedule([this] { this->Waiter2(); });
+
+ // Wait while threads block (best-effort is fine).
+ absl::SleepFor(absl::Milliseconds(100));
+
+ // Wake condwaiter.
+ mu.Lock();
+ cond1 = true;
+ if (signal_unlocked) {
+ mu.Unlock();
+ cv.Signal();
+ } else {
+ cv.Signal();
+ mu.Unlock();
+ }
+ waiter1.reset(); // "join" waiter1
+
+ // Wake waiter.
+ mu.Lock();
+ cond2 = true;
+ mu.Unlock();
+ waiter2.reset(); // "join" waiter2
+}
+
+INSTANTIATE_TEST_CASE_P(CondVarWaitDeadlockTest, CondVarWaitDeadlock,
+ ::testing::Range(0, 8),
+ ::testing::PrintToStringParamName());
+
+// --------------------------------------------------------
+// Test for fix of bug in DequeueAllWakeable()
+// Bug was that if there was more than one waiting reader
+// and all should be woken, the most recently blocked one
+// would not be.
+
+struct DequeueAllWakeableBugStruct {
+ absl::Mutex mu;
+ absl::Mutex mu2; // protects all fields below
+ int unfinished_count; // count of unfinished readers; under mu2
+ bool done1; // unfinished_count == 0; under mu2
+ int finished_count; // count of finished readers, under mu2
+ bool done2; // finished_count == 0; under mu2
+};
+
+// Test for regression of a bug in loop of DequeueAllWakeable()
+static void AcquireAsReader(DequeueAllWakeableBugStruct *x) {
+ x->mu.ReaderLock();
+ x->mu2.Lock();
+ x->unfinished_count--;
+ x->done1 = (x->unfinished_count == 0);
+ x->mu2.Unlock();
+  // Make sure that both readers have acquired mu before we release it.
+ absl::SleepFor(absl::Seconds(2));
+ x->mu.ReaderUnlock();
+
+ x->mu2.Lock();
+ x->finished_count--;
+ x->done2 = (x->finished_count == 0);
+ x->mu2.Unlock();
+}
+
+// Test for regression of a bug in loop of DequeueAllWakeable()
+TEST(Mutex, MutexReaderWakeupBug) {
+ auto tp = CreateDefaultPool();
+
+ DequeueAllWakeableBugStruct x;
+ x.unfinished_count = 2;
+ x.done1 = false;
+ x.finished_count = 2;
+ x.done2 = false;
+ x.mu.Lock(); // acquire mu exclusively
+  // queue two threads that will block on reader locks on x.mu
+ tp->Schedule(std::bind(&AcquireAsReader, &x));
+ tp->Schedule(std::bind(&AcquireAsReader, &x));
+ absl::SleepFor(absl::Seconds(1)); // give time for reader threads to block
+ x.mu.Unlock(); // wake them up
+
+ // both readers should finish promptly
+ EXPECT_TRUE(
+ x.mu2.LockWhenWithTimeout(absl::Condition(&x.done1), absl::Seconds(10)));
+ x.mu2.Unlock();
+
+ EXPECT_TRUE(
+ x.mu2.LockWhenWithTimeout(absl::Condition(&x.done2), absl::Seconds(10)));
+ x.mu2.Unlock();
+}
+
+struct LockWhenTestStruct {
+ absl::Mutex mu1;
+ bool cond = false;
+
+ absl::Mutex mu2;
+ bool waiting = false;
+};
+
+static bool LockWhenTestIsCond(LockWhenTestStruct* s) {
+ s->mu2.Lock();
+ s->waiting = true;
+ s->mu2.Unlock();
+ return s->cond;
+}
+
+static void LockWhenTestWaitForIsCond(LockWhenTestStruct* s) {
+ s->mu1.LockWhen(absl::Condition(&LockWhenTestIsCond, s));
+ s->mu1.Unlock();
+}
+
+TEST(Mutex, LockWhen) {
+ LockWhenTestStruct s;
+
+ // Don't use ThreadPool for this test. See b/65107115.
+ std::thread t(LockWhenTestWaitForIsCond, &s);
+ s.mu2.LockWhen(absl::Condition(&s.waiting));
+ s.mu2.Unlock();
+
+ s.mu1.Lock();
+ s.cond = true;
+ s.mu1.Unlock();
+
+ t.join();
+}
+
+// --------------------------------------------------------
+// The following test requires Mutex::ReaderLock to be a real shared
+// lock, which is not the case in all builds.
+#if !defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE)
+
+// Test for a fix of a bug in UnlockSlow() that incorrectly decremented the
+// reader count when putting a thread to sleep to wait for a false condition
+// while the lock was not held.
+
+// For this bug to strike, we make a thread wait on a free mutex with no
+// waiters by causing its wakeup condition to be false. Then the
+// next two acquirers must be readers. The bug caused the lock
+// to be released when the first reader unlocked, rather than when both had.
+
+struct ReaderDecrementBugStruct {
+ bool cond; // to delay first thread (under mu)
+ int done; // reference count (under mu)
+ absl::Mutex mu;
+
+ bool waiting_on_cond; // under mu2
+ bool have_reader_lock; // under mu2
+ bool complete; // under mu2
+ absl::Mutex mu2; // > mu
+};
+
+// L >= mu, L < mu_waiting_on_cond
+static bool IsCond(void *v) {
+ ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v);
+ x->mu2.Lock();
+ x->waiting_on_cond = true;
+ x->mu2.Unlock();
+ return x->cond;
+}
+
+// L >= mu
+static bool AllDone(void *v) {
+ ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v);
+ return x->done == 0;
+}
+
+// L={}
+static void WaitForCond(ReaderDecrementBugStruct *x) {
+ absl::Mutex dummy;
+ absl::MutexLock l(&dummy);
+ x->mu.LockWhen(absl::Condition(&IsCond, x));
+ x->done--;
+ x->mu.Unlock();
+}
+
+// L={}
+static void GetReadLock(ReaderDecrementBugStruct *x) {
+ x->mu.ReaderLock();
+ x->mu2.Lock();
+ x->have_reader_lock = true;
+ x->mu2.Await(absl::Condition(&x->complete));
+ x->mu2.Unlock();
+ x->mu.ReaderUnlock();
+ x->mu.Lock();
+ x->done--;
+ x->mu.Unlock();
+}
+
+// Test for the reader count being decremented incorrectly by a waiter
+// with a false condition.
+TEST(Mutex, MutexReaderDecrementBug) NO_THREAD_SAFETY_ANALYSIS {
+ ReaderDecrementBugStruct x;
+ x.cond = false;
+ x.waiting_on_cond = false;
+ x.have_reader_lock = false;
+ x.complete = false;
+ x.done = 2; // initial ref count
+
+ // Run WaitForCond() and wait for it to sleep
+ std::thread thread1(WaitForCond, &x);
+ x.mu2.LockWhen(absl::Condition(&x.waiting_on_cond));
+ x.mu2.Unlock();
+
+ // Run GetReadLock(), and wait for it to get the read lock
+ std::thread thread2(GetReadLock, &x);
+ x.mu2.LockWhen(absl::Condition(&x.have_reader_lock));
+ x.mu2.Unlock();
+
+ // Get the reader lock ourselves, and release it.
+ x.mu.ReaderLock();
+ x.mu.ReaderUnlock();
+
+ // The lock should be held in read mode by GetReadLock().
+ // If we have the bug, the lock will be free.
+ x.mu.AssertReaderHeld();
+
+ // Wake up all the threads.
+ x.mu2.Lock();
+ x.complete = true;
+ x.mu2.Unlock();
+
+ // TODO(delesley): turn on analysis once lock upgrading is supported.
+ // (This call upgrades the lock from shared to exclusive.)
+ x.mu.Lock();
+ x.cond = true;
+ x.mu.Await(absl::Condition(&AllDone, &x));
+ x.mu.Unlock();
+
+ thread1.join();
+ thread2.join();
+}
+#endif // !ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE
+
+// Test that we correctly handle the situation when a lock is
+// held and then destroyed (w/o unlocking).
+TEST(Mutex, LockedMutexDestructionBug) NO_THREAD_SAFETY_ANALYSIS {
+ for (int i = 0; i != 10; i++) {
+ // Create, lock and destroy 10 locks.
+ const int kNumLocks = 10;
+ auto mu = absl::make_unique<absl::Mutex[]>(kNumLocks);
+ for (int j = 0; j != kNumLocks; j++) {
+ if ((j % 2) == 0) {
+ mu[j].WriterLock();
+ } else {
+ mu[j].ReaderLock();
+ }
+ }
+ }
+}
+
+// --------------------------------------------------------
+// Test for bug with pattern of readers using a condvar. The bug was that if a
+// reader went to sleep on a condition variable while one or more other readers
+// held the lock, but there were no waiters, the reader count (held in the
+// mutex word) would be lost. (This is because Enqueue() had at one time
+// always placed the thread on the Mutex queue. Later (CL 4075610), to
+// tolerate re-entry into Mutex from a Condition predicate, Enqueue() was
+// changed so that it could also place a thread on a condition-variable
+// queue. This
+// introduced the case where Enqueue() returned with an empty queue, and this
+// case was handled incorrectly in one place.)
+
+static void ReaderForReaderOnCondVar(absl::Mutex *mu, absl::CondVar *cv,
+ int *running) {
+ std::random_device dev;
+ std::mt19937 gen(dev());
+ std::uniform_int_distribution<int> random_millis(0, 15);
+ mu->ReaderLock();
+ while (*running == 3) {
+ absl::SleepFor(absl::Milliseconds(random_millis(gen)));
+ cv->WaitWithTimeout(mu, absl::Milliseconds(random_millis(gen)));
+ }
+ mu->ReaderUnlock();
+ mu->Lock();
+ (*running)--;
+ mu->Unlock();
+}
+
+struct True {
+ template <class... Args>
+ bool operator()(Args...) const {
+ return true;
+ }
+};
+
+struct DerivedTrue : True {};
+
+TEST(Mutex, FunctorCondition) {
+ { // Variadic
+ True f;
+ EXPECT_TRUE(absl::Condition(&f).Eval());
+ }
+
+ { // Inherited
+ DerivedTrue g;
+ EXPECT_TRUE(absl::Condition(&g).Eval());
+ }
+
+ { // lambda
+ int value = 3;
+ auto is_zero = [&value] { return value == 0; };
+ absl::Condition c(&is_zero);
+ EXPECT_FALSE(c.Eval());
+ value = 0;
+ EXPECT_TRUE(c.Eval());
+ }
+
+ { // bind
+ int value = 0;
+ auto is_positive = std::bind(std::less<int>(), 0, std::cref(value));
+ absl::Condition c(&is_positive);
+ EXPECT_FALSE(c.Eval());
+ value = 1;
+ EXPECT_TRUE(c.Eval());
+ }
+
+ { // std::function
+ int value = 3;
+ std::function<bool()> is_zero = [&value] { return value == 0; };
+ absl::Condition c(&is_zero);
+ EXPECT_FALSE(c.Eval());
+ value = 0;
+ EXPECT_TRUE(c.Eval());
+ }
+}
+
+static bool IntIsZero(int *x) { return *x == 0; }
+
+// Test for a reader waiting on a condition variable when there are other
+// readers but no waiters.
+TEST(Mutex, TestReaderOnCondVar) {
+ auto tp = CreateDefaultPool();
+ absl::Mutex mu;
+ absl::CondVar cv;
+ int running = 3;
+ tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running));
+ tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running));
+ absl::SleepFor(absl::Seconds(2));
+ mu.Lock();
+ running--;
+ mu.Await(absl::Condition(&IntIsZero, &running));
+ mu.Unlock();
+}
+
+// --------------------------------------------------------
+struct AcquireFromConditionStruct {
+ absl::Mutex mu0; // protects value, done
+  int value;       // times condition function is called; under mu0.
+ bool done; // done with test? under mu0
+ absl::Mutex mu1; // used to attempt to mess up state of mu0
+ absl::CondVar cv; // so the condition function can be invoked from
+ // CondVar::Wait().
+};
+
+static bool ConditionWithAcquire(AcquireFromConditionStruct *x) {
+ x->value++; // count times this function is called
+
+ if (x->value == 2 || x->value == 3) {
+    // On the second and third invocations of this function, sleep for 100ms,
+    // but with the side effect of altering the state of a Mutex other than
+    // the one for which this is a condition. The spec now explicitly allows
+    // this side effect; previously it was illegal.
+ bool always_false = false;
+ x->mu1.LockWhenWithTimeout(absl::Condition(&always_false),
+ absl::Milliseconds(100));
+ x->mu1.Unlock();
+ }
+ ABSL_RAW_CHECK(x->value < 4, "should not be invoked a fourth time");
+
+ // We arrange for the condition to return true on only the 2nd and 3rd calls.
+ return x->value == 2 || x->value == 3;
+}
+
+static void WaitForCond2(AcquireFromConditionStruct *x) {
+  // Wait for the condition on mu0 to become true.
+ x->mu0.LockWhen(absl::Condition(&ConditionWithAcquire, x));
+ x->done = true;
+ x->mu0.Unlock();
+}
+
+// Test for Condition whose function acquires other Mutexes
+TEST(Mutex, AcquireFromCondition) {
+ auto tp = CreateDefaultPool();
+
+ AcquireFromConditionStruct x;
+ x.value = 0;
+ x.done = false;
+ tp->Schedule(
+ std::bind(&WaitForCond2, &x)); // run WaitForCond2() in a thread T
+ // T will hang because the first invocation of ConditionWithAcquire() will
+ // return false.
+ absl::SleepFor(absl::Milliseconds(500)); // allow T time to hang
+
+ x.mu0.Lock();
+ x.cv.WaitWithTimeout(&x.mu0, absl::Milliseconds(500)); // wake T
+ // T will be woken because the Wait() will call ConditionWithAcquire()
+ // for the second time, and it will return true.
+
+ x.mu0.Unlock();
+
+ // T will then acquire the lock and recheck its own condition.
+ // It will find the condition true, as this is the third invocation,
+ // but the use of another Mutex by the calling function will
+ // cause the old mutex implementation to think that the outer
+ // LockWhen() has timed out because the inner LockWhenWithTimeout() did.
+ // T will then check the condition a fourth time because it finds a
+ // timeout occurred. This should not happen in the new
+ // implementation that allows the Condition function to use Mutexes.
+
+ // It should also succeed, even though the Condition function
+ // is being invoked from CondVar::Wait, and thus this thread
+  // is conceptually waiting both on the condition variable, and on mu1.
+
+ x.mu0.LockWhen(absl::Condition(&x.done));
+ x.mu0.Unlock();
+}
+
+// The deadlock detector is not part of non-prod builds, so do not test it.
+#if !defined(ABSL_INTERNAL_USE_NONPROD_MUTEX)
+
+TEST(Mutex, DeadlockDetector) {
+ absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
+
+ // check that we can call ForgetDeadlockInfo() on a lock with the lock held
+ absl::Mutex m1;
+ absl::Mutex m2;
+ absl::Mutex m3;
+ absl::Mutex m4;
+
+ m1.Lock(); // m1 gets ID1
+ m2.Lock(); // m2 gets ID2
+ m3.Lock(); // m3 gets ID3
+ m3.Unlock();
+ m2.Unlock();
+ // m1 still held
+ m1.ForgetDeadlockInfo(); // m1 loses ID
+ m2.Lock(); // m2 gets ID2
+ m3.Lock(); // m3 gets ID3
+ m4.Lock(); // m4 gets ID4
+ m3.Unlock();
+ m2.Unlock();
+ m4.Unlock();
+ m1.Unlock();
+ // Pre b/7636708 the thread local cache remembered that ID1 is assigned to m1.
+ // So, we had a cycle ID1=>ID1=>ID1.
+}
+
+// Bazel has a test "warning" file that programs can write to if the
+// test should pass with a warning. This class disables the warning
+// file until it goes out of scope.
+class ScopedDisableBazelTestWarnings {
+ public:
+ ScopedDisableBazelTestWarnings() {
+#ifdef WIN32
+    char file[MAX_PATH];
+    // GetEnvironmentVariable() returns 0 when the variable is unset, so
+    // guard against copying an uninitialized buffer in that case.
+    DWORD len = GetEnvironmentVariable(kVarName, file, sizeof(file));
+    if (len != 0 && len < sizeof(file)) {
+      warnings_output_file_ = file;
+      SetEnvironmentVariable(kVarName, nullptr);
+    }
+#else
+ const char *file = getenv(kVarName);
+ if (file != nullptr) {
+ warnings_output_file_ = file;
+ unsetenv(kVarName);
+ }
+#endif
+ }
+
+ ~ScopedDisableBazelTestWarnings() {
+ if (!warnings_output_file_.empty()) {
+#ifdef WIN32
+ SetEnvironmentVariable(kVarName, warnings_output_file_.c_str());
+#else
+ setenv(kVarName, warnings_output_file_.c_str(), 0);
+#endif
+ }
+ }
+
+ private:
+ static const char kVarName[];
+ std::string warnings_output_file_;
+};
+const char ScopedDisableBazelTestWarnings::kVarName[] =
+ "TEST_WARNINGS_OUTPUT_FILE";
+
+TEST(Mutex, DeadlockDetectorBazelWarning) {
+ absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
+
+ // Cause deadlock detection to detect something, if it's
+ // compiled in and enabled. But turn off the bazel warning.
+ ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
+
+ absl::Mutex mu0;
+ absl::Mutex mu1;
+ bool got_mu0 = mu0.TryLock();
+ mu1.Lock(); // acquire mu1 while holding mu0
+ if (got_mu0) {
+ mu0.Unlock();
+ }
+ if (mu0.TryLock()) { // try lock shouldn't cause deadlock detector to fire
+ mu0.Unlock();
+ }
+ mu0.Lock(); // acquire mu0 while holding mu1; should get one deadlock
+ // report here
+ mu0.Unlock();
+ mu1.Unlock();
+
+ absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
+}
+
+// This test is tagged with NO_THREAD_SAFETY_ANALYSIS because the
+// annotation-based static thread-safety analysis is not currently
+// predicate-aware and cannot tell if the two for-loops that acquire and
+// release the locks have the same predicates.
+TEST(Mutex, DeadlockDetectorStressTest) NO_THREAD_SAFETY_ANALYSIS {
+ // Stress test: Here we create a large number of locks and use all of them.
+ // If a deadlock detector keeps a full graph of lock acquisition order,
+ // it will likely be too slow for this test to pass.
+ const int n_locks = 1 << 17;
+ auto array_of_locks = absl::make_unique<absl::Mutex[]>(n_locks);
+ for (int i = 0; i < n_locks; i++) {
+ int end = std::min(n_locks, i + 5);
+ // acquire and then release locks i, i+1, ..., i+4
+ for (int j = i; j < end; j++) {
+ array_of_locks[j].Lock();
+ }
+ for (int j = i; j < end; j++) {
+ array_of_locks[j].Unlock();
+ }
+ }
+}
+
+TEST(Mutex, DeadlockIdBug) NO_THREAD_SAFETY_ANALYSIS {
+ // Test a scenario where a cached deadlock graph node id in the
+ // list of held locks is not invalidated when the corresponding
+ // mutex is deleted.
+ absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
+ // Mutex that will be destroyed while being held
+ absl::Mutex *a = new absl::Mutex;
+ // Other mutexes needed by test
+ absl::Mutex b, c;
+
+ // Hold mutex.
+ a->Lock();
+
+ // Force deadlock id assignment by acquiring another lock.
+ b.Lock();
+ b.Unlock();
+
+ // Delete the mutex. The Mutex destructor tries to remove held locks,
+ // but the attempt isn't foolproof. It can fail if:
+ // (a) Deadlock detection is currently disabled.
+ // (b) The destruction is from another thread.
+ // We exploit (a) by temporarily disabling deadlock detection.
+ absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kIgnore);
+ delete a;
+ absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
+
+ // Now acquire another lock which will force a deadlock id assignment.
+ // We should end up getting assigned the same deadlock id that was
+ // freed up when "a" was deleted, which will cause a spurious deadlock
+ // report if the held lock entry for "a" was not invalidated.
+ c.Lock();
+ c.Unlock();
+}
+#endif // !defined(ABSL_INTERNAL_USE_NONPROD_MUTEX)
+
+// --------------------------------------------------------
+// Test for timeouts/deadlines on condition waits that are specified using
+// absl::Duration and absl::Time. For each waiting function we test with
+// a timeout/deadline that has already expired/passed, one that is infinite
+// and so never expires/passes, and one that will expire/pass in the near
+// future.
+
+// Encapsulate a Mutex-protected bool with its associated Condition/CondVar.
+class Cond {
+ public:
+ explicit Cond(bool use_deadline) : use_deadline_(use_deadline), c_(&b_) {}
+
+ void Set(bool v) {
+ absl::MutexLock lock(&mu_);
+ b_ = v;
+ }
+
+ bool AwaitWithTimeout(absl::Duration timeout) {
+ absl::MutexLock lock(&mu_);
+ return use_deadline_ ? mu_.AwaitWithDeadline(c_, absl::Now() + timeout)
+ : mu_.AwaitWithTimeout(c_, timeout);
+ }
+
+ bool LockWhenWithTimeout(absl::Duration timeout) {
+ bool b = use_deadline_ ? mu_.LockWhenWithDeadline(c_, absl::Now() + timeout)
+ : mu_.LockWhenWithTimeout(c_, timeout);
+ mu_.Unlock();
+ return b;
+ }
+
+ bool ReaderLockWhenWithTimeout(absl::Duration timeout) {
+ bool b = use_deadline_
+ ? mu_.ReaderLockWhenWithDeadline(c_, absl::Now() + timeout)
+ : mu_.ReaderLockWhenWithTimeout(c_, timeout);
+ mu_.ReaderUnlock();
+ return b;
+ }
+
+ void Await() {
+ absl::MutexLock lock(&mu_);
+ mu_.Await(c_);
+ }
+
+ void Signal(bool v) {
+ absl::MutexLock lock(&mu_);
+ b_ = v;
+ cv_.Signal();
+ }
+
+ bool WaitWithTimeout(absl::Duration timeout) {
+ absl::MutexLock lock(&mu_);
+ absl::Time deadline = absl::Now() + timeout;
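+    // CondVar waits can wake spuriously, so loop until b_ becomes true or
+    // the wait itself reports expiry (Wait*() returns true on timeout).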
+ if (use_deadline_) {
+ while (!b_ && !cv_.WaitWithDeadline(&mu_, deadline)) {
+ }
+ } else {
+ while (!b_ && !cv_.WaitWithTimeout(&mu_, timeout)) {
+ timeout = deadline - absl::Now(); // recompute timeout
+ }
+ }
+ return b_;
+ }
+
+ void Wait() {
+ absl::MutexLock lock(&mu_);
+ while (!b_) cv_.Wait(&mu_);
+ }
+
+ private:
+ const bool use_deadline_;
+
+  bool b_ = false;
+ absl::Condition c_;
+ absl::CondVar cv_;
+ absl::Mutex mu_;
+};
+
+class OperationTimer {
+ public:
+ OperationTimer() : start_(absl::Now()) {}
+ absl::Duration Get() const { return absl::Now() - start_; }
+
+ private:
+ const absl::Time start_;
+};
+
+static void CheckResults(bool exp_result, bool act_result,
+ absl::Duration exp_duration,
+ absl::Duration act_duration) {
+ ABSL_RAW_CHECK(exp_result == act_result, "CheckResults failed");
+  // Allow for some worst-case scheduling delay and clock skew.
+ ABSL_RAW_CHECK(exp_duration - absl::Milliseconds(40) <= act_duration,
+ "CheckResults failed");
+ ABSL_RAW_CHECK(exp_duration + absl::Milliseconds(150) >= act_duration,
+ "CheckResults failed");
+}
+
+static void TestAwaitTimeout(Cond *cp, absl::Duration timeout, bool exp_result,
+ absl::Duration exp_duration) {
+ OperationTimer t;
+ bool act_result = cp->AwaitWithTimeout(timeout);
+ CheckResults(exp_result, act_result, exp_duration, t.Get());
+}
+
+static void TestLockWhenTimeout(Cond *cp, absl::Duration timeout,
+ bool exp_result, absl::Duration exp_duration) {
+ OperationTimer t;
+ bool act_result = cp->LockWhenWithTimeout(timeout);
+ CheckResults(exp_result, act_result, exp_duration, t.Get());
+}
+
+static void TestReaderLockWhenTimeout(Cond *cp, absl::Duration timeout,
+ bool exp_result,
+ absl::Duration exp_duration) {
+ OperationTimer t;
+ bool act_result = cp->ReaderLockWhenWithTimeout(timeout);
+ CheckResults(exp_result, act_result, exp_duration, t.Get());
+}
+
+static void TestWaitTimeout(Cond *cp, absl::Duration timeout, bool exp_result,
+ absl::Duration exp_duration) {
+ OperationTimer t;
+ bool act_result = cp->WaitWithTimeout(timeout);
+ CheckResults(exp_result, act_result, exp_duration, t.Get());
+}
+
+// Tests with a negative timeout (deadline in the past), which should
+// immediately return the current state of the condition.
+static void TestNegativeTimeouts(absl::synchronization_internal::ThreadPool *tp,
+ Cond *cp) {
+ const absl::Duration negative = -absl::InfiniteDuration();
+ const absl::Duration immediate = absl::ZeroDuration();
+
+ // The condition is already true:
+ cp->Set(true);
+ TestAwaitTimeout(cp, negative, true, immediate);
+ TestLockWhenTimeout(cp, negative, true, immediate);
+ TestReaderLockWhenTimeout(cp, negative, true, immediate);
+ TestWaitTimeout(cp, negative, true, immediate);
+
+ // The condition becomes true, but the timeout has already expired:
+ const absl::Duration delay = absl::Milliseconds(200);
+ cp->Set(false);
+ ScheduleAfter(tp, std::bind(&Cond::Set, cp, true), 3 * delay);
+ TestAwaitTimeout(cp, negative, false, immediate);
+ TestLockWhenTimeout(cp, negative, false, immediate);
+ TestReaderLockWhenTimeout(cp, negative, false, immediate);
+ cp->Await(); // wait for the scheduled Set() to complete
+ cp->Set(false);
+ ScheduleAfter(tp, std::bind(&Cond::Signal, cp, true), delay);
+ TestWaitTimeout(cp, negative, false, immediate);
+ cp->Wait(); // wait for the scheduled Signal() to complete
+
+ // The condition never becomes true:
+ cp->Set(false);
+ TestAwaitTimeout(cp, negative, false, immediate);
+ TestLockWhenTimeout(cp, negative, false, immediate);
+ TestReaderLockWhenTimeout(cp, negative, false, immediate);
+ TestWaitTimeout(cp, negative, false, immediate);
+}
+
+// Tests with an infinite timeout (deadline in the infinite future), which
+// should only return when the condition becomes true.
+static void TestInfiniteTimeouts(absl::synchronization_internal::ThreadPool *tp,
+ Cond *cp) {
+ const absl::Duration infinite = absl::InfiniteDuration();
+ const absl::Duration immediate = absl::ZeroDuration();
+
+ // The condition is already true:
+ cp->Set(true);
+ TestAwaitTimeout(cp, infinite, true, immediate);
+ TestLockWhenTimeout(cp, infinite, true, immediate);
+ TestReaderLockWhenTimeout(cp, infinite, true, immediate);
+ TestWaitTimeout(cp, infinite, true, immediate);
+
+ // The condition becomes true before the (infinite) expiry:
+ const absl::Duration delay = absl::Milliseconds(200);
+ cp->Set(false);
+ ScheduleAfter(tp, std::bind(&Cond::Set, cp, true), delay);
+ TestAwaitTimeout(cp, infinite, true, delay);
+ cp->Set(false);
+ ScheduleAfter(tp, std::bind(&Cond::Set, cp, true), delay);
+ TestLockWhenTimeout(cp, infinite, true, delay);
+ cp->Set(false);
+ ScheduleAfter(tp, std::bind(&Cond::Set, cp, true), delay);
+ TestReaderLockWhenTimeout(cp, infinite, true, delay);
+ cp->Set(false);
+ ScheduleAfter(tp, std::bind(&Cond::Signal, cp, true), delay);
+ TestWaitTimeout(cp, infinite, true, delay);
+}
+
+// Tests with a (small) finite timeout (deadline soon), with the condition
+// becoming true both before and after its expiry.
+static void TestFiniteTimeouts(absl::synchronization_internal::ThreadPool *tp,
+ Cond *cp) {
+ const absl::Duration finite = absl::Milliseconds(400);
+ const absl::Duration immediate = absl::ZeroDuration();
+
+ // The condition is already true:
+ cp->Set(true);
+ TestAwaitTimeout(cp, finite, true, immediate);
+ TestLockWhenTimeout(cp, finite, true, immediate);
+ TestReaderLockWhenTimeout(cp, finite, true, immediate);
+ TestWaitTimeout(cp, finite, true, immediate);
+
+ // The condition becomes true before the expiry:
+ const absl::Duration delay1 = finite / 2;
+ cp->Set(false);
+ ScheduleAfter(tp, std::bind(&Cond::Set, cp, true), delay1);
+ TestAwaitTimeout(cp, finite, true, delay1);
+ cp->Set(false);
+ ScheduleAfter(tp, std::bind(&Cond::Set, cp, true), delay1);
+ TestLockWhenTimeout(cp, finite, true, delay1);
+ cp->Set(false);
+ ScheduleAfter(tp, std::bind(&Cond::Set, cp, true), delay1);
+ TestReaderLockWhenTimeout(cp, finite, true, delay1);
+ cp->Set(false);
+ ScheduleAfter(tp, std::bind(&Cond::Signal, cp, true), delay1);
+ TestWaitTimeout(cp, finite, true, delay1);
+
+ // The condition becomes true, but the timeout has already expired:
+ const absl::Duration delay2 = finite * 2;
+ cp->Set(false);
+ ScheduleAfter(tp, std::bind(&Cond::Set, cp, true), 3 * delay2);
+ TestAwaitTimeout(cp, finite, false, finite);
+ TestLockWhenTimeout(cp, finite, false, finite);
+ TestReaderLockWhenTimeout(cp, finite, false, finite);
+ cp->Await(); // wait for the scheduled Set() to complete
+ cp->Set(false);
+ ScheduleAfter(tp, std::bind(&Cond::Signal, cp, true), delay2);
+ TestWaitTimeout(cp, finite, false, finite);
+ cp->Wait(); // wait for the scheduled Signal() to complete
+
+ // The condition never becomes true:
+ cp->Set(false);
+ TestAwaitTimeout(cp, finite, false, finite);
+ TestLockWhenTimeout(cp, finite, false, finite);
+ TestReaderLockWhenTimeout(cp, finite, false, finite);
+ TestWaitTimeout(cp, finite, false, finite);
+}
+
+TEST(Mutex, Timeouts) {
+ auto tp = CreateDefaultPool();
+ for (bool use_deadline : {false, true}) {
+ Cond cond(use_deadline);
+ TestNegativeTimeouts(tp.get(), &cond);
+ TestInfiniteTimeouts(tp.get(), &cond);
+ TestFiniteTimeouts(tp.get(), &cond);
+ }
+}
+
+TEST(Mutex, Logging) {
+ // Allow user to look at logging output
+ absl::Mutex logged_mutex;
+ logged_mutex.EnableDebugLog("fido_mutex");
+ absl::CondVar logged_cv;
+ logged_cv.EnableDebugLog("rover_cv");
+ logged_mutex.Lock();
+ logged_cv.WaitWithTimeout(&logged_mutex, absl::Milliseconds(20));
+ logged_mutex.Unlock();
+ logged_mutex.ReaderLock();
+ logged_mutex.ReaderUnlock();
+ logged_mutex.Lock();
+ logged_mutex.Unlock();
+ logged_cv.Signal();
+ logged_cv.SignalAll();
+}
+
+// --------------------------------------------------------
+
+// Generate the vector of thread counts for tests parameterized on thread count.
+static std::vector<int> AllThreadCountValues() {
+ if (kExtendedTest) {
+ return {2, 4, 8, 10, 16, 20, 24, 30, 32};
+ }
+ return {2, 4, 10};
+}
+
+// A test fixture parameterized by thread count.
+class MutexVariableThreadCountTest : public ::testing::TestWithParam<int> {};
+
+// Instantiate the above with AllThreadCountValues().
+INSTANTIATE_TEST_CASE_P(ThreadCounts, MutexVariableThreadCountTest,
+ ::testing::ValuesIn(AllThreadCountValues()),
+ ::testing::PrintToStringParamName());
+
+// Reduces iterations by some factor for slow platforms
+// (determined empirically).
+static int ScaleIterations(int x) {
+  // ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE is set by the Mutex implementations
+  // that use either std::mutex or pthread_mutex_t. Use it as a marker
+  // for the slow implementations.
+#if defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE)
+ return x / 10;
+#else
+ return x;
+#endif
+}
+
+TEST_P(MutexVariableThreadCountTest, Mutex) {
+ int threads = GetParam();
+ int iterations = ScaleIterations(10000000) / threads;
+ int operations = threads * iterations;
+ EXPECT_EQ(RunTest(&TestMu, threads, iterations, operations), operations);
+}
+
+TEST_P(MutexVariableThreadCountTest, Try) {
+ int threads = GetParam();
+ int iterations = 1000000 / threads;
+ int operations = iterations * threads;
+ EXPECT_EQ(RunTest(&TestTry, threads, iterations, operations), operations);
+}
+
+TEST_P(MutexVariableThreadCountTest, R20ms) {
+ int threads = GetParam();
+ int iterations = 100;
+ int operations = iterations * threads;
+ EXPECT_EQ(RunTest(&TestR20ms, threads, iterations, operations), 0);
+}
+
+TEST_P(MutexVariableThreadCountTest, RW) {
+ int threads = GetParam();
+ int iterations = ScaleIterations(20000000) / threads;
+ int operations = iterations * threads;
+ EXPECT_EQ(RunTest(&TestRW, threads, iterations, operations), operations / 2);
+}
+
+TEST_P(MutexVariableThreadCountTest, Await) {
+ int threads = GetParam();
+ int iterations = ScaleIterations(500000);
+ int operations = iterations;
+ EXPECT_EQ(RunTest(&TestAwait, threads, iterations, operations), operations);
+}
+
+TEST_P(MutexVariableThreadCountTest, SignalAll) {
+ int threads = GetParam();
+ int iterations = 200000 / threads;
+ int operations = iterations;
+ EXPECT_EQ(RunTest(&TestSignalAll, threads, iterations, operations),
+ operations);
+}
+
+TEST(Mutex, Signal) {
+ int threads = 2; // TestSignal must use two threads
+ int iterations = 200000;
+ int operations = iterations;
+ EXPECT_EQ(RunTest(&TestSignal, threads, iterations, operations), operations);
+}
+
+TEST(Mutex, Timed) {
+ int threads = 10; // Use a fixed thread count of 10
+ int iterations = 1000;
+ int operations = iterations;
+ EXPECT_EQ(RunTest(&TestCVTimeout, threads, iterations, operations),
+ operations);
+}
+
+TEST(Mutex, CVTime) {
+ int threads = 10; // Use a fixed thread count of 10
+ int iterations = 1;
+ EXPECT_EQ(RunTest(&TestCVTime, threads, iterations, 1),
+ threads * iterations);
+}
+
+TEST(Mutex, MuTime) {
+ int threads = 10; // Use a fixed thread count of 10
+ int iterations = 1;
+ EXPECT_EQ(RunTest(&TestMuTime, threads, iterations, 1), threads * iterations);
+}
+
+} // namespace
diff --git a/absl/synchronization/notification.cc b/absl/synchronization/notification.cc
new file mode 100644
index 0000000..ed8cc90
--- /dev/null
+++ b/absl/synchronization/notification.cc
@@ -0,0 +1,84 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/notification.h"
+
+#include <atomic>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/time/time.h"
+
+namespace absl {
+void Notification::Notify() {
+ MutexLock l(&this->mutex_);
+
+#ifndef NDEBUG
+ if (ABSL_PREDICT_FALSE(notified_yet_.load(std::memory_order_relaxed))) {
+ ABSL_RAW_LOG(
+ FATAL,
+ "Notify() method called more than once for Notification object %p",
+ static_cast<void *>(this));
+ }
+#endif
+
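+  // The release store pairs with the acquire load in
+  // HasBeenNotifiedInternal(), so everything the notifying thread did before
+  // Notify() is visible to threads that observe the "notified" state.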
+ notified_yet_.store(true, std::memory_order_release);
+}
+
+Notification::~Notification() {
+ // Make sure that the thread running Notify() exits before the object is
+ // destructed.
+ MutexLock l(&this->mutex_);
+}
+
+static inline bool HasBeenNotifiedInternal(
+ const std::atomic<bool> *notified_yet) {
+ return notified_yet->load(std::memory_order_acquire);
+}
+
+bool Notification::HasBeenNotified() const {
+ return HasBeenNotifiedInternal(&this->notified_yet_);
+}
+
+void Notification::WaitForNotification() const {
+ if (!HasBeenNotifiedInternal(&this->notified_yet_)) {
+ this->mutex_.LockWhen(Condition(&HasBeenNotifiedInternal,
+ &this->notified_yet_));
+ this->mutex_.Unlock();
+ }
+}
+
+bool Notification::WaitForNotificationWithTimeout(
+ absl::Duration timeout) const {
+ bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
+ if (!notified) {
+ notified = this->mutex_.LockWhenWithTimeout(
+ Condition(&HasBeenNotifiedInternal, &this->notified_yet_), timeout);
+ this->mutex_.Unlock();
+ }
+ return notified;
+}
+
+bool Notification::WaitForNotificationWithDeadline(absl::Time deadline) const {
+ bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
+ if (!notified) {
+ notified = this->mutex_.LockWhenWithDeadline(
+ Condition(&HasBeenNotifiedInternal, &this->notified_yet_), deadline);
+ this->mutex_.Unlock();
+ }
+ return notified;
+}
+
+} // namespace absl
diff --git a/absl/synchronization/notification.h b/absl/synchronization/notification.h
new file mode 100644
index 0000000..107932f
--- /dev/null
+++ b/absl/synchronization/notification.h
@@ -0,0 +1,112 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// notification.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `Notification` abstraction, which allows threads
+// to receive notification of a single occurrence of a single event.
+//
+// The `Notification` object maintains a private boolean "notified" state that
+// transitions to `true` at most once. The `Notification` class provides the
+// following primary member functions:
+// * `HasBeenNotified()` to query its state
+// * `WaitForNotification*()` to have threads wait until the "notified" state
+// is `true`.
+// * `Notify()` to set the notification's "notified" state to `true` and
+// notify all waiting threads that the event has occurred.
+// This method may only be called once.
+//
+// Note that while `Notify()` may only be called once, it is perfectly valid to
+// call any of the `WaitForNotification*()` methods multiple times, from
+// multiple threads -- even after the notification's "notified" state has been
+// set -- in which case those methods will immediately return.
+//
+// Note that the lifetime of a `Notification` requires careful consideration;
+// it might not be safe to destroy a notification after calling `Notify()` since
+// it is still legal for other threads to call `WaitForNotification*()` methods
+// on the notification. However, observers responding to a "notified" state of
+// `true` can safely delete the notification without interfering with the call
+// to `Notify()` in the other thread.
+//
+// Memory ordering: For any threads X and Y, if X calls `Notify()`, then any
+// action taken by X before it calls `Notify()` is visible to thread Y after:
+// * Y returns from `WaitForNotification()`, or
+// * Y receives a `true` return value from `HasBeenNotified()`,
+// `WaitForNotificationWithTimeout()`, or `WaitForNotificationWithDeadline()`.
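+//
+// Example (an illustrative sketch, not part of this interface; the worker
+// thread and its body are hypothetical):
+//
+//   absl::Notification started;
+//   std::thread worker([&started] {
+//     // ... perform one-time initialization ...
+//     started.Notify();
+//   });
+//   started.WaitForNotification();  // returns once Notify() has run
+//   worker.join();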
+
+#ifndef ABSL_SYNCHRONIZATION_NOTIFICATION_H_
+#define ABSL_SYNCHRONIZATION_NOTIFICATION_H_
+
+#include <atomic>
+
+#include "absl/synchronization/mutex.h"
+#include "absl/time/time.h"
+
+namespace absl {
+
+// -----------------------------------------------------------------------------
+// Notification
+// -----------------------------------------------------------------------------
+class Notification {
+ public:
+ // Initializes the "notified" state to unnotified.
+ Notification() : notified_yet_(false) {}
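+  // Initializes the "notified" state to `prenotify`.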
+ explicit Notification(bool prenotify) : notified_yet_(prenotify) {}
+ Notification(const Notification&) = delete;
+ Notification& operator=(const Notification&) = delete;
+ ~Notification();
+
+ // Notification::HasBeenNotified()
+ //
+ // Returns the value of the notification's internal "notified" state.
+ bool HasBeenNotified() const;
+
+ // Notification::WaitForNotification()
+ //
+ // Blocks the calling thread until the notification's "notified" state is
+ // `true`. Note that if `Notify()` has been previously called on this
+ // notification, this function will immediately return.
+ void WaitForNotification() const;
+
+ // Notification::WaitForNotificationWithTimeout()
+ //
+ // Blocks until either the notification's "notified" state is `true` (which
+ // may occur immediately) or the timeout has elapsed, returning the value of
+ // its "notified" state in either case.
+ bool WaitForNotificationWithTimeout(absl::Duration timeout) const;
+
+ // Notification::WaitForNotificationWithDeadline()
+ //
+ // Blocks until either the notification's "notified" state is `true` (which
+ // may occur immediately) or the deadline has expired, returning the value of
+ // its "notified" state in either case.
+ bool WaitForNotificationWithDeadline(absl::Time deadline) const;
+
+ // Notification::Notify()
+ //
+ // Sets the "notified" state of this notification to `true` and wakes waiting
+ // threads. Note: do not call `Notify()` multiple times on the same
+ // `Notification`; calling `Notify()` more than once on the same notification
+ // results in undefined behavior.
+ void Notify();
+
+ private:
+ mutable Mutex mutex_;
+ std::atomic<bool> notified_yet_; // written under mutex_
+};
+
+} // namespace absl
+#endif // ABSL_SYNCHRONIZATION_NOTIFICATION_H_
diff --git a/absl/synchronization/notification_test.cc b/absl/synchronization/notification_test.cc
new file mode 100644
index 0000000..9b3b6a5
--- /dev/null
+++ b/absl/synchronization/notification_test.cc
@@ -0,0 +1,124 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/notification.h"
+
+#include <thread> // NOLINT(build/c++11)
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+
+// A thread-safe class that holds a counter.
+class ThreadSafeCounter {
+ public:
+ ThreadSafeCounter() : count_(0) {}
+
+ void Increment() {
+ MutexLock lock(&mutex_);
+ ++count_;
+ }
+
+ int Get() const {
+ MutexLock lock(&mutex_);
+ return count_;
+ }
+
+ void WaitUntilGreaterOrEqual(int n) {
+ MutexLock lock(&mutex_);
+ auto cond = [this, n]() { return count_ >= n; };
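+    // absl::Condition accepts a pointer to a bool-returning callable,
+    // including this local lambda; it must outlive the Await() call.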
+ mutex_.Await(Condition(&cond));
+ }
+
+ private:
+ mutable Mutex mutex_;
+ int count_;
+};
+
+// Runs the |i|'th worker thread for the tests in BasicTests(). Increments the
+// |ready_counter|, waits on the |notification|, and then increments the
+// |done_counter|.
+static void RunWorker(int i, ThreadSafeCounter* ready_counter,
+ Notification* notification,
+ ThreadSafeCounter* done_counter) {
+ ready_counter->Increment();
+ notification->WaitForNotification();
+ done_counter->Increment();
+}
+
+// Tests that the |notification| properly blocks and awakens threads. Assumes
+// that the |notification| is not yet triggered. If |notify_before_waiting| is
+// true, the |notification| is triggered before any threads are created, so the
+// threads never block in WaitForNotification(). Otherwise, the |notification|
+// is triggered at a later point when most threads are likely to be blocking in
+// WaitForNotification().
+static void BasicTests(bool notify_before_waiting, Notification* notification) {
+ EXPECT_FALSE(notification->HasBeenNotified());
+ EXPECT_FALSE(
+ notification->WaitForNotificationWithTimeout(absl::Milliseconds(0)));
+ EXPECT_FALSE(notification->WaitForNotificationWithDeadline(absl::Now()));
+
+ absl::Time start = absl::Now();
+ EXPECT_FALSE(
+ notification->WaitForNotificationWithTimeout(absl::Milliseconds(50)));
+ EXPECT_LE(start + absl::Milliseconds(50), absl::Now());
+
+ ThreadSafeCounter ready_counter;
+ ThreadSafeCounter done_counter;
+
+ if (notify_before_waiting) {
+ notification->Notify();
+ }
+
+ // Create a bunch of threads that increment the |done_counter| after being
+ // notified.
+ const int kNumThreads = 10;
+ std::vector<std::thread> workers;
+ for (int i = 0; i < kNumThreads; ++i) {
+ workers.push_back(std::thread(&RunWorker, i, &ready_counter, notification,
+ &done_counter));
+ }
+
+ if (!notify_before_waiting) {
+ ready_counter.WaitUntilGreaterOrEqual(kNumThreads);
+
+ // Workers have not been notified yet, so the |done_counter| should be
+ // unmodified.
+ EXPECT_EQ(0, done_counter.Get());
+
+ notification->Notify();
+ }
+
+ // After notifying and then joining the workers, both counters should be
+ // fully incremented.
+ notification->WaitForNotification(); // should exit immediately
+ EXPECT_TRUE(notification->HasBeenNotified());
+ EXPECT_TRUE(notification->WaitForNotificationWithTimeout(absl::Seconds(0)));
+ EXPECT_TRUE(notification->WaitForNotificationWithDeadline(absl::Now()));
+ for (std::thread& worker : workers) {
+ worker.join();
+ }
+ EXPECT_EQ(kNumThreads, ready_counter.Get());
+ EXPECT_EQ(kNumThreads, done_counter.Get());
+}
+
+TEST(NotificationTest, SanityTest) {
+ Notification local_notification1, local_notification2;
+ BasicTests(false, &local_notification1);
+ BasicTests(true, &local_notification2);
+}
+
+} // namespace absl