author     Abseil Team <absl-team@google.com>        2018-05-22 16:34:47 -0700
committer  katzdm <katzdm@google.com>                2018-05-23 11:44:15 -0400
commit     014f02a3eca93ef88163c3b408c86998ecf6572c (patch)
tree       9c0dbee2aba37c06b954813bb649b576a93f4d97 /absl/synchronization
parent     3e671c7821e6f4d80db8089e7e580e5f7448a495 (diff)
- 45b4111d940009bc7b3ebf621c9cb9293c60344a Use copied value i in lambda by Abseil Team <absl-team@google.com>
- b726b3102f8439b8480b6ff52fc3660162fa0bd9 Fix MSVC compiler warning on 32-bit platforms (GitHub iss... by Derek Mauro <dmauro@google.com>
- a8a29e636c85bd7d82c0cbc824a1c2e498764337 Explicitly forbid to specify the template parameter to Wr... by Abseil Team <absl-team@google.com>
- 566a1d903266fdbfdcf758401c356a0c6703422d Add missing license header to BUILD file. by Alex Strelnikov <strel@google.com>
- ef1c0642cde0bbad62bbb30715256b232a4ab817 Fix BUILD file header. by Alex Strelnikov <strel@google.com>
- b6e2cf00f808ee32b9eb7b3226af79d628742c20 Release GraphCycles microbenchmark. by Alex Strelnikov <strel@google.com>
- f592d78f549e7a242bf2bb4858a26645a655eac3 Release Mutex microbenchmarks. by Alex Strelnikov <strel@google.com>
GitOrigin-RevId: 45b4111d940009bc7b3ebf621c9cb9293c60344a
Change-Id: I82885ae176952a764574c6d4616e312a977407b2
Diffstat (limited to 'absl/synchronization')
-rw-r--r--  absl/synchronization/BUILD.bazel                         29
-rw-r--r--  absl/synchronization/internal/graphcycles.cc             28
-rw-r--r--  absl/synchronization/internal/graphcycles_benchmark.cc   46
-rw-r--r--  absl/synchronization/mutex.cc                            19
-rw-r--r--  absl/synchronization/mutex_benchmark.cc                  96
5 files changed, 188 insertions, 30 deletions
diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel
index 05376903..67ce7ff9 100644
--- a/absl/synchronization/BUILD.bazel
+++ b/absl/synchronization/BUILD.bazel
@@ -39,6 +39,7 @@ cc_library(
     ],
     deps = [
         "//absl/base",
+        "//absl/base:base_internal",
         "//absl/base:core_headers",
         "//absl/base:malloc_internal",
     ],
@@ -119,6 +120,20 @@ cc_test(
     ],
 )
 
+cc_test(
+    name = "graphcycles_benchmark",
+    srcs = ["internal/graphcycles_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    tags = [
+        "benchmark",
+    ],
+    deps = [
+        ":graphcycles_internal",
+        "//absl/base",
+        "@com_github_google_benchmark//:benchmark",
+    ],
+)
+
 cc_library(
     name = "thread_pool",
     testonly = 1,
@@ -149,6 +164,20 @@ cc_test(
 )
 
 cc_test(
+    name = "mutex_benchmark",
+    srcs = ["mutex_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    tags = ["benchmark"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":synchronization",
+        ":thread_pool",
+        "//absl/base",
+        "@com_github_google_benchmark//:benchmark",
+    ],
+)
+
+cc_test(
     name = "notification_test",
     size = "small",
     srcs = ["notification_test.cc"],
diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc
index 28ad172c..ab1f3f84 100644
--- a/absl/synchronization/internal/graphcycles.cc
+++ b/absl/synchronization/internal/graphcycles.cc
@@ -37,6 +37,7 @@
 #include <algorithm>
 #include <array>
 
+#include "absl/base/internal/hide_ptr.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/spinlock.h"
 
@@ -276,18 +277,6 @@ inline uint32_t NodeVersion(GraphId id) {
   return static_cast<uint32_t>(id.handle >> 32);
 }
 
-// We need to hide Mutexes (or other deadlock detection's pointers)
-// from the leak detector.  Xor with an arbitrary number with high bits set.
-static const uintptr_t kHideMask = static_cast<uintptr_t>(0xF03A5F7BF03A5F7Bll);
-
-static inline uintptr_t MaskPtr(void *ptr) {
-  return reinterpret_cast<uintptr_t>(ptr) ^ kHideMask;
-}
-
-static inline void* UnmaskPtr(uintptr_t word) {
-  return reinterpret_cast<void*>(word ^ kHideMask);
-}
-
 struct Node {
   int32_t rank;       // rank number assigned by Pearce-Kelly algorithm
   uint32_t version;   // Current version number
@@ -309,7 +298,7 @@ class PointerMap {
   }
 
   int32_t Find(void* ptr) {
-    auto masked = MaskPtr(ptr);
+    auto masked = base_internal::HidePtr(ptr);
     for (int32_t i = table_[Hash(ptr)]; i != -1;) {
       Node* n = (*nodes_)[i];
       if (n->masked_ptr == masked) return i;
@@ -327,7 +316,7 @@ class PointerMap {
   int32_t Remove(void* ptr) {
     // Advance through linked list while keeping track of the
     // predecessor slot that points to the current entry.
-    auto masked = MaskPtr(ptr);
+    auto masked = base_internal::HidePtr(ptr);
     for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
       int32_t index = *slot;
       Node* n = (*nodes_)[index];
@@ -395,7 +384,7 @@ bool GraphCycles::CheckInvariants() const {
   NodeSet ranks;  // Set of ranks seen so far.
   for (uint32_t x = 0; x < r->nodes_.size(); x++) {
     Node* nx = r->nodes_[x];
-    void* ptr = UnmaskPtr(nx->masked_ptr);
+    void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
     if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
       ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
     }
@@ -427,7 +416,7 @@ GraphId GraphCycles::GetId(void* ptr) {
     n->version = 1;  // Avoid 0 since it is used by InvalidGraphId()
     n->visited = false;
     n->rank = rep_->nodes_.size;
-    n->masked_ptr = MaskPtr(ptr);
+    n->masked_ptr = base_internal::HidePtr(ptr);
     n->nstack = 0;
     n->priority = 0;
     rep_->nodes_.push_back(n);
@@ -439,7 +428,7 @@ GraphId GraphCycles::GetId(void* ptr) {
     int32_t r = rep_->free_nodes_.back();
     rep_->free_nodes_.pop_back();
     Node* n = rep_->nodes_[r];
-    n->masked_ptr = MaskPtr(ptr);
+    n->masked_ptr = base_internal::HidePtr(ptr);
     n->nstack = 0;
     n->priority = 0;
     rep_->ptrmap_.Add(ptr, r);
@@ -461,7 +450,7 @@ void GraphCycles::RemoveNode(void* ptr) {
   }
   x->in.clear();
   x->out.clear();
-  x->masked_ptr = MaskPtr(nullptr);
+  x->masked_ptr = base_internal::HidePtr<void>(nullptr);
   if (x->version == std::numeric_limits<uint32_t>::max()) {
     // Cannot use x any more
   } else {
@@ -472,7 +461,8 @@ void GraphCycles::RemoveNode(void* ptr) {
 
 void* GraphCycles::Ptr(GraphId id) {
   Node* n = FindNode(rep_, id);
-  return n == nullptr ? nullptr : UnmaskPtr(n->masked_ptr);
+  return n == nullptr ? nullptr
+                      : base_internal::UnhidePtr<void>(n->masked_ptr);
 }
 
 bool GraphCycles::HasNode(GraphId node) {
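The MaskPtr/UnmaskPtr helpers deleted above (and the MaskMu helper deleted from mutex.cc below) are consolidated into the shared base_internal::HidePtr/UnhidePtr utilities pulled in via the new "absl/base/internal/hide_ptr.h" include and the ":base_internal" BUILD dependency. The technique itself is unchanged: XOR the pointer with a constant whose high bits are set, so a leak detector scanning memory for live pointers does not mistake the stored word for a reference to the object, while the transformation stays trivially reversible. A minimal sketch of the idea, reusing the constant from the removed kHideMask (illustrative only, not the actual hide_ptr.h implementation):

    #include <cstdint>

    // XOR with a constant that has high bits set: the stored word no longer
    // looks like a plausible address to a conservative leak scanner, and
    // applying the same XOR again recovers the original pointer (x ^ k ^ k == x).
    constexpr std::uintptr_t kHideMask =
        static_cast<std::uintptr_t>(0xF03A5F7BF03A5F7BULL);

    inline std::uintptr_t HidePtr(void* ptr) {
      return reinterpret_cast<std::uintptr_t>(ptr) ^ kHideMask;
    }

    inline void* UnhidePtr(std::uintptr_t word) {
      return reinterpret_cast<void*>(word ^ kHideMask);
    }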
+ +#include "absl/synchronization/internal/graphcycles.h" + +#include <algorithm> +#include <cstdint> +#include <vector> + +#include "benchmark/benchmark.h" +#include "absl/base/internal/raw_logging.h" + +namespace { + +void BM_StressTest(benchmark::State& state) { + const int num_nodes = state.range(0); + while (state.KeepRunningBatch(num_nodes)) { + absl::synchronization_internal::GraphCycles g; + std::vector<absl::synchronization_internal::GraphId> nodes(num_nodes); + for (int i = 0; i < num_nodes; i++) { + nodes[i] = g.GetId(reinterpret_cast<void*>(static_cast<uintptr_t>(i))); + } + for (int i = 0; i < num_nodes; i++) { + int end = std::min(num_nodes, i + 5); + for (int j = i + 1; j < end; j++) { + ABSL_RAW_CHECK(g.InsertEdge(nodes[i], nodes[j]), ""); + } + } + } +} +BENCHMARK(BM_StressTest)->Range(2048, 1048576); + +} // namespace + +BENCHMARK_MAIN(); diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc index bd54a4dc..e6525046 100644 --- a/absl/synchronization/mutex.cc +++ b/absl/synchronization/mutex.cc @@ -43,6 +43,7 @@ #include "absl/base/dynamic_annotations.h" #include "absl/base/internal/atomic_hook.h" #include "absl/base/internal/cycleclock.h" +#include "absl/base/internal/hide_ptr.h" #include "absl/base/internal/low_level_alloc.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/spinlock.h" @@ -272,13 +273,6 @@ static absl::base_internal::SpinLock synch_event_mu( // Can't be too small, as it's used for deadlock detection information. static const uint32_t kNSynchEvent = 1031; -// We need to hide Mutexes (or other deadlock detection's pointers) -// from the leak detector. -static const uintptr_t kHideMask = static_cast<uintptr_t>(0xF03A5F7BF03A5F7BLL); -static uintptr_t MaskMu(const void *mu) { - return reinterpret_cast<uintptr_t>(mu) ^ kHideMask; -} - static struct SynchEvent { // this is a trivial hash table for the events // struct is freed when refcount reaches 0 int refcount GUARDED_BY(synch_event_mu); @@ -314,7 +308,8 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr, SynchEvent *e; // first look for existing SynchEvent struct.. synch_event_mu.Lock(); - for (e = synch_event[h]; e != nullptr && e->masked_addr != MaskMu(addr); + for (e = synch_event[h]; + e != nullptr && e->masked_addr != base_internal::HidePtr(addr); e = e->next) { } if (e == nullptr) { // no SynchEvent struct found; make one. 
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index bd54a4dc..e6525046 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -43,6 +43,7 @@
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/atomic_hook.h"
 #include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/hide_ptr.h"
 #include "absl/base/internal/low_level_alloc.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/spinlock.h"
@@ -272,13 +273,6 @@ static absl::base_internal::SpinLock synch_event_mu(
 // Can't be too small, as it's used for deadlock detection information.
 static const uint32_t kNSynchEvent = 1031;
 
-// We need to hide Mutexes (or other deadlock detection's pointers)
-// from the leak detector.
-static const uintptr_t kHideMask = static_cast<uintptr_t>(0xF03A5F7BF03A5F7BLL);
-static uintptr_t MaskMu(const void *mu) {
-  return reinterpret_cast<uintptr_t>(mu) ^ kHideMask;
-}
-
 static struct SynchEvent {     // this is a trivial hash table for the events
   // struct is freed when refcount reaches 0
   int refcount GUARDED_BY(synch_event_mu);
@@ -314,7 +308,8 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
   SynchEvent *e;
   // first look for existing SynchEvent struct..
   synch_event_mu.Lock();
-  for (e = synch_event[h]; e != nullptr && e->masked_addr != MaskMu(addr);
+  for (e = synch_event[h];
+       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
        e = e->next) {
   }
   if (e == nullptr) {  // no SynchEvent struct found; make one.
@@ -325,7 +320,7 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
     e = reinterpret_cast<SynchEvent *>(
         base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
     e->refcount = 2;    // one for return value, one for linked list
-    e->masked_addr = MaskMu(addr);
+    e->masked_addr = base_internal::HidePtr(addr);
     e->invariant = nullptr;
     e->arg = nullptr;
     e->log = false;
@@ -367,7 +362,8 @@ static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
   SynchEvent *e;
   synch_event_mu.Lock();
   for (pe = &synch_event[h];
-       (e = *pe) != nullptr && e->masked_addr != MaskMu(addr); pe = &e->next) {
+       (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
+       pe = &e->next) {
   }
   bool del = false;
   if (e != nullptr) {
@@ -388,7 +384,8 @@ static SynchEvent *GetSynchEvent(const void *addr) {
   uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
   SynchEvent *e;
   synch_event_mu.Lock();
-  for (e = synch_event[h]; e != nullptr && e->masked_addr != MaskMu(addr);
+  for (e = synch_event[h];
+       e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
        e = e->next) {
   }
   if (e != nullptr) {
diff --git a/absl/synchronization/mutex_benchmark.cc b/absl/synchronization/mutex_benchmark.cc
new file mode 100644
index 00000000..d91071b7
--- /dev/null
+++ b/absl/synchronization/mutex_benchmark.cc
@@ -0,0 +1,96 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "absl/base/internal/sysinfo.h"
+#include "absl/synchronization/blocking_counter.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/synchronization/mutex.h"
+
+namespace {
+
+// Measure the overhead of conditions on mutex release (when they must be
+// evaluated).  Mutex has (some) support for equivalence classes allowing
+// Conditions with the same function/argument to potentially not be multiply
+// evaluated.
+//
+// num_classes==0 is used for the special case of every waiter being distinct.
+void BM_ConditionWaiters(benchmark::State& state) {
+  int num_classes = state.range(0);
+  int num_waiters = state.range(1);
+
+  struct Helper {
+    static void Waiter(absl::BlockingCounter* init, absl::Mutex* m, int* p) {
+      init->DecrementCount();
+      m->LockWhen(absl::Condition(
+          static_cast<bool (*)(int*)>([](int* v) { return *v == 0; }), p));
+      m->Unlock();
+    }
+  };
+
+  if (num_classes == 0) {
+    // No equivalence classes.
+    num_classes = num_waiters;
+  }
+
+  absl::BlockingCounter init(num_waiters);
+  absl::Mutex mu;
+  std::vector<int> equivalence_classes(num_classes, 1);
+
+  // Must be declared last to be destroyed first.
+  absl::synchronization_internal::ThreadPool pool(num_waiters);
+
+  for (int i = 0; i < num_waiters; i++) {
+    // Mutex considers Conditions with the same function and argument
+    // to be equivalent.
+    pool.Schedule([&, i] {
+      Helper::Waiter(&init, &mu, &equivalence_classes[i % num_classes]);
+    });
+  }
+  init.Wait();
+
+  for (auto _ : state) {
+    mu.Lock();
+    mu.Unlock();  // Each unlock requires Condition evaluation for our waiters.
+  }
+
+  mu.Lock();
+  for (int i = 0; i < num_classes; i++) {
+    equivalence_classes[i] = 0;
+  }
+  mu.Unlock();
+}
+
+#ifdef THREAD_SANITIZER
+// ThreadSanitizer can't handle 8192 threads.
+constexpr int kMaxConditionWaiters = 2048;
+#else
+constexpr int kMaxConditionWaiters = 8192;
+#endif
+BENCHMARK(BM_ConditionWaiters)->RangePair(0, 2, 1, kMaxConditionWaiters);
+
+void BM_ContendedMutex(benchmark::State& state) {
+  static absl::Mutex* mu = new absl::Mutex;
+  for (auto _ : state) {
+    absl::MutexLock lock(mu);
+  }
+}
+BENCHMARK(BM_ContendedMutex)->Threads(1);
+BENCHMARK(BM_ContendedMutex)->ThreadPerCpu();
+
+}  // namespace
+
+BENCHMARK_MAIN();
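The headline change in this import, "Use copied value i in lambda" (45b4111), is the [&, i] capture in the Schedule call above: the loop index is captured by copy while the surrounding locals are captured by reference, so each scheduled task keeps its own stable value of i instead of racing on the loop variable. A standalone sketch of why the copy matters (hypothetical code, not part of this commit):

    #include <cstdio>
    #include <functional>
    #include <vector>

    int main() {
      std::vector<std::function<void()>> tasks;
      for (int i = 0; i < 3; ++i) {
        // [&, i]: default capture by reference, but i is copied. Capturing i
        // by reference instead would leave every task reading the loop
        // variable after it changed (or after it went out of scope).
        tasks.push_back([&, i] { std::printf("task %d\n", i); });
      }
      for (auto& t : tasks) t();  // prints: task 0, task 1, task 2
      return 0;
    }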