author     Abseil Team <absl-team@google.com>      2019-01-24 07:23:40 -0800
committer  Xiaoyi Zhang <zhangxy988@gmail.com>     2019-01-24 11:10:30 -0500
commit     0dffca4e36791c7beeda04da10460b534283948a
tree       e2884680d3dfa3fa5b7b4557c900a643dd989b50
parent     6b4201f9ef650637510a21b8d6cbcc3bee4a606f
Export of internal Abseil changes.
--
5804cc13b413412988248835459b90cd15ec43d9 by Abseil Team <absl-team@google.com>:
Mark raw_hash_set::clear() with the ABSL_ATTRIBUTE_REINITIALIZES attribute.
This prevents false positives in the clang-tidy check bugprone-use-after-move:
clear() can be called on a moved-from raw_hash_set without any warnings, and
the raw_hash_set is thereafter regarded as initialized again.
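As a minimal illustrative sketch (not part of this change; it assumes the usual
absl::flat_hash_set wrapper built on raw_hash_set), the pattern the attribute is
meant to bless looks like this:

  #include <utility>
  #include "absl/container/flat_hash_set.h"

  void Reuse() {
    absl::flat_hash_set<int> a = {1, 2, 3};
    absl::flat_hash_set<int> b = std::move(a);  // `a` is now moved-from.
    a.clear();    // Marked ABSL_ATTRIBUTE_REINITIALIZES: treated as a re-init.
    a.insert(4);  // No bugprone-use-after-move warning after the clear().
  }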
PiperOrigin-RevId: 230717196
--
ff5961a5600ae19b69a9cba6912126cdf2858f38 by CJ Johnson <johnsoncj@google.com>:
Swaps DisableIfIntegral<> for EnableIfAtLeastInputIterator<> on the iterator-taking member functions of InlinedVector
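For illustration only (a simplified sketch; the real alias lives inside
InlinedVector and is built on an internal IsAtLeastInputIterator trait), the new
constraint is an enable_if on the iterator category rather than a ban on
integral types:

  #include <iterator>
  #include <type_traits>

  // Accept `Iterator` only if its iterator_category converts to
  // std::input_iterator_tag (assumes a SFINAE-friendly std::iterator_traits,
  // i.e. C++17).
  template <typename Iterator>
  using EnableIfAtLeastInputIterator = typename std::enable_if<
      std::is_convertible<
          typename std::iterator_traits<Iterator>::iterator_category,
          std::input_iterator_tag>::value>::type;

  // Overload resolution now rejects calls like F(5, 10) here, so two integral
  // arguments fall through to the (size_type, const_reference) overloads.
  template <typename InputIterator,
            EnableIfAtLeastInputIterator<InputIterator>* = nullptr>
  void AssignRange(InputIterator first, InputIterator last);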
PiperOrigin-RevId: 230559521
--
3f9754ccbeecbd40f235c6f2465279e045ff51d9 by Derek Mauro <dmauro@google.com>:
Import GitHub PR 254
https://github.com/abseil/abseil-cpp/pull/254
Fixes warnings from -Wclass-memaccess (base_internal::ThreadIdentity
with no trivial copy-assignment).
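For context, a hedged repro of the class of warning being silenced (hypothetical
type and function names, GCC 8+ with -Wclass-memaccess enabled):

  #include <cstring>

  struct HasNontrivialAssign {
    HasNontrivialAssign& operator=(const HasNontrivialAssign& other) {
      value = other.value;
      return *this;
    }
    int value = 0;
  };

  void Clear(HasNontrivialAssign* p) {
    // GCC: "clearing an object of type ... with no trivial copy-assignment;
    // use assignment ... instead [-Wclass-memaccess]"
    std::memset(p, 0, sizeof(*p));
  }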
PiperOrigin-RevId: 230536048
--
8af03a654ce9a4a7f55384bc7eb1ed64878ac2ec by Chris Kennelly <ckennelly@google.com>:
absl: cap SpinLock backoff to 4ms
The current backoff logic has three problems:
1. It can produce excessively large values (up to 256ms), which can hurt
tail latency. That bound was chosen a long time ago and is worth reconsidering.
2. It has no lower bound, so on any iteration it can produce a very small
value, leading to unnecessary CPU consumption.
3. Its lower bound does not grow with the number of iterations, so if the
SpinLock stays locked for a very prolonged time, a waiter still keeps
waking up periodically.
Rework the logic to solve these problems.
Add a lower bound of 128us; no code should rely on the absence of episodic
delays in this range, since they can occur anywhere.
Lower the upper bound to 4ms. A thread sleeping for 4ms does not consume
significant CPU time (see below).
Grow the lower bound with the number of iterations.
This is the CPU consumption of a process calling usleep(x) in a loop
(sampled with ps):
64us -> 4.0%
128us -> 2.7%
256us -> 3.5%
512us -> 2.8%
1024us -> 1.6%
2048us -> 0.6%
4096us -> 0.3%
8192us -> 0.0%
Sleeps of a few milliseconds do not consume significant CPU time.
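For reference, a standalone sketch restating the reworked delay computation (the
same logic as the SpinLockSuggestedDelayNS() change in the spinlock_wait.cc diff
further down; 1us is approximated as 1024ns):

  #include <cstdint>

  int SuggestedDelayNS(int loop, uint64_t r /* pseudo-random state */) {
    if (loop < 0 || loop > 32) loop = 32;  // clamp the iteration count
    const int kMinDelay = 128 << 10;       // lower bound: ~128us in ns
    int delay = kMinDelay << (loop / 8);   // double every 8 iterations, up to 16x
    // Randomize into [delay, 2*delay), giving roughly 128us..4ms overall.
    return delay | ((delay - 1) & static_cast<int>(r));
  }

For example, loop counts 0..7 land in roughly [128us, 256us), 8..15 in
[256us, 512us), and the clamp at 32 caps out around [2ms, 4ms).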
PiperOrigin-RevId: 230534015
--
37ebba92289ca556cb2412cd8b3cb4c1ead3def7 by Samuel Benzaquen <sbenza@google.com>:
Add override and dispose hooks to the hashtable sampler.
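A hedged usage sketch for the new hook (ExportSample() is hypothetical;
HashtablezSampler, Global(), and SetDisposeCallback() are the internal APIs
added or used in the hashtablez_sampler diff below):

  #include "absl/container/internal/hashtablez_sampler.h"

  namespace my_profiler {

  // Hypothetical consumer: flush each sample to an external sink at the moment
  // its hashtable is unregistered. `info` is only valid inside the callback.
  void ExportSample(const absl::container_internal::HashtablezInfo& info);

  void InstallDisposeHook() {
    absl::container_internal::HashtablezSampler::Global().SetDisposeCallback(
        [](const absl::container_internal::HashtablezInfo& info) {
          ExportSample(info);
        });
  }

  }  // namespace my_profiler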
PiperOrigin-RevId: 230353438
--
89c8f90175233ce9964eb3412df04e8a3cff0c0f by Andy Getzendanner <durandal@google.com>:
Fix a comment typo.
PiperOrigin-RevId: 229986838
GitOrigin-RevId: 5804cc13b413412988248835459b90cd15ec43d9
Change-Id: Iedb5e2cc9c0b924635c1c87b537780ab6b5b899f
-rw-r--r--  absl/base/internal/spinlock_linux.inc | 17
-rw-r--r--  absl/base/internal/spinlock_wait.cc | 13
-rw-r--r--  absl/container/BUILD.bazel | 28
-rw-r--r--  absl/container/CMakeLists.txt | 25
-rw-r--r--  absl/container/inlined_vector.h | 10
-rw-r--r--  absl/container/internal/hashtablez_force_sampling.cc | 24
-rw-r--r--  absl/container/internal/hashtablez_force_sampling_test.cc | 60
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc | 16
-rw-r--r--  absl/container/internal/hashtablez_sampler.h | 16
-rw-r--r--  absl/container/internal/hashtablez_sampler_force_weak_definition.cc | 27
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc | 25
-rw-r--r--  absl/container/internal/raw_hash_set.h | 2
-rw-r--r--  absl/time/clock.cc | 2
13 files changed, 236 insertions, 29 deletions
diff --git a/absl/base/internal/spinlock_linux.inc b/absl/base/internal/spinlock_linux.inc
index 94c861dc..3bbd4954 100644
--- a/absl/base/internal/spinlock_linux.inc
+++ b/absl/base/internal/spinlock_linux.inc
@@ -51,17 +51,12 @@ extern "C" {
 ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
     std::atomic<uint32_t> *w, uint32_t value, int loop,
     absl::base_internal::SchedulingMode) {
-  if (loop != 0) {
-    int save_errno = errno;
-    struct timespec tm;
-    tm.tv_sec = 0;
-    // Increase the delay; we expect (but do not rely on) explicit wakeups.
-    // We don't rely on explicit wakeups because we intentionally allow for
-    // a race on the kSpinLockSleeper bit.
-    tm.tv_nsec = 16 * absl::base_internal::SpinLockSuggestedDelayNS(loop);
-    syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
-    errno = save_errno;
-  }
+  int save_errno = errno;
+  struct timespec tm;
+  tm.tv_sec = 0;
+  tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+  syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
+  errno = save_errno;
 }
 
 ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic<uint32_t> *w,
diff --git a/absl/base/internal/spinlock_wait.cc b/absl/base/internal/spinlock_wait.cc
index 365a7939..7e4f4352 100644
--- a/absl/base/internal/spinlock_wait.cc
+++ b/absl/base/internal/spinlock_wait.cc
@@ -65,17 +65,14 @@ int SpinLockSuggestedDelayNS(int loop) {
   r = 0x5deece66dLL * r + 0xb;  // numbers from nrand48()
   delay_rand.store(r, std::memory_order_relaxed);
 
-  r <<= 16;  // 48-bit random number now in top 48-bits.
   if (loop < 0 || loop > 32) {  // limit loop to 0..32
     loop = 32;
   }
-  // loop>>3 cannot exceed 4 because loop cannot exceed 32.
-  // Select top 20..24 bits of lower 48 bits,
-  // giving approximately 0ms to 16ms.
-  // Mean is exponential in loop for first 32 iterations, then 8ms.
-  // The futex path multiplies this by 16, since we expect explicit wakeups
-  // almost always on that path.
-  return static_cast<int>(r >> (44 - (loop >> 3)));
+  const int kMinDelay = 128 << 10;  // 128us
+  // Double delay every 8 iterations, up to 16x (2ms).
+  int delay = kMinDelay << (loop / 8);
+  // Randomize in delay..2*delay range, for resulting 128us..4ms range.
+  return delay | ((delay - 1) & static_cast<int>(r));
 }
 
 }  // namespace base_internal
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index 87fc7349..acdbc473 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -447,8 +447,20 @@ cc_library(
 )
 
 cc_library(
+    name = "hashtablez_force_sampling",
+    srcs = ["internal/hashtablez_force_sampling.cc"],
+    copts = ABSL_DEFAULT_COPTS,
+    deps = [
+        ":hashtablez_sampler",
+    ],
+)
+
+cc_library(
     name = "hashtablez_sampler",
-    srcs = ["internal/hashtablez_sampler.cc"],
+    srcs = [
+        "internal/hashtablez_sampler.cc",
+        "internal/hashtablez_sampler_force_weak_definition.cc",
+    ],
     hdrs = ["internal/hashtablez_sampler.h"],
     copts = ABSL_DEFAULT_COPTS,
     deps = [
@@ -476,6 +488,20 @@ cc_test(
     ],
 )
 
+cc_test(
+    name = "hashtablez_force_sampling_test",
+    srcs = ["internal/hashtablez_force_sampling_test.cc"],
+    tags = [
+        "no_test_darwin_x86_64",
+        "no_test_msvc_x64",
+    ],
+    deps = [
+        ":hashtablez_force_sampling",
+        ":hashtablez_sampler",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
 cc_library(
     name = "node_hash_policy",
     hdrs = ["internal/node_hash_policy.h"],
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index 21c9cb95..822388bd 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -444,6 +444,7 @@ absl_cc_library(
     "internal/hashtablez_sampler.h"
   SRCS
     "internal/hashtablez_sampler.cc"
+    "internal/hashtablez_sampler_force_weak_definition.cc"
  COPTS
    ${ABSL_DEFAULT_COPTS}
  DEPS
@@ -465,6 +466,30 @@ absl_cc_test(
 
 absl_cc_library(
   NAME
+    hashtablez_force_sampling
+  SRCS
+    "internal/hashtablez_force_sampling.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base
+    absl::have_sse
+    absl::synchronization
+)
+
+absl_cc_test(
+  NAME
+    hashtablez_force_sampling_test
+  SRCS
+    "internal/hashtablez_force_sampling_test.cc"
+  DEPS
+    absl::hashtablez_force_sampling
+    absl::hashtablez_sampler
+    gmock_main
+)
+
+absl_cc_library(
+  NAME
     hashtable_debug
   HDRS
     "internal/hashtable_debug.h"
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 649e904a..6625d43f 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -82,10 +82,6 @@ class InlinedVector {
       std::forward_iterator_tag>;
 
   template <typename Iterator>
-  using DisableIfIntegral =
-      absl::enable_if_t<!std::is_integral<Iterator>::value>;
-
-  template <typename Iterator>
   using EnableIfAtLeastInputIterator =
       absl::enable_if_t<IsAtLeastInputIterator<Iterator>::value>;
 
@@ -152,7 +148,8 @@ class InlinedVector {
   // NOTE: The `enable_if` prevents ambiguous interpretation between a call to
   // this constructor with two integral arguments and a call to the above
   // `InlinedVector(size_type, const_reference)` constructor.
-  template <typename InputIterator, DisableIfIntegral<InputIterator>* = nullptr>
+  template <typename InputIterator,
+            EnableIfAtLeastInputIterator<InputIterator>* = nullptr>
   InlinedVector(InputIterator first, InputIterator last,
                 const allocator_type& alloc = allocator_type())
       : allocator_and_tag_(alloc) {
@@ -516,7 +513,8 @@ class InlinedVector {
 
   // Overload of `InlinedVector::assign()` to replace the contents of the
   // inlined vector with values constructed from the range [`first`, `last`).
-  template <typename InputIterator, DisableIfIntegral<InputIterator>* = nullptr>
+  template <typename InputIterator,
+            EnableIfAtLeastInputIterator<InputIterator>* = nullptr>
   void assign(InputIterator first, InputIterator last) {
     AssignRange(first, last);
   }
diff --git a/absl/container/internal/hashtablez_force_sampling.cc b/absl/container/internal/hashtablez_force_sampling.cc
new file mode 100644
index 00000000..868976ec
--- /dev/null
+++ b/absl/container/internal/hashtablez_force_sampling.cc
@@ -0,0 +1,24 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+namespace absl {
+namespace container_internal {
+
+// See hashtablez_sampler.h for details.
+extern "C" const bool kAbslContainerInternalSampleEverything = true;
+
+}  // namespace container_internal
+}  // namespace absl
diff --git a/absl/container/internal/hashtablez_force_sampling_test.cc b/absl/container/internal/hashtablez_force_sampling_test.cc
new file mode 100644
index 00000000..9ff1046a
--- /dev/null
+++ b/absl/container/internal/hashtablez_force_sampling_test.cc
@@ -0,0 +1,60 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstddef>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hashtablez_sampler.h"
+
+namespace absl {
+namespace container_internal {
+
+class HashtablezInfoHandlePeer {
+ public:
+  static bool IsSampled(const HashtablezInfoHandle& h) {
+    return h.info_ != nullptr;
+  }
+};
+
+namespace {
+
+bool samples[3]{true, true, true};
+
+// We do this test in a global object to test that this works even before main.
+struct Global {
+  Global() {
+    // By default it is sampled.
+    samples[0] = HashtablezInfoHandlePeer::IsSampled(Sample());
+
+    // Even with a large parameter, it is sampled.
+    SetHashtablezSampleParameter(100);
+    samples[1] = HashtablezInfoHandlePeer::IsSampled(Sample());
+
+    // Even if we turn it off, it is still sampled.
+    SetHashtablezEnabled(false);
+    samples[2] = HashtablezInfoHandlePeer::IsSampled(Sample());
+  }
+} global;
+
+TEST(kAbslContainerInternalSampleEverything, Works) {
+  EXPECT_THAT(samples, testing::Each(true));
+  EXPECT_TRUE(kAbslContainerInternalSampleEverything);
+  // One more after main()
+  EXPECT_TRUE(HashtablezInfoHandlePeer::IsSampled(Sample()));
+}
+
+}  // namespace
+}  // namespace container_internal
+}  // namespace absl
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 1ba95645..7c411140 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -116,6 +116,11 @@ HashtablezSampler& HashtablezSampler::Global() {
   return *sampler;
 }
 
+HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback(
+    DisposeCallback f) {
+  return dispose_.exchange(f, std::memory_order_relaxed);
+}
+
 HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
 
 HashtablezInfo::~HashtablezInfo() = default;
@@ -138,7 +143,7 @@ void HashtablezInfo::PrepareForSampling() {
 }
 
 HashtablezSampler::HashtablezSampler()
-    : dropped_samples_(0), size_estimate_(0), all_(nullptr) {
+    : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
   absl::MutexLock l(&graveyard_.init_mu);
   graveyard_.dead = &graveyard_;
 }
@@ -161,6 +166,10 @@ void HashtablezSampler::PushNew(HashtablezInfo* sample) {
 }
 
 void HashtablezSampler::PushDead(HashtablezInfo* sample) {
+  if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
+    dispose(*sample);
+  }
+
   absl::MutexLock graveyard_lock(&graveyard_.init_mu);
   absl::MutexLock sample_lock(&sample->init_mu);
   sample->dead = graveyard_.dead;
@@ -220,6 +229,11 @@ int64_t HashtablezSampler::Iterate(
 }
 
 HashtablezInfo* SampleSlow(int64_t* next_sample) {
+  if (kAbslContainerInternalSampleEverything) {
+    *next_sample = 1;
+    return HashtablezSampler::Global().Register();
+  }
+
   bool first = *next_sample < 0;
   *next_sample = GetGeometricVariable(
       g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index c42f1842..126a0ade 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -183,6 +183,13 @@ class HashtablezSampler {
   // Unregisters the sample.
   void Unregister(HashtablezInfo* sample);
 
+  // The dispose callback will be called on all samples the moment they are
+  // being unregistered. Only affects samples that are unregistered after the
+  // callback has been set.
+  // Returns the previous callback.
+  using DisposeCallback = void (*)(const HashtablezInfo&);
+  DisposeCallback SetDisposeCallback(DisposeCallback f);
+
   // Iterates over all the registered `StackInfo`s.  Returning the number of
   // samples that have been dropped.
   int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
@@ -222,6 +229,8 @@ class HashtablezSampler {
   //
   std::atomic<HashtablezInfo*> all_;
   HashtablezInfo graveyard_;
+
+  std::atomic<DisposeCallback> dispose_;
 };
 
 // Enables or disables sampling for Swiss tables.
@@ -233,6 +242,13 @@ void SetHashtablezSampleParameter(int32_t rate);
 
 // Sets a soft max for the number of samples that will be kept.
 void SetHashtablezMaxSamples(int32_t max);
 
+// Configuration override.
+// This allows process-wide sampling without depending on order of
+// initialization of static storage duration objects.
+// The definition of this constant is weak, which allows us to inject a
+// different value for it at link time.
+extern "C" const bool kAbslContainerInternalSampleEverything;
+
 }  // namespace container_internal
 }  // namespace absl
diff --git a/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
new file mode 100644
index 00000000..38a3f260
--- /dev/null
+++ b/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
@@ -0,0 +1,27 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include "absl/base/attributes.h"
+
+namespace absl {
+namespace container_internal {
+
+// See hashtablez_sampler.h for details.
+extern "C" ABSL_ATTRIBUTE_WEAK const bool
+    kAbslContainerInternalSampleEverything = false;
+
+}  // namespace container_internal
+}  // namespace absl
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index 31e7641a..f9ee941a 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -302,6 +302,31 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
   stop.Notify();
 }
 
+TEST(HashtablezSamplerTest, Callback) {
+  HashtablezSampler sampler;
+
+  auto* info1 = Register(&sampler, 1);
+  auto* info2 = Register(&sampler, 2);
+
+  static const HashtablezInfo* expected;
+
+  auto callback = [](const HashtablezInfo& info) {
+    // We can't use `info` outside of this callback because the object will be
+    // disposed as soon as we return from here.
+    EXPECT_EQ(&info, expected);
+  };
+
+  // Set the callback.
+  EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
+  expected = info1;
+  sampler.Unregister(info1);
+
+  // Unset the callback.
+  EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
+  expected = nullptr;  // no more calls.
+  sampler.Unregister(info2);
+}
+
 }  // namespace
 }  // namespace container_internal
 }  // namespace absl
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 8f6469ff..8e3fa02d 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -1035,7 +1035,7 @@ class raw_hash_set {
   size_t capacity() const { return capacity_; }
   size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
 
-  void clear() {
+  ABSL_ATTRIBUTE_REINITIALIZES void clear() {
     // Iterating over this container is O(bucket_count()). When bucket_count()
     // is much greater than size(), iteration becomes prohibitively expensive.
    // For clear() it is more important to reuse the allocated array when the
diff --git a/absl/time/clock.cc b/absl/time/clock.cc
index 74ee1401..4863f643 100644
--- a/absl/time/clock.cc
+++ b/absl/time/clock.cc
@@ -379,7 +379,7 @@ static uint64_t UpdateLastSample(
 //
 // Manually mark this 'noinline' to minimize stack frame size of the fast
 // path.  Without this, sometimes a compiler may inline this big block of code
-// into the fast past.  That causes lots of register spills and reloads that
+// into the fast path.  That causes lots of register spills and reloads that
 // are unnecessary unless the slow path is taken.
 //
 // TODO(absl-team): Remove this attribute when our compiler is smart enough
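Usage note (a sketch, not part of the diff): because the default definition of
kAbslContainerInternalSampleEverything above is weak, a binary can force
always-on sampling either by linking the hashtablez_force_sampling target or by
providing its own strong definition in one translation unit, mirroring
hashtablez_force_sampling.cc:

  // force_sampling_override.cc -- hypothetical file name in a user binary.
  #include "absl/container/internal/hashtablez_sampler.h"

  namespace absl {
  namespace container_internal {

  // Strong definition; overrides the weak `false` default at link time, so
  // SampleSlow() registers every hashtable regardless of the sample parameter.
  extern "C" const bool kAbslContainerInternalSampleEverything = true;

  }  // namespace container_internal
  }  // namespace absl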