Diffstat (limited to 'absl/container/internal')
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc   83
-rw-r--r--  absl/container/internal/raw_hash_set.h          20
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc     2
3 files changed, 30 insertions, 75 deletions
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 054e898..0a7ef61 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -21,6 +21,7 @@
#include <limits>
#include "absl/base/attributes.h"
+#include "absl/base/internal/exponential_biased.h"
#include "absl/container/internal/have_sse.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
@@ -37,77 +38,13 @@ ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20};
-// Returns the next pseudo-random value.
-// pRNG is: aX+b mod c with a = 0x5DEECE66D, b = 0xB, c = 1<<48
-// This is the lrand64 generator.
-uint64_t NextRandom(uint64_t rnd) {
- const uint64_t prng_mult = uint64_t{0x5DEECE66D};
- const uint64_t prng_add = 0xB;
- const uint64_t prng_mod_power = 48;
- const uint64_t prng_mod_mask = ~(~uint64_t{0} << prng_mod_power);
- return (prng_mult * rnd + prng_add) & prng_mod_mask;
-}
-
-// Generates a geometric variable with the specified mean.
-// This is done by generating a random number between 0 and 1 and applying
-// the inverse cumulative distribution function for an exponential.
-// Specifically: Let m be the inverse of the sample period, then
-// the probability distribution function is m*exp(-mx) so the CDF is
-// p = 1 - exp(-mx), so
-// q = 1 - p = exp(-mx)
-// log_e(q) = -mx
-// -log_e(q)/m = x
-// log_2(q) * (-log_e(2) * 1/m) = x
-// In the code, q is actually in the range 1 to 2**26, hence the -26 below
-//
-int64_t GetGeometricVariable(int64_t mean) {
#if ABSL_HAVE_THREAD_LOCAL
- thread_local
-#else // ABSL_HAVE_THREAD_LOCAL
- // SampleSlow and hence GetGeometricVariable is guarded by a single mutex when
- // there are not thread locals. Thus, a single global rng is acceptable for
- // that case.
- static
-#endif // ABSL_HAVE_THREAD_LOCAL
- uint64_t rng = []() {
- // We don't get well distributed numbers from this so we call
- // NextRandom() a bunch to mush the bits around. We use a global_rand
- // to handle the case where the same thread (by memory address) gets
- // created and destroyed repeatedly.
- ABSL_CONST_INIT static std::atomic<uint32_t> global_rand(0);
- uint64_t r = reinterpret_cast<uint64_t>(&rng) +
- global_rand.fetch_add(1, std::memory_order_relaxed);
- for (int i = 0; i < 20; ++i) {
- r = NextRandom(r);
- }
- return r;
- }();
-
- rng = NextRandom(rng);
-
- // Take the top 26 bits as the random number
- // (This plus the 1<<58 sampling bound give a max possible step of
- // 5194297183973780480 bytes.)
- const uint64_t prng_mod_power = 48; // Number of bits in prng
- // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
- // under piii debug for some binaries.
- double q = static_cast<uint32_t>(rng >> (prng_mod_power - 26)) + 1.0;
- // Put the computed p-value through the CDF of a geometric.
- double interval = (log2(q) - 26) * (-std::log(2.0) * mean);
-
- // Very large values of interval overflow int64_t. If we happen to
- // hit such improbable condition, we simply cheat and clamp interval
- // to largest supported value.
- if (interval > static_cast<double>(std::numeric_limits<int64_t>::max() / 2)) {
- return std::numeric_limits<int64_t>::max() / 2;
- }
-
- // Small values of interval are equivalent to just sampling next time.
- if (interval < 1) {
- return 1;
- }
- return static_cast<int64_t>(interval);
-}
+thread_local absl::base_internal::ExponentialBiased
+ g_exponential_biased_generator;
+#else
+ABSL_CONST_INIT static absl::base_internal::ExponentialBiased
+ g_exponential_biased_generator;
+#endif
} // namespace
@@ -253,8 +190,12 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) {
}
bool first = *next_sample < 0;
- *next_sample = GetGeometricVariable(
+ *next_sample = g_exponential_biased_generator.Get(
g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+ // Small values of interval are equivalent to just sampling next time.
+ if (*next_sample < 1) {
+ *next_sample = 1;
+ }
// g_hashtablez_enabled can be dynamically flipped, we need to set a threshold
// low enough that we will start sampling in a reasonable time, so we just use
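
The deleted GetGeometricVariable drew the next sampling interval by the
inverse-CDF method: take a uniform q in (0, 1], then -log(q) * mean is an
exponential variate with the requested mean. That math, together with the
thread-local lrand48-style PRNG and its seeding dance, now lives in the shared
absl::base_internal::ExponentialBiased helper. Below is a minimal sketch of
the same technique using <random>; SampleInterval is an illustrative name, not
Abseil API:

#include <cmath>
#include <cstdint>
#include <limits>
#include <random>

int64_t SampleInterval(int64_t mean) {
  thread_local std::mt19937_64 rng{std::random_device{}()};
  // Uniform q in (0, 1]: take 53 random bits and scale into the unit range.
  double q = ((rng() >> 11) + 1.0) * (1.0 / (uint64_t{1} << 53));
  // Inverse CDF of the exponential distribution: -ln(q) * mean.
  double interval = -std::log(q) * static_cast<double>(mean);
  // Clamp as the deleted code did: enormous draws would overflow int64_t.
  if (interval > static_cast<double>(std::numeric_limits<int64_t>::max() / 2)) {
    return std::numeric_limits<int64_t>::max() / 2;
  }
  // Intervals below 1 just mean "sample the very next allocation".
  if (interval < 1) return 1;
  return static_cast<int64_t>(interval);
}

ExponentialBiased::Get evidently performs no such floor itself, which is why
the hunk above re-adds the *next_sample < 1 clamp inline in SampleSlow.
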
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 42b3c46..9992ba4 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -614,13 +614,17 @@ class raw_hash_set {
iterator() {}
// PRECONDITION: not an end() iterator.
- reference operator*() const { return PolicyTraits::element(slot_); }
+ reference operator*() const {
+ /* To be enabled: assert_is_full(); */
+ return PolicyTraits::element(slot_);
+ }
// PRECONDITION: not an end() iterator.
pointer operator->() const { return &operator*(); }
// PRECONDITION: not an end() iterator.
iterator& operator++() {
+ /* To be enabled: assert_is_full(); */
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -634,6 +638,8 @@ class raw_hash_set {
}
friend bool operator==(const iterator& a, const iterator& b) {
+ /* To be enabled: a.assert_is_valid(); */
+ /* To be enabled: b.assert_is_valid(); */
return a.ctrl_ == b.ctrl_;
}
friend bool operator!=(const iterator& a, const iterator& b) {
@@ -644,6 +650,11 @@ class raw_hash_set {
iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end()
iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {}
+ void assert_is_full() const { assert(IsFull(*ctrl_)); }
+ void assert_is_valid() const {
+ assert(!ctrl_ || IsFull(*ctrl_) || *ctrl_ == kSentinel);
+ }
+
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
// ctrl is not necessarily aligned to Group::kWidth. It is also likely
@@ -1155,7 +1166,7 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- assert(it != end());
+ it.assert_is_full();
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
@@ -1172,12 +1183,14 @@ class raw_hash_set {
template <typename H, typename E>
void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
assert(this != &src);
- for (auto it = src.begin(), e = src.end(); it != e; ++it) {
+ for (auto it = src.begin(), e = src.end(); it != e;) {
+ auto next = std::next(it);
if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
PolicyTraits::element(it.slot_))
.second) {
src.erase_meta_only(it);
}
+ it = next;
}
}
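
Previously merge() incremented it after erase_meta_only had already marked its
slot deleted; that happened to work because erase_meta_only only rewrites
control bytes, but it breaks the "not erased" precondition the new iterator
assertions are meant to enforce. Capturing std::next(it) first is the standard
advance-before-erase idiom. The same pattern with a standard container
(MoveEvens is a hypothetical helper for illustration):

#include <iterator>
#include <unordered_set>

// Move even values from src into dst without ever advancing an erased
// iterator: grab the successor before any erase can invalidate it.
void MoveEvens(std::unordered_set<int>& src, std::unordered_set<int>& dst) {
  for (auto it = src.begin(), e = src.end(); it != e;) {
    auto next = std::next(it);
    if (*it % 2 == 0 && dst.insert(*it).second) {
      src.erase(it);  // invalidates only it; next remains valid
    }
    it = next;
  }
}
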
@@ -1187,6 +1200,7 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
+ position.inner_.assert_is_full();
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
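
The two new assertions encode different preconditions: dereference, increment,
erase, and extract require an iterator positioned on a full (live) slot, while
equality comparison must additionally accept default-constructed and end()
iterators. A minimal sketch of the distinction, assuming the usual SwissTable
control-byte encoding (this is not the real raw_hash_set code):

#include <cassert>
#include <cstdint>

using ctrl_t = int8_t;
constexpr ctrl_t kDeleted = -2;   // tombstone: fails both assertions
constexpr ctrl_t kSentinel = -1;  // terminator that end() points at

// Full slots store 7 bits of the hash, so any non-negative byte is "full".
inline bool IsFull(ctrl_t c) { return c >= 0; }

struct IterSketch {
  ctrl_t* ctrl_ = nullptr;
  // operator*, operator++, erase(), extract(): the element must be alive.
  void assert_is_full() const { assert(IsFull(*ctrl_)); }
  // operator==: also legal on default-constructed (null ctrl_) and end()
  // (sentinel) iterators, but never on erased or empty slots.
  void assert_is_valid() const {
    assert(!ctrl_ || IsFull(*ctrl_) || *ctrl_ == kSentinel);
  }
};
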
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index ed4ca8c..33cfa72 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -1837,7 +1837,7 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
- constexpr char kDeathMsg[] = "it != end";
+ constexpr char kDeathMsg[] = "IsFull";
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}
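
The matcher changes because t.erase(t.end()) now dies inside assert_is_full(),
and assert() aborts with the stringified failing expression, making "IsFull" a
stable substring on every platform with assertions enabled. A stand-alone
illustration of the mechanism; IsFull and EraseAt here are local stand-ins,
not the Abseil internals, and the test assumes GoogleTest plus a build where
NDEBUG is not defined:

#include <cassert>
#include "gtest/gtest.h"

static bool IsFull(int ctrl) { return ctrl >= 0; }
static void EraseAt(int ctrl) { assert(IsFull(ctrl)); }

TEST(AssertDeathDemo, MessageContainsStringifiedExpression) {
  // The abort message includes the text "IsFull(ctrl)".
  EXPECT_DEATH_IF_SUPPORTED(EraseAt(-1), "IsFull");
}
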