From b2dc72c17ac663885b62334d334da9f8970543b5 Mon Sep 17 00:00:00 2001
From: Abseil Team
Date: Mon, 13 Sep 2021 17:04:32 -0700
Subject: Export of internal Abseil changes

--
ca3a0009e675b699b5d6dd41f00ebac0e7d1935c by Derek Mauro:

  Internal change

PiperOrigin-RevId: 396475923

--
04d9fff79085bb18612af3da49007907394ae0b6 by Abseil Team:

  Move HashtablezSampler from container/internal to profiling/internal.

PiperOrigin-RevId: 396362093
GitOrigin-RevId: ca3a0009e675b699b5d6dd41f00ebac0e7d1935c
Change-Id: I42d6d2944786afa24259fde002fed5e611f4e1f9
---
 absl/profiling/internal/sample_recorder.h | 231 ++++++++++++++++++++++++++++++
 1 file changed, 231 insertions(+)
 create mode 100644 absl/profiling/internal/sample_recorder.h

(limited to 'absl/profiling/internal/sample_recorder.h')

diff --git a/absl/profiling/internal/sample_recorder.h b/absl/profiling/internal/sample_recorder.h
new file mode 100644
index 00000000..a257ea5d
--- /dev/null
+++ b/absl/profiling/internal/sample_recorder.h
@@ -0,0 +1,231 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: sample_recorder.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a lock-free linked list for recording samples
+// collected from a random/stochastic process.
+//
+// This utility is internal-only. Use at your own risk.
+
+#ifndef ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
+#define ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
+
+#include <atomic>
+#include <cstddef>
+#include <functional>
+
+#include "absl/base/config.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace profiling_internal {
+
+// Sample that has members required for linking samples in the linked list of
+// samples maintained by the SampleRecorder. Type T defines the sampled data.
+template <typename T>
+struct Sample {
+ public:
+  // Guards the ability to restore the sample to a pristine state. This
+  // prevents races with sampling and resurrecting an object.
+  absl::Mutex init_mu;
+  T* next = nullptr;
+  T* dead ABSL_GUARDED_BY(init_mu) = nullptr;
+};
+
+// Holds samples and their associated stack traces with a soft limit set by
+// `SetHashtablezMaxSamples()`.
+//
+// Thread safe.
+template <typename T>
+class SampleRecorder {
+ public:
+  SampleRecorder();
+  ~SampleRecorder();
+
+  // Registers for sampling. Returns an opaque registration info.
+  T* Register();
+
+  // Unregisters the sample.
+  void Unregister(T* sample);
+
+  // The dispose callback will be called on all samples the moment they are
+  // being unregistered. Only affects samples that are unregistered after the
+  // callback has been set.
+  // Returns the previous callback.
+  using DisposeCallback = void (*)(const T&);
+  DisposeCallback SetDisposeCallback(DisposeCallback f);
+
+  // Iterates over all the registered `StackInfo`s. Returns the number of
+  // samples that have been dropped.
+  int64_t Iterate(const std::function<void(const T& stack)>& f);
+
+  void SetMaxSamples(int32_t max);
+
+ private:
+  void PushNew(T* sample);
+  void PushDead(T* sample);
+  T* PopDead();
+
+  std::atomic<int64_t> dropped_samples_;
+  std::atomic<int64_t> size_estimate_;
+  std::atomic<int32_t> max_samples_{1 << 20};
+
+  // Intrusive lock free linked lists for tracking samples.
+  //
+  // `all_` records all samples (they are never removed from this list) and is
+  // terminated with a `nullptr`.
+  //
+  // `graveyard_.dead` is a circular linked list. When it is empty,
+  // `graveyard_.dead == &graveyard_`. The list is circular so that
+  // every item on it (even the last) has a non-null dead pointer. This allows
+  // `Iterate` to determine if a given sample is live or dead using only
+  // information on the sample itself.
+  //
+  // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
+  // looks like this (G is the Graveyard):
+  //
+  //           +---+    +---+    +---+    +---+    +---+
+  //   all -->| A |--->| B |--->| C |--->| D |--->| E |
+  //          |   |    |   |    |   |    |   |    |   |
+  //   +---+  |   | +->|   |-+  |   | +->|   |-+  |   |
+  //   | G |  +---+ |  +---+ |  +---+ |  +---+ |  +---+
+  //   |   |        |        |        |        |
+  //   |   | --------+        +--------+        |
+  //   +---+                                    |
+  //     ^                                      |
+  //     +--------------------------------------+
+  //
+  std::atomic<T*> all_;
+  T graveyard_;
+
+  std::atomic<DisposeCallback> dispose_;
+};
+
+template <typename T>
+typename SampleRecorder<T>::DisposeCallback
+SampleRecorder<T>::SetDisposeCallback(DisposeCallback f) {
+  return dispose_.exchange(f, std::memory_order_relaxed);
+}
+
+template <typename T>
+SampleRecorder<T>::SampleRecorder()
+    : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
+  absl::MutexLock l(&graveyard_.init_mu);
+  graveyard_.dead = &graveyard_;
+}
+
+template <typename T>
+SampleRecorder<T>::~SampleRecorder() {
+  T* s = all_.load(std::memory_order_acquire);
+  while (s != nullptr) {
+    T* next = s->next;
+    delete s;
+    s = next;
+  }
+}
+
+template <typename T>
+void SampleRecorder<T>::PushNew(T* sample) {
+  sample->next = all_.load(std::memory_order_relaxed);
+  while (!all_.compare_exchange_weak(sample->next, sample,
+                                     std::memory_order_release,
+                                     std::memory_order_relaxed)) {
+  }
+}
+
+template <typename T>
+void SampleRecorder<T>::PushDead(T* sample) {
+  if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
+    dispose(*sample);
+  }
+
+  absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+  absl::MutexLock sample_lock(&sample->init_mu);
+  sample->dead = graveyard_.dead;
+  graveyard_.dead = sample;
+}
+
+template <typename T>
+T* SampleRecorder<T>::PopDead() {
+  absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+
+  // The list is circular, so eventually it collapses down to
+  //   graveyard_.dead == &graveyard_
+  // when it is empty.
+  T* sample = graveyard_.dead;
+  if (sample == &graveyard_) return nullptr;
+
+  absl::MutexLock sample_lock(&sample->init_mu);
+  graveyard_.dead = sample->dead;
+  sample->dead = nullptr;
+  sample->PrepareForSampling();
+  return sample;
+}
+
+template <typename T>
+T* SampleRecorder<T>::Register() {
+  int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
+  if (size > max_samples_.load(std::memory_order_relaxed)) {
+    size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+    dropped_samples_.fetch_add(1, std::memory_order_relaxed);
+    return nullptr;
+  }
+
+  T* sample = PopDead();
+  if (sample == nullptr) {
+    // Resurrection failed. Hire a new warlock.
+    sample = new T();
+    PushNew(sample);
+  }
+
+  return sample;
+}
+
+template <typename T>
+void SampleRecorder<T>::Unregister(T* sample) {
+  PushDead(sample);
+  size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+}
+
+template <typename T>
+int64_t SampleRecorder<T>::Iterate(
+    const std::function<void(const T& stack)>& f) {
+  T* s = all_.load(std::memory_order_acquire);
+  while (s != nullptr) {
+    absl::MutexLock l(&s->init_mu);
+    if (s->dead == nullptr) {
+      f(*s);
+    }
+    s = s->next;
+  }
+
+  return dropped_samples_.load(std::memory_order_relaxed);
+}
+
+template <typename T>
+void SampleRecorder<T>::SetMaxSamples(int32_t max) {
+  max_samples_.store(max, std::memory_order_release);
+}
+
+}  // namespace profiling_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
--
cgit v1.2.3
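For orientation, a minimal sketch of how a sampled type plugs into the recorder added above. The `MyInfo` type and `Demo()` function are hypothetical, invented for illustration; only the `Sample<T>`/`SampleRecorder<T>` interface comes from the patch itself.

// Hypothetical sampled type: derives from Sample<MyInfo> and provides the
// PrepareForSampling() hook that PopDead() calls when it resurrects a
// sample from the graveyard.
#include <atomic>
#include <cstddef>
#include <cstdint>

#include "absl/profiling/internal/sample_recorder.h"

struct MyInfo : absl::profiling_internal::Sample<MyInfo> {
  std::atomic<size_t> count{0};
  void PrepareForSampling() { count.store(0, std::memory_order_relaxed); }
};

void Demo() {
  absl::profiling_internal::SampleRecorder<MyInfo> recorder;
  MyInfo* info = recorder.Register();  // nullptr once the soft cap is hit
  if (info != nullptr) {
    info->count.fetch_add(1, std::memory_order_relaxed);
    recorder.Unregister(info);  // pushes the sample onto the graveyard
  }
  // Iterate visits live samples only and returns the drop count.
  int64_t dropped = recorder.Iterate([](const MyInfo&) {});
  (void)dropped;
}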
From 2f25e6ea5f91d1646aca2337719c9b4deb64100a Mon Sep 17 00:00:00 2001
From: Abseil Team
Date: Tue, 14 Sep 2021 19:15:10 -0700
Subject: Export of internal Abseil changes

--
a7924266cefd1c175142545996c967fb18b6144f by Abseil Team:

  Document the performance advantages of the `ToInt64*()` family of
  functions.

PiperOrigin-RevId: 396736253

--
77aa845d3aa4c56a48b899ce5a5ffcf9b81991b3 by Matt Kulukundis:

  tiny cleanup: remove redundant `public:`

PiperOrigin-RevId: 396615677

--
b837e3de0ebd99e4c4f692a80a5d7ece3e829d18 by Martijn Vels:

  Make btree growth factor identical to concat code

  The btree code is using a more aggressive growth rate on Cord::Append
  than the Concat version. To minimize impact on migration and existing
  tests using empirical assumptions on growth, this change makes the btree
  use the same rate as the Concat logic.

PiperOrigin-RevId: 396595109

--
e958c06b6ab52e389caa7b3e43d83f924367e79c by Martijn Vels:

  Change CordRepBtree::Dump to include capacity of flats

PiperOrigin-RevId: 396591195
GitOrigin-RevId: a7924266cefd1c175142545996c967fb18b6144f
Change-Id: Ia38c09ba9891364b0727509dec4776eb6aadf984
---
 absl/profiling/internal/sample_recorder.h | 1 -
 absl/strings/cord.cc                      | 4 ++--
 absl/strings/internal/cord_rep_btree.cc   | 3 ++-
 absl/time/time.h                          | 5 +++--
 4 files changed, 7 insertions(+), 6 deletions(-)

(limited to 'absl/profiling/internal/sample_recorder.h')

diff --git a/absl/profiling/internal/sample_recorder.h b/absl/profiling/internal/sample_recorder.h
index a257ea5d..5e04a9cd 100644
--- a/absl/profiling/internal/sample_recorder.h
+++ b/absl/profiling/internal/sample_recorder.h
@@ -41,7 +41,6 @@ namespace profiling_internal {
 // samples maintained by the SampleRecorder. Type T defines the sampled data.
 template <typename T>
 struct Sample {
- public:
   // Guards the ability to restore the sample to a pristine state. This
   // prevents races with sampling and resurrecting an object.
   absl::Mutex init_mu;
diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc
index 115705a2..29af9782 100644
--- a/absl/strings/cord.cc
+++ b/absl/strings/cord.cc
@@ -708,8 +708,8 @@ void Cord::InlineRep::AppendArray(absl::string_view src,
   if (btree_enabled()) {
     // TODO(b/192061034): keep legacy 10% growth rate: consider other rates.
     rep = ForceBtree(rep);
-    const size_t alloc_hint = (std::min)(kMaxFlatLength, rep->length / 10);
-    rep = CordRepBtree::Append(rep->btree(), src, alloc_hint);
+    const size_t min_growth = std::max(rep->length / 10, src.size());
+    rep = CordRepBtree::Append(rep->btree(), src, min_growth - src.size());
   } else {
     // Use new block(s) for any remaining bytes that were not handled above.
     // Alloc extra memory only if the right child of the root of the new tree
diff --git a/absl/strings/internal/cord_rep_btree.cc b/absl/strings/internal/cord_rep_btree.cc
index 8fe589fa..6d53ab61 100644
--- a/absl/strings/internal/cord_rep_btree.cc
+++ b/absl/strings/internal/cord_rep_btree.cc
@@ -96,7 +96,8 @@ void DumpAll(const CordRep* rep, bool include_contents, std::ostream& stream,
     maybe_dump_data(rep);
     DumpAll(substring->child, include_contents, stream, depth + 1);
   } else if (rep->tag >= FLAT) {
-    stream << "Flat, len = " << rep->length;
+    stream << "Flat, len = " << rep->length
+           << ", cap = " << rep->flat()->Capacity();
     maybe_dump_data(rep);
   } else if (rep->tag == EXTERNAL) {
     stream << "Extn, len = " << rep->length;
diff --git a/absl/time/time.h b/absl/time/time.h
index e9cbce84..5abd815a 100644
--- a/absl/time/time.h
+++ b/absl/time/time.h
@@ -480,8 +480,9 @@ Duration Hours(T n) {
 // ToInt64Hours()
 //
 // Helper functions that convert a Duration to an integral count of the
-// indicated unit. These functions are shorthand for the `IDivDuration()`
-// function above; see its documentation for details about overflow, etc.
+// indicated unit. These return the same results as the `IDivDuration()`
+// function, though they usually do so more efficiently; see the
+// documentation of `IDivDuration()` for details about overflow, etc.
 //
 // Example:
 //
--
cgit v1.2.3
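To make the documented relationship concrete, a small hedged example (values arbitrary; `SameQuotient` is not from the patch):

#include <cstdint>

#include "absl/time/time.h"

// ToInt64Milliseconds() yields the same quotient as IDivDuration() with a
// one-millisecond denominator; the dedicated helper can take a cheaper path.
bool SameQuotient() {
  absl::Duration d = absl::Milliseconds(1500);
  absl::Duration rem;
  int64_t q = absl::IDivDuration(d, absl::Milliseconds(1), &rem);  // 1500
  int64_t ms = absl::ToInt64Milliseconds(d);                       // 1500
  return q == ms && rem == absl::ZeroDuration();  // true for this input
}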
From 3e1983c5c07eb8a43ad030e770cbae023a470a04 Mon Sep 17 00:00:00 2001
From: Abseil Team
Date: Wed, 24 Nov 2021 22:01:08 -0800
Subject: Export of internal Abseil changes

--
a9ea60e9c0ccd744b6f12fd021dbedfe826dfe84 by Matt Kulukundis:

  Add an internal hook to allow keeping flags in sync with global state.
  Roll forward, except continue including hashtablez_flags.h in
  absl_flags.h so users don't break.

PiperOrigin-RevId: 412198044

--
183e5c440b68c797ce4a82102f94f41c97a14674 by Martijn Vels:

  Internal cleanups and changes

PiperOrigin-RevId: 412083793

--
3740faf7c5a2e1723e3c7e4d1b3f3db7cbec6e61 by Abseil Team:

  Mark Cord::Clear() with the ABSL_ATTRIBUTE_REINITIALIZES attribute.

  This prevents false positives in the clang-tidy check
  bugprone-use-after-move; it allows Clear() to be called on a moved-from
  Cord without any warnings, and the Cord will thereafter be regarded as
  initialized again.

PiperOrigin-RevId: 412082757

--
a730d3f4ba06b55ae50386920a0544592069ac01 by Abseil Team:

  StrJoin: Support iterators that do not have an `operator->`

  Allows using `StrJoin` with iterators that do not have an `operator->`.
  The `operator->` requirement for input iterators was dropped in C++20.

PiperOrigin-RevId: 412066130

--
6773c0ced2caa6a7855898298faecc584f3997ec by Andy Soffer:

  Rollback of internal hook for keeping flags in sync with global state.

PiperOrigin-RevId: 411895027

--
4e7016a2fb88ce97853ef85ad5b4f76998eacca1 by Matt Kulukundis:

  Add an internal hook to allow keeping flags in sync with global state.

PiperOrigin-RevId: 411867376

--
2a7d4056e467b6b5d8a7aa9398d6cb5454c10fc5 by Martijn Vels:

  Internal change

PiperOrigin-RevId: 411806932
GitOrigin-RevId: a9ea60e9c0ccd744b6f12fd021dbedfe826dfe84
Change-Id: Ib35bb7b40774979ed2ad205bbb1744b1085eae78
---
 absl/container/internal/hashtablez_sampler.cc |  37 +++++++
 absl/container/internal/hashtablez_sampler.h  |   9 ++
 absl/profiling/internal/sample_recorder.h     |   6 ++
 absl/strings/BUILD.bazel                      |   1 +
 absl/strings/cord.h                           |   3 +-
 absl/strings/cord_test.cc                     |  26 +++--
 absl/strings/internal/cord_rep_flat.h         |   6 +-
 absl/strings/internal/str_join_internal.h     |  15 +--
 absl/strings/str_join_test.cc                 | 134 ++++++++++++++++++++++++++
 9 files changed, 218 insertions(+), 19 deletions(-)

(limited to 'absl/profiling/internal/sample_recorder.h')

diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 40cce047..1a3ca7cc 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -38,12 +38,18 @@ ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
     false};
 ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
+std::atomic<HashtablezConfigListener> g_hashtablez_config_listener{nullptr};
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
     g_exponential_biased_generator;
 #endif
 
+void TriggerHashtablezConfigListener() {
+  auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire);
+  if (listener != nullptr) listener();
+}
+
 }  // namespace
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -163,11 +169,33 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
   info->size.fetch_add(1, std::memory_order_relaxed);
 }
 
+void SetHashtablezConfigListener(HashtablezConfigListener l) {
+  g_hashtablez_config_listener.store(l, std::memory_order_release);
+}
+
+bool IsHashtablezEnabled() {
+  return g_hashtablez_enabled.load(std::memory_order_acquire);
+}
+
 void SetHashtablezEnabled(bool enabled) {
+  SetHashtablezEnabledInternal(enabled);
+  TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezEnabledInternal(bool enabled) {
   g_hashtablez_enabled.store(enabled, std::memory_order_release);
 }
 
+int32_t GetHashtablezSampleParameter() {
+  return g_hashtablez_sample_parameter.load(std::memory_order_acquire);
+}
+
 void SetHashtablezSampleParameter(int32_t rate) {
+  SetHashtablezSampleParameterInternal(rate);
+  TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezSampleParameterInternal(int32_t rate) {
   if (rate > 0) {
     g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
   } else {
@@ -176,7 +204,16 @@ void SetHashtablezSampleParameter(int32_t rate) {
   }
 }
 
+int32_t GetHashtablezMaxSamples() {
+  return GlobalHashtablezSampler().GetMaxSamples();
+}
+
 void SetHashtablezMaxSamples(int32_t max) {
+  SetHashtablezMaxSamplesInternal(max);
+  TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezMaxSamplesInternal(int32_t max) {
   if (max > 0) {
     GlobalHashtablezSampler().SetMaxSamples(max);
   } else {
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 91fcdb34..ee4d293e 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -258,14 +258,23 @@ using HashtablezSampler =
 // Returns a global Sampler.
 HashtablezSampler& GlobalHashtablezSampler();
 
+using HashtablezConfigListener = void (*)();
+void SetHashtablezConfigListener(HashtablezConfigListener l);
+
 // Enables or disables sampling for Swiss tables.
+bool IsHashtablezEnabled();
 void SetHashtablezEnabled(bool enabled);
+void SetHashtablezEnabledInternal(bool enabled);
 
 // Sets the rate at which Swiss tables will be sampled.
+int32_t GetHashtablezSampleParameter();
 void SetHashtablezSampleParameter(int32_t rate);
+void SetHashtablezSampleParameterInternal(int32_t rate);
 
 // Sets a soft max for the number of samples that will be kept.
+int32_t GetHashtablezMaxSamples();
 void SetHashtablezMaxSamples(int32_t max);
+void SetHashtablezMaxSamplesInternal(int32_t max);
 
 // Configuration override.
 // This allows process-wide sampling without depending on order of
diff --git a/absl/profiling/internal/sample_recorder.h b/absl/profiling/internal/sample_recorder.h
index 5e04a9cd..6c146210 100644
--- a/absl/profiling/internal/sample_recorder.h
+++ b/absl/profiling/internal/sample_recorder.h
@@ -75,6 +75,7 @@ class SampleRecorder {
   // samples that have been dropped.
   int64_t Iterate(const std::function<void(const T& stack)>& f);
 
+  int32_t GetMaxSamples() const;
   void SetMaxSamples(int32_t max);
 
  private:
@@ -223,6 +224,11 @@ void SampleRecorder<T>::SetMaxSamples(int32_t max) {
   max_samples_.store(max, std::memory_order_release);
 }
 
+template <typename T>
+int32_t SampleRecorder<T>::GetMaxSamples() const {
+  return max_samples_.load(std::memory_order_acquire);
+}
+
 }  // namespace profiling_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/strings/BUILD.bazel b/absl/strings/BUILD.bazel
index 4e37151e..5d433cce 100644
--- a/absl/strings/BUILD.bazel
+++ b/absl/strings/BUILD.bazel
@@ -436,6 +436,7 @@ cc_library(
         "//absl/container:inlined_vector",
         "//absl/functional:function_ref",
         "//absl/meta:type_traits",
+        "//absl/numeric:bits",
         "//absl/types:optional",
        "//absl/types:span",
     ],
diff --git a/absl/strings/cord.h b/absl/strings/cord.h
index 662e889a..27d3475f 100644
--- a/absl/strings/cord.h
+++ b/absl/strings/cord.h
@@ -70,6 +70,7 @@
 #include
 #include
 
+#include "absl/base/attributes.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/endian.h"
 #include "absl/base/internal/per_thread_tls.h"
@@ -215,7 +216,7 @@ class Cord {
   //
   // Releases the Cord data. Any nodes that share data with other Cords, if
   // applicable, will have their reference counts reduced by 1.
-  void Clear();
+  ABSL_ATTRIBUTE_REINITIALIZES void Clear();
 
   // Cord::Append()
   //
diff --git a/absl/strings/cord_test.cc b/absl/strings/cord_test.cc
index 8a311799..ea865cca 100644
--- a/absl/strings/cord_test.cc
+++ b/absl/strings/cord_test.cc
@@ -49,6 +49,11 @@ static constexpr auto MAX_FLAT_TAG = absl::cord_internal::MAX_FLAT_TAG;
 
 typedef std::mt19937_64 RandomEngine;
 
+using absl::cord_internal::CordRep;
+using absl::cord_internal::CordRepFlat;
+using absl::cord_internal::kFlatOverhead;
+using absl::cord_internal::kMaxFlatLength;
+
 static std::string RandomLowercaseString(RandomEngine* rng);
 static std::string RandomLowercaseString(RandomEngine* rng, size_t length);
 
@@ -266,10 +271,6 @@ INSTANTIATE_TEST_SUITE_P(WithParam, CordTest, testing::Values(0, 1, 2, 3),
 
 TEST(CordRepFlat, AllFlatCapacities) {
-  using absl::cord_internal::CordRep;
-  using absl::cord_internal::CordRepFlat;
-  using absl::cord_internal::kFlatOverhead;
-
   // Explicitly and redundantly assert built-in min/max limits
   static_assert(absl::cord_internal::kFlatOverhead < 32, "");
   static_assert(absl::cord_internal::kMinFlatSize == 32, "");
@@ -310,9 +311,6 @@ TEST(CordRepFlat, AllFlatCapacities) {
 }
 
 TEST(CordRepFlat, MaxFlatSize) {
-  using absl::cord_internal::CordRep;
-  using absl::cord_internal::CordRepFlat;
-  using absl::cord_internal::kMaxFlatLength;
   CordRepFlat* flat = CordRepFlat::New(kMaxFlatLength);
   EXPECT_EQ(flat->Capacity(), kMaxFlatLength);
   CordRep::Unref(flat);
@@ -323,15 +321,23 @@ TEST(CordRepFlat, MaxFlatSize) {
 }
 
 TEST(CordRepFlat, MaxLargeFlatSize) {
-  using absl::cord_internal::CordRep;
-  using absl::cord_internal::CordRepFlat;
-  using absl::cord_internal::kFlatOverhead;
   const size_t size = 256 * 1024 - kFlatOverhead;
   CordRepFlat* flat = CordRepFlat::New(CordRepFlat::Large(), size);
   EXPECT_GE(flat->Capacity(), size);
   CordRep::Unref(flat);
 }
 
+TEST(CordRepFlat, AllFlatSizes) {
+  const size_t kMaxSize = 256 * 1024;
+  for (size_t size = 32; size <= kMaxSize; size *= 2) {
+    const size_t length = size - kFlatOverhead - 1;
+    CordRepFlat* flat = CordRepFlat::New(CordRepFlat::Large(), length);
+    EXPECT_GE(flat->Capacity(), length);
+    memset(flat->Data(), 0xCD, flat->Capacity());
+    CordRep::Unref(flat);
+  }
+}
+
 TEST_P(CordTest, AllFlatSizes) {
   using absl::strings_internal::CordTestAccess;
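The effect of the new attribute, sketched under the assumptions stated in the commit message (`Reuse` is a made-up function, not from the patch):

#include <utility>

#include "absl/strings/cord.h"

void Reuse(absl::Cord& sink) {
  absl::Cord c("hello");
  sink = std::move(c);  // `c` is left in a moved-from state.
  // With ABSL_ATTRIBUTE_REINITIALIZES, bugprone-use-after-move accepts this
  // Clear() as re-initialization rather than flagging a use-after-move.
  c.Clear();
  c.Append("world");  // `c` is a valid, empty Cord again before this call.
}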
diff --git a/absl/strings/internal/cord_rep_flat.h b/absl/strings/internal/cord_rep_flat.h
index a15c9acd..ae8b3a2a 100644
--- a/absl/strings/internal/cord_rep_flat.h
+++ b/absl/strings/internal/cord_rep_flat.h
@@ -20,6 +20,8 @@
 #include
 #include
 
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
 #include "absl/strings/internal/cord_internal.h"
 
 namespace absl {
@@ -105,8 +107,8 @@ struct CordRepFlat : public CordRep {
   struct Large {};
 
   // Creates a new flat node.
-  template <size_t max_flat_size>
-  static CordRepFlat* NewImpl(size_t len) {
+  template <size_t max_flat_size, typename... Args>
+  static CordRepFlat* NewImpl(size_t len, Args... args ABSL_ATTRIBUTE_UNUSED) {
     if (len <= kMinFlatLength) {
       len = kMinFlatLength;
     } else if (len > max_flat_size - kFlatOverhead) {
diff --git a/absl/strings/internal/str_join_internal.h b/absl/strings/internal/str_join_internal.h
index 31dbf672..d97d5033 100644
--- a/absl/strings/internal/str_join_internal.h
+++ b/absl/strings/internal/str_join_internal.h
@@ -229,10 +229,11 @@ std::string JoinAlgorithm(Iterator start, Iterator end, absl::string_view s,
   std::string result;
   if (start != end) {
     // Sums size
-    size_t result_size = start->size();
+    auto&& start_value = *start;
+    size_t result_size = start_value.size();
     for (Iterator it = start; ++it != end;) {
       result_size += s.size();
-      result_size += it->size();
+      result_size += (*it).size();
     }
 
     if (result_size > 0) {
@@ -240,13 +241,15 @@ std::string JoinAlgorithm(Iterator start, Iterator end, absl::string_view s,
 
       // Joins strings
       char* result_buf = &*result.begin();
-      memcpy(result_buf, start->data(), start->size());
-      result_buf += start->size();
+
+      memcpy(result_buf, start_value.data(), start_value.size());
+      result_buf += start_value.size();
       for (Iterator it = start; ++it != end;) {
         memcpy(result_buf, s.data(), s.size());
         result_buf += s.size();
-        memcpy(result_buf, it->data(), it->size());
-        result_buf += it->size();
+        auto&& value = *it;
+        memcpy(result_buf, value.data(), value.size());
+        result_buf += value.size();
       }
     }
   }
diff --git a/absl/strings/str_join_test.cc b/absl/strings/str_join_test.cc
index 2be6256e..c986e863 100644
--- a/absl/strings/str_join_test.cc
+++ b/absl/strings/str_join_test.cc
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -33,6 +34,7 @@
 #include "absl/memory/memory.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_split.h"
+#include "absl/strings/string_view.h"
 
 namespace {
 
@@ -471,4 +473,136 @@ TEST(StrJoin, Tuple) {
                            "-", absl::DereferenceFormatter(TestFormatter())));
 }
 
+// A minimal value type for `StrJoin` inputs.
+// Used to ensure we do not require a more specific type, such as a
+// `string_view`.
+//
+// Anything that can be `data()` and `size()` is OK.
+class TestValue {
+ public:
+  TestValue(const char* data, size_t size) : data_(data), size_(size) {}
+  const char* data() const { return data_; }
+  size_t size() const { return size_; }
+
+ private:
+  const char* data_;
+  size_t size_;
+};
+
+// A minimal C++20 forward iterator, used to test that we do not impose
+// excessive requirements on StrJoin inputs.
+//
+// The 2 main differences between pre-C++20 LegacyForwardIterator and the
+// C++20 ForwardIterator are:
+// 1. `operator->` is not required in C++20.
+// 2. `operator*` result does not need to be an lvalue (a reference).
+//
+// The `operator->` requirement was removed on page 17 in:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1037r0.pdf
+//
+// See the `[iterator.requirements]` section of the C++ standard.
+//
+// The value type is a template parameter so that we can test the behaviour
+// of `StrJoin` specializations, e.g. the NoFormatter specialization for
+// `string_view`.
+template <typename ValueT>
+class TestIterator {
+ public:
+  using iterator_category = std::forward_iterator_tag;
+  using value_type = ValueT;
+  using pointer = void;
+  using reference = const value_type&;
+  using difference_type = int;
+
+  // `data` must outlive the result.
+  static TestIterator begin(const std::vector<std::string>& data) {
+    return TestIterator(&data, 0);
+  }
+
+  static TestIterator end(const std::vector<std::string>& data) {
+    return TestIterator(nullptr, data.size());
+  }
+
+  bool operator==(const TestIterator& other) const {
+    return pos_ == other.pos_;
+  }
+  bool operator!=(const TestIterator& other) const {
+    return pos_ != other.pos_;
+  }
+
+  // This deliberately returns a `prvalue`.
+  // The requirement to return a reference was removed in C++20.
+  value_type operator*() const {
+    return ValueT((*data_)[pos_].data(), (*data_)[pos_].size());
+  }
+
+  // `operator->()` is deliberately omitted.
+  // The requirement to provide it was removed in C++20.
+
+  TestIterator& operator++() {
+    ++pos_;
+    return *this;
+  }
+
+  TestIterator operator++(int) {
+    TestIterator result = *this;
+    ++(*this);
+    return result;
+  }
+
+  TestIterator& operator--() {
+    --pos_;
+    return *this;
+  }
+
+  TestIterator operator--(int) {
+    TestIterator result = *this;
+    --(*this);
+    return result;
+  }
+
+ private:
+  TestIterator(const std::vector<std::string>* data, size_t pos)
+      : data_(data), pos_(pos) {}
+
+  const std::vector<std::string>* data_;
+  size_t pos_;
+};
+
+template <typename ValueT>
+class TestIteratorRange {
+ public:
+  // `data` must be non-null and must outlive the result.
+  explicit TestIteratorRange(const std::vector<std::string>& data)
+      : begin_(TestIterator<ValueT>::begin(data)),
+        end_(TestIterator<ValueT>::end(data)) {}
+
+  const TestIterator<ValueT>& begin() const { return begin_; }
+  const TestIterator<ValueT>& end() const { return end_; }
+
+ private:
+  TestIterator<ValueT> begin_;
+  TestIterator<ValueT> end_;
+};
+
+TEST(StrJoin, TestIteratorRequirementsNoFormatter) {
+  const std::vector<std::string> a = {"a", "b", "c"};
+
+  // When the value type is string-like (`std::string` or `string_view`),
+  // the NoFormatter template specialization is used internally.
+  EXPECT_EQ("a-b-c",
+            absl::StrJoin(TestIteratorRange<absl::string_view>(a), "-"));
+}
+
+TEST(StrJoin, TestIteratorRequirementsCustomFormatter) {
+  const std::vector<std::string> a = {"a", "b", "c"};
+  EXPECT_EQ("a-b-c",
+            absl::StrJoin(TestIteratorRange<TestValue>(a), "-",
+                          [](std::string* out, const TestValue& value) {
+                            absl::StrAppend(
+                                out,
+                                absl::string_view(value.data(), value.size()));
+                          }));
+}
+
 }  // namespace
--
cgit v1.2.3
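A user-level sketch of what the `StrJoin` change permits: joining a range whose iterator has no `operator->` and whose `operator*` returns a prvalue. `DigitIter` and `Digits` are hypothetical types invented here; before this patch the string fast path dereferenced with `it->`, so this would not have compiled.

#include <iterator>
#include <string>

#include "absl/strings/str_join.h"

// Forward iterator producing "1", "2", "3" as temporaries; no operator->.
class DigitIter {
 public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = std::string;
  using reference = std::string;
  using pointer = void;
  using difference_type = int;

  explicit DigitIter(int n) : n_(n) {}
  std::string operator*() const { return std::to_string(n_); }  // prvalue
  DigitIter& operator++() {
    ++n_;
    return *this;
  }
  DigitIter operator++(int) {
    DigitIter tmp = *this;
    ++n_;
    return tmp;
  }
  bool operator==(const DigitIter& o) const { return n_ == o.n_; }
  bool operator!=(const DigitIter& o) const { return n_ != o.n_; }

 private:
  int n_;
};

struct Digits {
  DigitIter begin() const { return DigitIter(1); }
  DigitIter end() const { return DigitIter(4); }
};

std::string JoinDigits() { return absl::StrJoin(Digits{}, ","); }  // "1,2,3"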
From 9336be04a242237cd41a525bedfcf3be1bb55377 Mon Sep 17 00:00:00 2001
From: Abseil Team
Date: Fri, 3 Dec 2021 10:01:02 -0800
Subject: Export of internal Abseil changes

--
e7f53dfbf809812e84770217777f81b6308a3084 by Abseil Team:

  Add a parameter pack to absl profile to allow profiles to separate
  dynamic data from static data that is available at constructor-time.

  Background: `inline_element_size` is effectively constant, but there is
  a data race between its initialization and its access. We had fixed that
  race by making inline_element_size atomic. This CL changes
  `inline_element_size` back to a non-atomic integer, and provides a way
  for all profiles to provide Register()-time values.

PiperOrigin-RevId: 413960559

--
70234c5943f8e37e17c1d9c54d8ed61d39880abf by Chris Kennelly:

  Document that absl::FunctionRef does not allocate.

PiperOrigin-RevId: 413946831

--
3308ae571412c4be3cc32d088c6edac98ff2d1ed by Samuel Benzaquen:

  Internal change

PiperOrigin-RevId: 413933619

--
1617093a730d055edcf7bc04fdd6509783f5f75d by Martijn Vels:

  Internal Change

PiperOrigin-RevId: 413778735

--
03ad683f059c806a6c8b04f5b79b2662c3df8c73 by Evan Brown:

  Unify btree erase_if definitions and optimize them so that we only do
  rebalancing once per leaf node.

PiperOrigin-RevId: 413757280

--
5ba402f70801938178e486617063f01c7862525d by Martijn Vels:

  Cleanup up cord sampling internals

PiperOrigin-RevId: 413755011

--
522da8f9d3e0f11630d89fb41952004742bc335a by Evan Brown:

  Add b-tree benchmark for erase_if. Since this benchmark doesn't work for
  std:: containers before C++20, disable it for them.

PiperOrigin-RevId: 413740844

--
a690ea42de8ed4a761d00235d8b2fb7548ba9732 by Andy Getzendanner:

  Import of CCTZ from GitHub.

PiperOrigin-RevId: 413735737
GitOrigin-RevId: e7f53dfbf809812e84770217777f81b6308a3084
Change-Id: I4f9f9039ba92831bc48971964aa063244c9fed72
---
 absl/container/BUILD.bazel                         |   3 +
 absl/container/CMakeLists.txt                      |   3 +
 absl/container/btree_benchmark.cc                  |  64 +++++++++++-----
 absl/container/btree_map.h                         |  20 +-----
 absl/container/btree_set.h                         |  20 +-----
 absl/container/btree_test.cc                       |  14 +++++
 absl/container/flat_hash_map.h                     |   1 +
 absl/container/internal/btree.h                    |  46 ++++++++++++
 absl/container/internal/btree_container.h          |   1 +
 absl/container/internal/hashtablez_sampler.cc      |  18 ++----
 absl/container/internal/hashtablez_sampler.h       |  16 +----
 absl/container/internal/hashtablez_sampler_test.cc |  41 +++++------
 absl/container/internal/raw_hash_set_test.cc       |   3 +-
 absl/container/node_hash_map.h                     |   1 +
 absl/container/node_hash_set.h                     |   1 +
 absl/container/sample_element_size_test.cc         |   3 +-
 absl/functional/function_ref.h                     |   3 +-
 absl/functional/function_ref_test.cc               |   1 +
 absl/profiling/internal/sample_recorder.h          |  20 +++++--
 absl/strings/cord.cc                               |  40 -----------
 absl/strings/internal/cordz_update_tracker.h       |   5 +-
 absl/strings/internal/cordz_update_tracker_test.cc |   5 +-
 .../internal/cctz/include/cctz/civil_time_detail.h |  18 +++---
 23 files changed, 192 insertions(+), 155 deletions(-)

(limited to 'absl/profiling/internal/sample_recorder.h')

diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index ea2c98b8..728c4be1 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -241,6 +241,7 @@ cc_library(
         ":hash_function_defaults",
         ":raw_hash_map",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
@@ -310,6 +311,7 @@ cc_library(
         ":node_slot_policy",
         ":raw_hash_map",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
@@ -342,6 +344,7 @@ cc_library(
         ":node_slot_policy",
         ":raw_hash_set",
         "//absl/algorithm:container",
+        "//absl/base:core_headers",
         "//absl/memory",
     ],
 )
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index bb718cf8..b819deeb 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -274,6 +274,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::container_memory
+    absl::core_headers
     absl::hash_function_defaults
     absl::raw_hash_map
     absl::algorithm_container
@@ -347,6 +348,7 @@ absl_cc_library(
     ${ABSL_DEFAULT_COPTS}
   DEPS
     absl::container_memory
+    absl::core_headers
     absl::hash_function_defaults
     absl::node_slot_policy
     absl::raw_hash_map
@@ -381,6 +383,7 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::core_headers
    absl::hash_function_defaults
     absl::node_slot_policy
     absl::raw_hash_set
diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc
index 65b6790b..0ca497c8 100644
--- a/absl/container/btree_benchmark.cc
+++ b/absl/container/btree_benchmark.cc
@@ -153,9 +153,9 @@ void BM_FullLookup(benchmark::State& state) {
   BM_LookupImpl<T>(state, true);
 }
 
-// Benchmark deletion of values from a container.
+// Benchmark erasing values from a container.
 template <typename T>
-void BM_Delete(benchmark::State& state) {
+void BM_Erase(benchmark::State& state) {
   using V = typename remove_pair_const<typename T::value_type>::type;
   typename KeyOfValue<typename T::key_type, V>::type key_of_value;
   std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -180,9 +180,9 @@ void BM_Erase(benchmark::State& state) {
   }
 }
 
-// Benchmark deletion of multiple values from a container.
+// Benchmark erasing multiple values from a container.
 template <typename T>
-void BM_DeleteRange(benchmark::State& state) {
+void BM_EraseRange(benchmark::State& state) {
   using V = typename remove_pair_const<typename T::value_type>::type;
   typename KeyOfValue<typename T::key_type, V>::type key_of_value;
   std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -222,6 +222,40 @@ void BM_EraseRange(benchmark::State& state) {
   }
 }
 
+// Predicate that erases every other element. We can't use a lambda because
+// C++11 doesn't support generic lambdas.
+// TODO(b/207389011): consider adding benchmarks that remove different
+// fractions of keys (e.g. 10%, 90%).
+struct EraseIfPred {
+  uint64_t i = 0;
+  template <typename T>
+  bool operator()(const T&) {
+    return ++i % 2;
+  }
+};
+
+// Benchmark erasing multiple values from a container with a predicate.
+template <typename T>
+void BM_EraseIf(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+
+  // Removes half of the keys per batch.
+  const int batch_size = (kBenchmarkValues + 1) / 2;
+  EraseIfPred pred;
+  while (state.KeepRunningBatch(batch_size)) {
+    state.PauseTiming();
+    {
+      T container(values.begin(), values.end());
+      state.ResumeTiming();
+      erase_if(container, pred);
+      benchmark::DoNotOptimize(container);
+      state.PauseTiming();
+    }
+    state.ResumeTiming();
+  }
+}
+
 // Benchmark steady-state insert (into first half of range) and remove (from
 // second half of range), treating the container approximately like a queue with
 // log-time access for all elements. This benchmark does not test the case where
@@ -477,14 +511,14 @@
   void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
   BENCHMARK(BM_##type##_##func)
 
-#define MY_BENCHMARK3(type) \
+#define MY_BENCHMARK3_STL(type) \
   MY_BENCHMARK4(type, Insert); \
   MY_BENCHMARK4(type, InsertSorted); \
   MY_BENCHMARK4(type, InsertSmall); \
   MY_BENCHMARK4(type, Lookup); \
   MY_BENCHMARK4(type, FullLookup); \
-  MY_BENCHMARK4(type, Delete); \
-  MY_BENCHMARK4(type, DeleteRange); \
+  MY_BENCHMARK4(type, Erase); \
+  MY_BENCHMARK4(type, EraseRange); \
   MY_BENCHMARK4(type, QueueAddRem); \
   MY_BENCHMARK4(type, MixedAddRem); \
   MY_BENCHMARK4(type, Fifo); \
@@ -492,9 +526,13 @@
   MY_BENCHMARK4(type, InsertRangeRandom); \
   MY_BENCHMARK4(type, InsertRangeSorted)
 
+#define MY_BENCHMARK3(type) \
+  MY_BENCHMARK4(type, EraseIf); \
+  MY_BENCHMARK3_STL(type)
+
 #define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
-  MY_BENCHMARK3(stl_##type); \
-  MY_BENCHMARK3(stl_unordered_##type); \
+  MY_BENCHMARK3_STL(stl_##type); \
+  MY_BENCHMARK3_STL(stl_unordered_##type); \
   MY_BENCHMARK3(btree_256_##type)
 
 #define MY_BENCHMARK2(type) \
@@ -684,12 +722,12 @@ double ContainerInfo(const btree_map<int, BigTypePtr<SIZE>>& b) {
       btree_set<BigTypePtr<SIZE>>; \
   using btree_256_map_size##SIZE##copies##SIZE##ptr = \
       btree_map<int, BigTypePtr<SIZE>>; \
-  MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr); \
-  MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \
+  MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr); \
+  MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr); \
-  MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr); \
-  MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \
+  MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr); \
+  MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr); \
   MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index 797b949f..ad484ce0 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -482,15 +482,7 @@ void swap(btree_map<K, V, C, A> &x, btree_map<K, V, C, A> &y) {
 template <typename K, typename V, typename C, typename A, typename Pred>
 typename btree_map<K, V, C, A>::size_type erase_if(
     btree_map<K, V, C, A> &map, Pred pred) {
-  const auto initial_size = map.size();
-  for (auto it = map.begin(); it != map.end();) {
-    if (pred(*it)) {
-      it = map.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - map.size();
+  return container_internal::btree_access::erase_if(map, std::move(pred));
 }
 
 // absl::btree_multimap
@@ -817,15 +809,7 @@ void swap(btree_multimap<K, V, C, A> &x, btree_multimap<K, V, C, A> &y) {
 template <typename K, typename V, typename C, typename A, typename Pred>
 typename btree_multimap<K, V, C, A>::size_type erase_if(
     btree_multimap<K, V, C, A> &map, Pred pred) {
-  const auto initial_size = map.size();
-  for (auto it = map.begin(); it != map.end();) {
-    if (pred(*it)) {
-      it = map.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - map.size();
+  return container_internal::btree_access::erase_if(map, std::move(pred));
 }
 
 namespace container_internal {
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
index 2c217d8f..78826830 100644
--- a/absl/container/btree_set.h
+++ b/absl/container/btree_set.h
@@ -402,15 +402,7 @@ void swap(btree_set<K, C, A> &x, btree_set<K, C, A> &y) {
 template <typename K, typename C, typename A, typename Pred>
 typename btree_set<K, C, A>::size_type erase_if(btree_set<K, C, A> &set,
                                                 Pred pred) {
-  const auto initial_size = set.size();
-  for (auto it = set.begin(); it != set.end();) {
-    if (pred(*it)) {
-      it = set.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - set.size();
+  return container_internal::btree_access::erase_if(set, std::move(pred));
 }
 
 // absl::btree_multiset<>
@@ -732,15 +724,7 @@ void swap(btree_multiset<K, C, A> &x, btree_multiset<K, C, A> &y) {
 template <typename K, typename C, typename A, typename Pred>
 typename btree_multiset<K, C, A>::size_type erase_if(
     btree_multiset<K, C, A> &set, Pred pred) {
-  const auto initial_size = set.size();
-  for (auto it = set.begin(); it != set.end();) {
-    if (pred(*it)) {
-      it = set.erase(it);
-    } else {
-      ++it;
-    }
-  }
-  return initial_size - set.size();
+  return container_internal::btree_access::erase_if(set, std::move(pred));
 }
 
 namespace container_internal {
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index 650c51db..13cb8186 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -2490,6 +2490,20 @@ TEST(Btree, EraseIf) {
     EXPECT_EQ(erase_if(s, &IsEven), 2);
     EXPECT_THAT(s, ElementsAre(1, 3, 5));
   }
+  // Test that erase_if invokes the predicate once per element.
+  {
+    absl::btree_set<int> s;
+    for (int i = 0; i < 1000; ++i) s.insert(i);
+    int pred_calls = 0;
+    EXPECT_EQ(erase_if(s,
+                       [&pred_calls](int k) {
+                         ++pred_calls;
+                         return k % 2;
+                       }),
+              500);
+    EXPECT_THAT(s, SizeIs(500));
+    EXPECT_EQ(pred_calls, 1000);
+  }
 }
 
 TEST(Btree, InsertOrAssign) {
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index 69fbf239..83c71029 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -36,6 +36,7 @@
 #include
 
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/raw_hash_map.h"  // IWYU pragma: export
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index bf65a03d..29603379 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -830,6 +830,7 @@ class btree_node {
   template <typename N, typename R, typename P>
   friend struct btree_iterator;
   friend class BtreeNodePeer;
+  friend struct btree_access;
 };
 
@@ -964,6 +965,7 @@ struct btree_iterator {
   friend class btree_multiset_container;
   template <typename TreeType, typename CheckerType>
   friend class base_checker;
+  friend struct btree_access;
 
   const key_type &key() const { return node->key(position); }
   slot_type *slot() { return node->slot(position); }
@@ -1336,6 +1338,8 @@ class btree {
   allocator_type get_allocator() const { return allocator(); }
 
  private:
+  friend struct btree_access;
+
   // Internal accessor routines.
   node_type *root() { return root_.template get<2>(); }
   const node_type *root() const { return root_.template get<2>(); }
@@ -2530,6 +2534,48 @@ int btree<P>::internal_verify(const node_type *node, const key_type *lo,
   return count;
 }
 
+struct btree_access {
+  template <typename BtreeContainer, typename Pred>
+  static auto erase_if(BtreeContainer &container, Pred pred)
+      -> typename BtreeContainer::size_type {
+    const auto initial_size = container.size();
+    auto &tree = container.tree_;
+    auto *alloc = tree.mutable_allocator();
+    for (auto it = container.begin(); it != container.end();) {
+      if (!pred(*it)) {
+        ++it;
+        continue;
+      }
+      auto *node = it.node;
+      if (!node->leaf()) {
+        // Handle internal nodes normally.
+        it = container.erase(it);
+        continue;
+      }
+      // If this is a leaf node, then we do all the erases from this node
+      // at once before doing rebalancing.
+
+      // The current position to transfer slots to.
+      int to_pos = it.position;
+      node->value_destroy(it.position, alloc);
+      while (++it.position < node->finish()) {
+        if (pred(*it)) {
+          node->value_destroy(it.position, alloc);
+        } else {
+          node->transfer(node->slot(to_pos++), node->slot(it.position),
+                         alloc);
+        }
+      }
+      const int num_deleted = node->finish() - to_pos;
+      tree.size_ -= num_deleted;
+      node->set_finish(to_pos);
+      it.position = to_pos;
+      it = tree.rebalance_after_delete(it);
+    }
+    return initial_size - container.size();
+  }
+};
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index a99668c7..d28b2446 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -228,6 +228,7 @@ class btree_container {
   }
 
  protected:
+  friend struct btree_access;
   Tree tree_;
 };
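User-visible behavior of `absl::erase_if` is unchanged by the rerouting above; a small usage sketch (`EraseEvens` is illustrative only, not from the patch):

#include <cstddef>

#include "absl/container/btree_set.h"

void EraseEvens() {
  absl::btree_set<int> s = {1, 2, 3, 4, 5, 6};
  // Routed through container_internal::btree_access::erase_if, which batches
  // erases within a leaf and rebalances once per leaf instead of per element.
  size_t erased = absl::erase_if(s, [](int v) { return v % 2 == 0; });
  (void)erased;  // erased == 3; s now holds {1, 3, 5}.
}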
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index a3e15a70..1d24db5f 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -61,13 +61,10 @@ HashtablezSampler& GlobalHashtablezSampler() {
   return *sampler;
 }
 
-// TODO(bradleybear): The comments at this constructor's declaration say that
-// the fields are not initialized, but this definition does initialize the
-// fields. Something needs to be cleaned up.
-HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::HashtablezInfo() = default;
 HashtablezInfo::~HashtablezInfo() = default;
 
-void HashtablezInfo::PrepareForSampling() {
+void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
   capacity.store(0, std::memory_order_relaxed);
   size.store(0, std::memory_order_relaxed);
   num_erases.store(0, std::memory_order_relaxed);
@@ -85,6 +82,7 @@
   // instead.
   depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
                               /* skip_count= */ 0);
+  inline_element_size = inline_element_size_value;
 }
 
 static bool ShouldForceSampling() {
@@ -110,9 +108,8 @@ static bool ShouldForceSampling() {
 HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
   if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
     *next_sample = 1;
-    HashtablezInfo* result = GlobalHashtablezSampler().Register();
-    result->inline_element_size.store(inline_element_size,
-                                      std::memory_order_relaxed);
+    HashtablezInfo* result =
+        GlobalHashtablezSampler().Register(inline_element_size);
     return result;
   }
 
@@ -138,10 +135,7 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
     return SampleSlow(next_sample, inline_element_size);
   }
 
-  HashtablezInfo* result = GlobalHashtablezSampler().Register();
-  result->inline_element_size.store(inline_element_size,
-                                    std::memory_order_relaxed);
-  return result;
+  return GlobalHashtablezSampler().Register(inline_element_size);
 #endif
 }
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 0fd9349f..6738786c 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -67,7 +67,8 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
 
   // Puts the object into a clean state, fills in the logically `const` members,
   // blocking for any readers that are currently sampling the object.
-  void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+  void PrepareForSampling(size_t inline_element_size_value)
+      ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
 
   // These fields are mutated by the various Record* APIs and need to be
   // thread-safe.
@@ -82,18 +83,6 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   std::atomic<size_t> hashes_bitwise_xor;
   std::atomic<size_t> max_reserve;
 
-  // One could imagine that inline_element_size could be non-atomic, since it
-  // *almost* follows the rules for the fields that are set by
-  // `PrepareForSampling`. However, TSAN reports a race (see b/207323922) in
-  // which
-  //   A: Thread 1: Register() returns, unlocking init_mu.
-  //   B: Thread 2: Iterate() is called, locking init_mu.
-  //   C: Thread 1: inlined_element_size is stored.
-  //   D: Thread 2: inlined_element_size is accessed (a race).
-  // A simple solution is to make inline_element_size atomic so that we treat
-  // it as we do the other atomic fields.
-  std::atomic<size_t> inline_element_size;
-
   // All of the fields below are set by `PrepareForSampling`, they must not be
   // mutated in `Record*` functions. They are logically `const` in that sense.
   // These are guarded by init_mu, but that is not externalized to clients,
@@ -103,6 +92,7 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
   absl::Time create_time;
   int32_t depth;
   void* stack[kMaxStackDepth];
+  size_t inline_element_size;  // How big is the slot?
 };
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index a7307a20..ac6ed9b1 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -70,7 +70,8 @@ std::vector<size_t> GetSizes(HashtablezSampler* s) {
 }
 
 HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
-  auto* info = s->Register();
+  const size_t test_element_size = 17;
+  auto* info = s->Register(test_element_size);
   assert(info != nullptr);
   info->size.store(size);
   return info;
@@ -81,9 +82,8 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   const size_t test_element_size = 17;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  info.PrepareForSampling(test_element_size);
 
-  info.inline_element_size.store(test_element_size, std::memory_order_relaxed);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -95,7 +95,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
   EXPECT_GE(info.create_time, test_start);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 
   info.capacity.store(1, std::memory_order_relaxed);
   info.size.store(1, std::memory_order_relaxed);
@@ -108,7 +108,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   info.max_reserve.store(1, std::memory_order_relaxed);
   info.create_time = test_start - absl::Hours(20);
 
-  info.PrepareForSampling();
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -119,14 +119,15 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
   EXPECT_GE(info.create_time, test_start);
 }
 
 TEST(HashtablezInfoTest, RecordStorageChanged) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const size_t test_element_size = 19;
+  info.PrepareForSampling(test_element_size);
   RecordStorageChangedSlow(&info, 17, 47);
   EXPECT_EQ(info.size.load(), 17);
   EXPECT_EQ(info.capacity.load(), 47);
@@ -138,7 +139,8 @@ TEST(HashtablezInfoTest, RecordStorageChanged) {
 TEST(HashtablezInfoTest, RecordInsert) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const size_t test_element_size = 23;
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.max_probe_length.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
   EXPECT_EQ(info.max_probe_length.load(), 6);
@@ -161,8 +163,7 @@ TEST(HashtablezInfoTest, RecordErase) {
   const size_t test_element_size = 29;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
-  info.inline_element_size.store(test_element_size);
+  info.PrepareForSampling(test_element_size);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -170,15 +171,14 @@ TEST(HashtablezInfoTest, RecordErase) {
   RecordEraseSlow(&info);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 1);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 }
 
 TEST(HashtablezInfoTest, RecordRehash) {
   const size_t test_element_size = 31;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
-  info.inline_element_size.store(test_element_size);
+  info.PrepareForSampling(test_element_size);
   RecordInsertSlow(&info, 0x1, 0);
   RecordInsertSlow(&info, 0x2, kProbeLength);
   RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -197,13 +197,14 @@ TEST(HashtablezInfoTest, RecordRehash) {
   EXPECT_EQ(info.total_probe_length.load(), 3);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.num_rehashes.load(), 1);
-  EXPECT_EQ(info.inline_element_size.load(), test_element_size);
+  EXPECT_EQ(info.inline_element_size, test_element_size);
 }
 
 TEST(HashtablezInfoTest, RecordReservation) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling();
+  const size_t test_element_size = 33;
+  info.PrepareForSampling(test_element_size);
   RecordReservationSlow(&info, 3);
   EXPECT_EQ(info.max_reserve.load(), 3);
 
@@ -266,7 +267,8 @@ TEST(HashtablezSamplerTest, Sample) {
 
 TEST(HashtablezSamplerTest, Handle) {
   auto& sampler = GlobalHashtablezSampler();
-  HashtablezInfoHandle h(sampler.Register());
+  const size_t test_element_size = 39;
+  HashtablezInfoHandle h(sampler.Register(test_element_size));
   auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
   info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
 
@@ -338,18 +340,19 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
   ThreadPool pool(10);
 
   for (int i = 0; i < 10; ++i) {
-    pool.Schedule([&sampler, &stop]() {
+    const size_t elt_size = 10 + i % 2;
+    pool.Schedule([&sampler, &stop, elt_size]() {
       std::random_device rd;
       std::mt19937 gen(rd());
 
       std::vector<HashtablezInfo*> infoz;
       while (!stop.HasBeenNotified()) {
         if (infoz.empty()) {
-          infoz.push_back(sampler.Register());
+          infoz.push_back(sampler.Register(elt_size));
         }
         switch (std::uniform_int_distribution<>(0, 2)(gen)) {
          case 0: {
-            infoz.push_back(sampler.Register());
+            infoz.push_back(sampler.Register(elt_size));
             break;
           }
           case 1: {
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 4b9f6cc0..362b3cae 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2075,8 +2075,7 @@ TEST(RawHashSamplerTest, Sample) {
                           std::memory_order_relaxed)]++;
       reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
     }
-    EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
-              sizeof(int64_t));
+    EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
     ++end_size;
   });
diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h
index 0d4ebeda..ca1ed408 100644
--- a/absl/container/node_hash_map.h
+++ b/absl/container/node_hash_map.h
@@ -41,6 +41,7 @@
 #include
 
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/node_slot_policy.h"
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
index 3bc5fe6b..9421e11e 100644
--- a/absl/container/node_hash_set.h
+++ b/absl/container/node_hash_set.h
@@ -38,6 +38,7 @@
 #include
 
 #include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
 #include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
 #include "absl/container/internal/node_slot_policy.h"
 #include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
diff --git a/absl/container/sample_element_size_test.cc b/absl/container/sample_element_size_test.cc
index 4bbef210..b23626b4 100644
--- a/absl/container/sample_element_size_test.cc
+++ b/absl/container/sample_element_size_test.cc
@@ -51,8 +51,7 @@ void TestInlineElementSize(
   size_t new_count = 0;
   sampler.Iterate([&](const HashtablezInfo& info) {
     if (preexisting_info.insert(&info).second) {
-      EXPECT_EQ(info.inline_element_size.load(std::memory_order_relaxed),
-                expected_element_size);
+      EXPECT_EQ(info.inline_element_size, expected_element_size);
       ++new_count;
     }
   });
diff --git a/absl/functional/function_ref.h b/absl/functional/function_ref.h
index 824e3cea..f9779607 100644
--- a/absl/functional/function_ref.h
+++ b/absl/functional/function_ref.h
@@ -69,7 +69,8 @@ class FunctionRef;
 // An `absl::FunctionRef` is a lightweight wrapper to any invokable object with
 // a compatible signature. Generally, an `absl::FunctionRef` should only be used
 // as an argument type and should be preferred as an argument over a const
-// reference to a `std::function`.
+// reference to a `std::function`. `absl::FunctionRef` itself does not
+// allocate, although the wrapped invokable may.
 //
 // Example:
 //
diff --git a/absl/functional/function_ref_test.cc b/absl/functional/function_ref_test.cc
index 3aa59745..412027cd 100644
--- a/absl/functional/function_ref_test.cc
+++ b/absl/functional/function_ref_test.cc
@@ -14,6 +14,7 @@
 
 #include "absl/functional/function_ref.h"
 
+#include
 #include
 
 #include "gmock/gmock.h"
diff --git a/absl/profiling/internal/sample_recorder.h b/absl/profiling/internal/sample_recorder.h
index 6c146210..fc269bdd 100644
--- a/absl/profiling/internal/sample_recorder.h
+++ b/absl/profiling/internal/sample_recorder.h
@@ -59,7 +59,8 @@ class SampleRecorder {
   ~SampleRecorder();
 
   // Registers for sampling. Returns an opaque registration info.
-  T* Register();
+  template <typename... Targs>
+  T* Register(Targs&&... args);
 
   // Unregisters the sample.
   void Unregister(T* sample);
@@ -81,7 +82,8 @@ class SampleRecorder {
  private:
   void PushNew(T* sample);
   void PushDead(T* sample);
-  T* PopDead();
+  template <typename... Targs>
+  T* PopDead(Targs... args);
 
   std::atomic<int64_t> dropped_samples_;
   std::atomic<int64_t> size_estimate_;
@@ -163,7 +165,8 @@ void SampleRecorder<T>::PushDead(T* sample) {
 }
 
 template <typename T>
-T* SampleRecorder<T>::PopDead() {
+template <typename... Targs>
+T* SampleRecorder<T>::PopDead(Targs... args) {
   absl::MutexLock graveyard_lock(&graveyard_.init_mu);
 
   // The list is circular, so eventually it collapses down to
@@ -175,12 +178,13 @@ T* SampleRecorder<T>::PopDead() {
   absl::MutexLock sample_lock(&sample->init_mu);
   graveyard_.dead = sample->dead;
   sample->dead = nullptr;
-  sample->PrepareForSampling();
+  sample->PrepareForSampling(std::forward<Targs>(args)...);
   return sample;
 }
 
 template <typename T>
-T* SampleRecorder<T>::Register() {
+template <typename... Targs>
+T* SampleRecorder<T>::Register(Targs&&... args) {
   int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
   if (size > max_samples_.load(std::memory_order_relaxed)) {
     size_estimate_.fetch_sub(1, std::memory_order_relaxed);
@@ -188,10 +192,14 @@ T* SampleRecorder<T>::Register() {
     return nullptr;
   }
 
-  T* sample = PopDead();
+  T* sample = PopDead(args...);
   if (sample == nullptr) {
     // Resurrection failed. Hire a new warlock.
     sample = new T();
+    {
+      absl::MutexLock sample_lock(&sample->init_mu);
+      sample->PrepareForSampling(std::forward<Targs>(args)...);
+    }
     PushNew(sample);
   }
 
diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc
index 0015bb9f..5905bac0 100644
--- a/absl/strings/cord.cc
+++ b/absl/strings/cord.cc
@@ -468,46 +468,6 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
   return true;
 }
 
-template <bool has_length>
-void Cord::InlineRep::GetAppendRegion(char** region, size_t* size,
-                                      size_t length) {
-  auto constexpr method = CordzUpdateTracker::kGetAppendRegion;
-
-  CordRep* root = tree();
-  size_t sz = root ? root->length : inline_size();
-  if (root == nullptr) {
-    size_t available = kMaxInline - sz;
-    if (available >= (has_length ? length : 1)) {
-      *region = data_.as_chars() + sz;
-      *size = has_length ? length : available;
-      set_inline_size(has_length ? sz + length : kMaxInline);
-      return;
-    }
-  }
-
-  size_t extra = has_length ? length : (std::max)(sz, kMinFlatLength);
-  CordzUpdateScope scope(root ? data_.cordz_info() : nullptr, method);
-  CordRep* rep = root ? cord_internal::RemoveCrcNode(root)
-                      : MakeFlatWithExtraCapacity(extra);
-  if (PrepareAppendRegion(rep, region, size, length)) {
-    CommitTree(root, rep, scope, method);
-    return;
-  }
-
-  // Allocate new node.
-  CordRepFlat* new_node = CordRepFlat::New(extra);
-  new_node->length = std::min(new_node->Capacity(), length);
-  *region = new_node->Data();
-  *size = new_node->length;
-
-  if (btree_enabled()) {
-    rep = CordRepBtree::Append(ForceBtree(rep), new_node);
-  } else {
-    rep = Concat(rep, new_node);
-  }
-  CommitTree(root, rep, scope, method);
-}
-
 // Computes the memory size of the provided edge which must be a valid data
 // edge for a btree, i.e., a FLAT, EXTERNAL or SUBSTRING of a FLAT or EXTERNAL
 // node.
 static bool RepMemoryUsageDataEdge(const CordRep* rep,
diff --git a/absl/strings/internal/cordz_update_tracker.h b/absl/strings/internal/cordz_update_tracker.h
index 7a41c180..c5170662 100644
--- a/absl/strings/internal/cordz_update_tracker.h
+++ b/absl/strings/internal/cordz_update_tracker.h
@@ -39,8 +39,8 @@ class CordzUpdateTracker {
   // Tracked update methods.
   enum MethodIdentifier {
     kUnknown,
-    kAppendBuffer,
     kAppendCord,
+    kAppendCordBuffer,
     kAppendExternalMemory,
     kAppendString,
     kAssignCord,
@@ -50,13 +50,14 @@ class CordzUpdateTracker {
     kConstructorString,
     kCordReader,
     kFlatten,
+    kGetAppendBuffer,
     kGetAppendRegion,
     kMakeCordFromExternal,
     kMoveAppendCord,
     kMoveAssignCord,
     kMovePrependCord,
-    kPrependBuffer,
     kPrependCord,
+    kPrependCordBuffer,
     kPrependString,
     kRemovePrefix,
     kRemoveSuffix,
diff --git a/absl/strings/internal/cordz_update_tracker_test.cc b/absl/strings/internal/cordz_update_tracker_test.cc
index 8eda529e..9b1f7986 100644
--- a/absl/strings/internal/cordz_update_tracker_test.cc
+++ b/absl/strings/internal/cordz_update_tracker_test.cc
@@ -37,8 +37,8 @@ using Methods = std::array<Method, Method::kNumMethods>;
 // Returns an array of all methods defined in `MethodIdentifier`
 Methods AllMethods() {
   return Methods{Method::kUnknown,
-                 Method::kAppendBuffer,
                  Method::kAppendCord,
+                 Method::kAppendCordBuffer,
                  Method::kAppendExternalMemory,
                  Method::kAppendString,
                  Method::kAssignCord,
@@ -48,13 +48,14 @@ Methods AllMethods() {
                  Method::kConstructorString,
                  Method::kCordReader,
                  Method::kFlatten,
+                 Method::kGetAppendBuffer,
                  Method::kGetAppendRegion,
                  Method::kMakeCordFromExternal,
                  Method::kMoveAppendCord,
                  Method::kMoveAssignCord,
                  Method::kMovePrependCord,
-                 Method::kPrependBuffer,
                  Method::kPrependCord,
+                 Method::kPrependCordBuffer,
                  Method::kPrependString,
                  Method::kRemovePrefix,
                  Method::kRemoveSuffix,
diff --git a/absl/time/internal/cctz/include/cctz/civil_time_detail.h b/absl/time/internal/cctz/include/cctz/civil_time_detail.h
index 8aadde57..a5b084e6 100644
--- a/absl/time/internal/cctz/include/cctz/civil_time_detail.h
+++ b/absl/time/internal/cctz/include/cctz/civil_time_detail.h
@@ -84,14 +84,13 @@ CONSTEXPR_F bool is_leap_year(year_t y) noexcept {
   return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0);
 }
 CONSTEXPR_F int year_index(year_t y, month_t m) noexcept {
-  return (static_cast<int>((y + (m > 2)) % 400) + 400) % 400;
+  const int yi = static_cast<int>((y + (m > 2)) % 400);
+  return yi < 0 ? yi + 400 : yi;
 }
-CONSTEXPR_F int days_per_century(year_t y, month_t m) noexcept {
-  const int yi = year_index(y, m);
+CONSTEXPR_F int days_per_century(int yi) noexcept {
   return 36524 + (yi == 0 || yi > 300);
 }
-CONSTEXPR_F int days_per_4years(year_t y, month_t m) noexcept {
-  const int yi = year_index(y, m);
+CONSTEXPR_F int days_per_4years(int yi) noexcept {
   return 1460 + (yi == 0 || yi > 300 || (yi - 1) % 100 < 96);
 }
 CONSTEXPR_F int days_per_year(year_t y, month_t m) noexcept {
@@ -133,17 +132,22 @@ CONSTEXPR_F fields n_day(year_t y, month_t m, diff_t d, diff_t cd, hour_t hh,
     }
   }
   if (d > 365) {
+    int yi = year_index(ey, m);  // Index into Gregorian 400 year cycle.
     for (;;) {
-      int n = days_per_century(ey, m);
+      int n = days_per_century(yi);
       if (d <= n) break;
      d -= n;
       ey += 100;
+      yi += 100;
+      if (yi >= 400) yi -= 400;
     }
     for (;;) {
-      int n = days_per_4years(ey, m);
+      int n = days_per_4years(yi);
       if (d <= n) break;
       d -= n;
       ey += 4;
+      yi += 4;
+      if (yi >= 400) yi -= 400;
     }
     for (;;) {
       int n = days_per_year(ey, m);
--
cgit v1.2.3
     for (;;) {
-      int n = days_per_century(ey, m);
+      int n = days_per_century(yi);
       if (d <= n) break;
       d -= n;
       ey += 100;
+      yi += 100;
+      if (yi >= 400) yi -= 400;
     }
     for (;;) {
-      int n = days_per_4years(ey, m);
+      int n = days_per_4years(yi);
       if (d <= n) break;
       d -= n;
       ey += 4;
+      yi += 4;
+      if (yi >= 400) yi -= 400;
     }
     for (;;) {
       int n = days_per_year(ey, m);
-- 
cgit v1.2.3


From 9bb42857112ad13b23de4333fbb75eb47db9de95 Mon Sep 17 00:00:00 2001
From: Abseil Team
Date: Tue, 18 Jan 2022 13:02:48 -0800
Subject: Export of internal Abseil changes

--
7f5caec21a1a88db6486b8bc2e5f6d5baba076ed by Derek Mauro:

Tag absl::StatusOr with [[nodiscard]] when it is available

[[nodiscard]] provides better diagnostics on classes than the current
ABSL_MUST_USE_RESULT, which expands to
__attribute__((warn_unused_result)).

Ideally we would make ABSL_MUST_USE_RESULT expand to [[nodiscard]],
but we would need to fix all code to build with [[nodiscard]] first.

PiperOrigin-RevId: 422628161

--
236be69f0f34ccaa3adc831075613847797e6557 by Jorg Brown:

Make sure all of absl/strings compiles even with -Wsign-compare and
-Wconversion warnings.

PiperOrigin-RevId: 422573904

--
005669883a8794e6d19dc4bbb4f0e18032e2fbc9 by Chris Kennelly:

Include sampling stride in hashtable sampling data.

This allows us to weight each sample to control for our sampling rate.

PiperOrigin-RevId: 421886935

--
78046ce6b429b9f5b6c97b05e8448a791db93bbe by Abseil Team:

Use __builtin_bit_cast for absl::bit_cast when possible

This makes absl::bit_cast match the requirements of the standard when
compiler support exists.

PiperOrigin-RevId: 421883999

--
f397461f4bbeabd32437df0f2275663aeb51adb2 by Derek Mauro:

Tag absl::Status with [[nodiscard]] when it is available

[[nodiscard]] provides better diagnostics on classes than the current
ABSL_MUST_USE_RESULT, which expands to
__attribute__((warn_unused_result)).

Ideally we would make ABSL_MUST_USE_RESULT expand to [[nodiscard]],
but we would need to fix all code to build with [[nodiscard]] first.

PiperOrigin-RevId: 421825565
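To see why class-level [[nodiscard]] gives better diagnostics, consider a
standalone sketch (hypothetical `MyStatus` and `Work`, not Abseil code):
once the class itself is annotated, every call site that discards a value
of that type warns, with no per-function annotation needed.

  class [[nodiscard]] MyStatus {};
  MyStatus Work() { return MyStatus{}; }
  int main() {
    Work();  // compilers typically warn here: discarded 'MyStatus' value
  }

The sampling-stride change can be read the same way: each sample now carries
the stride that was in effect when it was taken, so an aggregator can weight
by it. A minimal sketch, assuming a hypothetical flattened `SampleRecord`
and `EstimateTotalSize` helper (neither exists in Abseil):

  #include <cstdint>
  #include <vector>

  struct SampleRecord {
    int64_t weight;  // sampling stride in effect when this was sampled
    int64_t size;    // size of the sampled hashtable
  };

  // Each sample stands in for roughly `weight` unsampled events, so
  // scaling by the stride gives an approximately unbiased estimate.
  int64_t EstimateTotalSize(const std::vector<SampleRecord>& samples) {
    int64_t total = 0;
    for (const SampleRecord& s : samples) total += s.weight * s.size;
    return total;
  }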
GitOrigin-RevId: 7f5caec21a1a88db6486b8bc2e5f6d5baba076ed
Change-Id: I760b45b68f6012809c70c2a584e4144a99f98733
---
 absl/base/casts.h                                  | 18 ++++++--
 absl/container/internal/hashtablez_sampler.cc      | 34 ++++++++++-----
 absl/container/internal/hashtablez_sampler.h       | 18 +++++---
 absl/container/internal/hashtablez_sampler_test.cc | 51 ++++++++++++++--------
 absl/profiling/internal/sample_recorder.h          |  1 +
 absl/profiling/internal/sample_recorder_test.cc    | 39 +++++++++++------
 absl/status/internal/status_internal.h             |  8 ++++
 absl/status/statusor.h                             |  6 +++
 absl/strings/internal/escaping.cc                  |  9 ++--
 absl/strings/internal/ostringstream.cc             |  2 +-
 absl/strings/internal/utf8.cc                      | 18 ++++----
 11 files changed, 139 insertions(+), 65 deletions(-)

(limited to 'absl/profiling/internal/sample_recorder.h')

diff --git a/absl/base/casts.h b/absl/base/casts.h
index b16af233..bf100efa 100644
--- a/absl/base/casts.h
+++ b/absl/base/casts.h
@@ -44,8 +44,12 @@ struct is_bitcastable
           bool,
           sizeof(Dest) == sizeof(Source) &&
               type_traits_internal::is_trivially_copyable<Source>::value &&
-              type_traits_internal::is_trivially_copyable<Dest>::value &&
-              std::is_default_constructible<Dest>::value> {};
+              type_traits_internal::is_trivially_copyable<Dest>::value
+#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+              && std::is_default_constructible<Dest>::value
+#endif
+              > {
+};
 
 }  // namespace internal_casts
 
@@ -147,20 +151,26 @@ constexpr To implicit_cast(typename absl::internal::identity_t<To> to) {
 // introducing this undefined behavior (since the original value is never
 // accessed in the wrong way).
 //
-// NOTE: The requirements here are stricter than the bit_cast of standard
-// proposal P0476 due to the need for workarounds and lack of intrinsics.
+// NOTE: The requirements here are more strict than the bit_cast of standard
+// proposal P0476 when __builtin_bit_cast is not available.
 // Specifically, this implementation also requires `Dest` to be
 // default-constructible.
 template <
     typename Dest, typename Source,
    typename std::enable_if<internal_casts::is_bitcastable<Dest, Source>::value,
                             int>::type = 0>
+#if ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline constexpr Dest bit_cast(const Source& source) {
+  return __builtin_bit_cast(Dest, source);
+}
+#else
 inline Dest bit_cast(const Source& source) {
   Dest dest;
   memcpy(static_cast<void*>(std::addressof(dest)),
          static_cast<const void*>(std::addressof(source)), sizeof(dest));
   return dest;
 }
+#endif
 
 // NOTE: This overload is only picked if the requirements of bit_cast are
 // not met.  It is therefore UB, but is provided temporarily as previous
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 1d24db5f..322e0547 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -27,6 +27,7 @@
 #include "absl/profiling/internal/exponential_biased.h"
 #include "absl/profiling/internal/sample_recorder.h"
 #include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
@@ -53,7 +54,7 @@ void TriggerHashtablezConfigListener() {
 }  // namespace
 
 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0};
 #endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
 
 HashtablezSampler& GlobalHashtablezSampler() {
@@ -64,7 +65,8 @@ HashtablezSampler& GlobalHashtablezSampler() {
 HashtablezInfo::HashtablezInfo() = default;
 HashtablezInfo::~HashtablezInfo() = default;
 
-void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
+void HashtablezInfo::PrepareForSampling(int64_t stride,
+                                        size_t inline_element_size_value) {
   capacity.store(0, std::memory_order_relaxed);
   size.store(0, std::memory_order_relaxed);
   num_erases.store(0, std::memory_order_relaxed);
@@ -77,6 +79,7 @@ void HashtablezInfo::PrepareForSampling(size_t inline_element_size_value) {
   max_reserve.store(0, std::memory_order_relaxed);
 
   create_time = absl::Now();
+  weight = stride;
   // The inliner makes hardcoded skip_count difficult (especially when combined
   // with LTO).  We use the ability to exclude stacks by regex when encoding
   // instead.
@@ -105,23 +108,32 @@ static bool ShouldForceSampling() {
   return state == kForce;
 }
 
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+                           size_t inline_element_size) {
   if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
-    *next_sample = 1;
+    next_sample.next_sample = 1;
+    const int64_t old_stride = exchange(next_sample.sample_stride, 1);
     HashtablezInfo* result =
-        GlobalHashtablezSampler().Register(inline_element_size);
+        GlobalHashtablezSampler().Register(old_stride, inline_element_size);
     return result;
   }
 
 #if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-  *next_sample = std::numeric_limits<int64_t>::max();
+  next_sample = {
+      std::numeric_limits<int64_t>::max(),
+      std::numeric_limits<int64_t>::max(),
+  };
   return nullptr;
 #else
-  bool first = *next_sample < 0;
-  *next_sample = g_exponential_biased_generator.GetStride(
+  bool first = next_sample.next_sample < 0;
+
+  const int64_t next_stride = g_exponential_biased_generator.GetStride(
       g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+
+  next_sample.next_sample = next_stride;
+  const int64_t old_stride = exchange(next_sample.sample_stride, next_stride);
   // Small values of interval are equivalent to just sampling next time.
-  ABSL_ASSERT(*next_sample >= 1);
+  ABSL_ASSERT(next_stride >= 1);
 
   // g_hashtablez_enabled can be dynamically flipped, we need to set a threshold
   // low enough that we will start sampling in a reasonable time, so we just use
@@ -131,11 +143,11 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
 
   // We will only be negative on our first count, so we should just retry in
   // that case.
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size);
 
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
@@ -95,6 +97,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
   EXPECT_GE(info.create_time, test_start);
+  EXPECT_EQ(info.weight, test_stride);
   EXPECT_EQ(info.inline_element_size, test_element_size);
 
   info.capacity.store(1, std::memory_order_relaxed);
@@ -108,7 +111,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   info.max_reserve.store(1, std::memory_order_relaxed);
   info.create_time = test_start - absl::Hours(20);
 
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride * 2, test_element_size);
   EXPECT_EQ(info.capacity.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   EXPECT_EQ(info.num_erases.load(), 0);
@@ -119,6 +122,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
   EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
   EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
   EXPECT_EQ(info.max_reserve.load(), 0);
+  EXPECT_EQ(info.weight, 2 * test_stride);
   EXPECT_EQ(info.inline_element_size, test_element_size);
   EXPECT_GE(info.create_time, test_start);
 }
@@ -126,8 +130,9 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
 TEST(HashtablezInfoTest, RecordStorageChanged) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
+  const int64_t test_stride = 21;
   const size_t test_element_size = 19;
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size);
   RecordStorageChangedSlow(&info, 17, 47);
   EXPECT_EQ(info.size.load(), 17);
   EXPECT_EQ(info.capacity.load(), 47);
@@ -139,8 +144,9 @@ TEST(HashtablezInfoTest, RecordStorageChanged) {
 TEST(HashtablezInfoTest, RecordInsert) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
+  const int64_t test_stride = 25;
   const size_t test_element_size = 23;
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size);
   EXPECT_EQ(info.max_probe_length.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
   EXPECT_EQ(info.max_probe_length.load(), 6);
@@ -160,10 +166,11 @@ TEST(HashtablezInfoTest, RecordInsert) {
 }
 
 TEST(HashtablezInfoTest, RecordErase) {
+  const int64_t test_stride = 31;
   const size_t test_element_size = 29;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size);
   EXPECT_EQ(info.num_erases.load(), 0);
   EXPECT_EQ(info.size.load(), 0);
   RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -175,10 +182,11 @@ TEST(HashtablezInfoTest, RecordErase) {
 }
 
 TEST(HashtablezInfoTest, RecordRehash) {
+  const int64_t test_stride = 33;
   const size_t test_element_size = 31;
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size);
   RecordInsertSlow(&info, 0x1, 0);
   RecordInsertSlow(&info, 0x2, kProbeLength);
   RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -203,8 +211,9 @@ TEST(HashtablezInfoTest, RecordRehash) {
 TEST(HashtablezInfoTest, RecordReservation) {
   HashtablezInfo info;
   absl::MutexLock l(&info.init_mu);
+  const int64_t test_stride = 35;
   const size_t test_element_size = 33;
-  info.PrepareForSampling(test_element_size);
+  info.PrepareForSampling(test_stride, test_element_size);
   RecordReservationSlow(&info, 3);
   EXPECT_EQ(info.max_reserve.load(), 3);
 
@@ -224,9 +233,10 @@ TEST(HashtablezSamplerTest, SmallSampleParameter) {
   SetHashtablezSampleParameter(100);
 
   for (int i = 0; i < 1000; ++i) {
-    int64_t next_sample = 0;
-    HashtablezInfo* sample = SampleSlow(&next_sample, test_element_size);
-    EXPECT_GT(next_sample, 0);
+    SamplingState next_sample = {0, 0};
+    HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+    EXPECT_GT(next_sample.next_sample, 0);
+    EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
     EXPECT_NE(sample, nullptr);
     UnsampleSlow(sample);
   }
@@ -238,9 +248,10 @@ TEST(HashtablezSamplerTest, LargeSampleParameter) {
   SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
 
   for (int i = 0; i < 1000; ++i) {
-    int64_t next_sample = 0;
-    HashtablezInfo* sample = SampleSlow(&next_sample, test_element_size);
-    EXPECT_GT(next_sample, 0);
+    SamplingState next_sample = {0, 0};
+    HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+    EXPECT_GT(next_sample.next_sample, 0);
+    EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
     EXPECT_NE(sample, nullptr);
     UnsampleSlow(sample);
   }
@@ -267,14 +278,16 @@ TEST(HashtablezSamplerTest, Sample) {
 
 TEST(HashtablezSamplerTest, Handle) {
   auto& sampler = GlobalHashtablezSampler();
+  const int64_t test_stride = 41;
   const size_t test_element_size = 39;
-  HashtablezInfoHandle h(sampler.Register(test_element_size));
+  HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size));
   auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
   info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
 
   bool found = false;
   sampler.Iterate([&](const HashtablezInfo& h) {
     if (&h == info) {
+      EXPECT_EQ(h.weight, test_stride);
       EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
       found = true;
     }
@@ -340,19 +353,20 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
   ThreadPool pool(10);
 
   for (int i = 0; i < 10; ++i) {
+    const int64_t sampling_stride = 11 + i % 3;
     const size_t elt_size = 10 + i % 2;
-    pool.Schedule([&sampler, &stop, elt_size]() {
+    pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() {
       std::random_device rd;
      std::mt19937 gen(rd());
 
       std::vector<HashtablezInfo*> infoz;
       while (!stop.HasBeenNotified()) {
         if (infoz.empty()) {
-          infoz.push_back(sampler.Register(elt_size));
+          infoz.push_back(sampler.Register(sampling_stride, elt_size));
         }
         switch (std::uniform_int_distribution<>(0, 2)(gen)) {
           case 0: {
-            infoz.push_back(sampler.Register(elt_size));
+            infoz.push_back(sampler.Register(sampling_stride, elt_size));
             break;
           }
           case 1: {
@@ -361,6 +375,7 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
             HashtablezInfo* info = infoz[p];
             infoz[p] = infoz.back();
             infoz.pop_back();
+            EXPECT_EQ(info->weight, sampling_stride);
             sampler.Unregister(info);
             break;
           }
diff --git a/absl/profiling/internal/sample_recorder.h b/absl/profiling/internal/sample_recorder.h
index fc269bdd..5f65983b 100644
--- a/absl/profiling/internal/sample_recorder.h
+++ b/absl/profiling/internal/sample_recorder.h
@@ -46,6 +46,7 @@ struct Sample {
   absl::Mutex init_mu;
   T* next = nullptr;
   T* dead ABSL_GUARDED_BY(init_mu) = nullptr;
+  int64_t weight;  // How many sampling events were required to sample this one.
 };
 
 // Holds samples and their associated stack traces with a soft limit of
diff --git a/absl/profiling/internal/sample_recorder_test.cc b/absl/profiling/internal/sample_recorder_test.cc
index ec6e0fa2..3373329a 100644
--- a/absl/profiling/internal/sample_recorder_test.cc
+++ b/absl/profiling/internal/sample_recorder_test.cc
@@ -36,7 +36,7 @@ using ::testing::UnorderedElementsAre;
 
 struct Info : public Sample<Info> {
  public:
-  void PrepareForSampling() {}
+  void PrepareForSampling(int64_t w) { weight = w; }
   std::atomic<size_t> size;
   absl::Time create_time;
 };
@@ -49,8 +49,14 @@ std::vector<size_t> GetSizes(SampleRecorder<Info>* s) {
   return res;
 }
 
-Info* Register(SampleRecorder<Info>* s, size_t size) {
-  auto* info = s->Register();
+std::vector<int64_t> GetWeights(SampleRecorder<Info>* s) {
+  std::vector<int64_t> res;
+  s->Iterate([&](const Info& info) { res.push_back(info.weight); });
+  return res;
+}
+
+Info* Register(SampleRecorder<Info>* s, int64_t weight, size_t size) {
+  auto* info = s->Register(weight);
   assert(info != nullptr);
   info->size.store(size);
   return info;
@@ -58,13 +64,15 @@
 
 TEST(SampleRecorderTest, Registration) {
   SampleRecorder<Info> sampler;
-  auto* info1 = Register(&sampler, 1);
+  auto* info1 = Register(&sampler, 31, 1);
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(31));
 
-  auto* info2 = Register(&sampler, 2);
+  auto* info2 = Register(&sampler, 32, 2);
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
   info1->size.store(3);
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(31, 32));
 
   sampler.Unregister(info1);
   sampler.Unregister(info2);
@@ -74,18 +82,22 @@ TEST(SampleRecorderTest, Unregistration) {
   SampleRecorder<Info> sampler;
   std::vector<Info*> infos;
   for (size_t i = 0; i < 3; ++i) {
-    infos.push_back(Register(&sampler, i));
+    infos.push_back(Register(&sampler, 33 + i, i));
   }
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 34, 35));
 
   sampler.Unregister(infos[1]);
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35));
 
-  infos.push_back(Register(&sampler, 3));
-  infos.push_back(Register(&sampler, 4));
+  infos.push_back(Register(&sampler, 36, 3));
+  infos.push_back(Register(&sampler, 37, 4));
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35, 36, 37));
   sampler.Unregister(infos[3]);
   EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));
+  EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35, 37));
 
   sampler.Unregister(infos[0]);
   sampler.Unregister(infos[2]);
@@ -99,18 +111,18 @@ TEST(SampleRecorderTest, MultiThreaded) {
   ThreadPool pool(10);
 
   for (int i = 0; i < 10; ++i) {
-    pool.Schedule([&sampler, &stop]() {
+    pool.Schedule([&sampler, &stop, i]() {
       std::random_device rd;
       std::mt19937 gen(rd());
 
       std::vector<Info*> infoz;
       while (!stop.HasBeenNotified()) {
         if (infoz.empty()) {
-          infoz.push_back(sampler.Register());
+          infoz.push_back(sampler.Register(i));
         }
         switch (std::uniform_int_distribution<>(0, 2)(gen)) {
          case 0: {
-            infoz.push_back(sampler.Register());
+            infoz.push_back(sampler.Register(i));
             break;
           }
           case 1: {
@@ -119,6 +131,7 @@ TEST(SampleRecorderTest, MultiThreaded) {
             Info* info = infoz[p];
             infoz[p] = infoz.back();
             infoz.pop_back();
+            EXPECT_EQ(info->weight, i);
             sampler.Unregister(info);
             break;
           }
@@ -143,8 +156,8 @@ TEST(SampleRecorderTest, MultiThreaded) {
 
 TEST(SampleRecorderTest, Callback) {
   SampleRecorder<Info> sampler;
-  auto* info1 = Register(&sampler, 1);
-  auto* info2 = Register(&sampler, 2);
+  auto* info1 = Register(&sampler, 39, 1);
+  auto* info2 = Register(&sampler, 40, 2);
 
   static const Info* expected;
diff --git a/absl/status/internal/status_internal.h b/absl/status/internal/status_internal.h
index ac12940a..34914d2e 100644
--- a/absl/status/internal/status_internal.h
+++ b/absl/status/internal/status_internal.h
@@ -16,6 +16,7 @@
 
 #include <string>
 
+#include "absl/base/attributes.h"
 #include "absl/container/inlined_vector.h"
 #include "absl/strings/cord.h"
 
@@ -25,7 +26,14 @@ namespace absl {
 ABSL_NAMESPACE_BEGIN
 // Returned Status objects may not be ignored. Codesearch doesn't handle ifdefs
 // as part of class definitions (b/6995610), so we use a forward declaration.
+//
+// TODO(b/176172494): ABSL_MUST_USE_RESULT should expand to the more strict
+// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available.
+#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+class [[nodiscard]] Status;
+#else
 class ABSL_MUST_USE_RESULT Status;
+#endif
 ABSL_NAMESPACE_END
 }  // namespace absl
 #endif  // !SWIG
diff --git a/absl/status/statusor.h b/absl/status/statusor.h
index c051fbb3..d6ebdc2b 100644
--- a/absl/status/statusor.h
+++ b/absl/status/statusor.h
@@ -106,7 +106,13 @@ class BadStatusOrAccess : public std::exception {
 
 // Returned StatusOr objects may not be ignored.
 template <typename T>
+#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+// TODO(b/176172494): ABSL_MUST_USE_RESULT should expand to the more strict
+// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available.
+class [[nodiscard]] StatusOr;
+#else
 class ABSL_MUST_USE_RESULT StatusOr;
+#endif  // ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
 
 // absl::StatusOr<T>
 //
diff --git a/absl/strings/internal/escaping.cc b/absl/strings/internal/escaping.cc
index c5271286..7f87e124 100644
--- a/absl/strings/internal/escaping.cc
+++ b/absl/strings/internal/escaping.cc
@@ -102,8 +102,8 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
     }
   }
   // To save time, we didn't update szdest or szsrc in the loop. So do it now.
-  szdest = limit_dest - cur_dest;
-  szsrc = limit_src - cur_src;
+  szdest = static_cast<size_t>(limit_dest - cur_dest);
+  szsrc = static_cast<size_t>(limit_src - cur_src);
 
   /* now deal with the tail (<=3 bytes) */
   switch (szsrc) {
@@ -154,7 +154,8 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
       // the loop because the loop above always reads 4 bytes, and the fourth
       // byte is past the end of the input.
       if (szdest < 4) return 0;
-      uint32_t in = (cur_src[0] << 16) + absl::big_endian::Load16(cur_src + 1);
+      uint32_t in =
+          (uint32_t{cur_src[0]} << 16) + absl::big_endian::Load16(cur_src + 1);
       cur_dest[0] = base64[in >> 18];
       in &= 0x3FFFF;
       cur_dest[1] = base64[in >> 12];
@@ -172,7 +173,7 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
       ABSL_RAW_LOG(FATAL, "Logic problem? szsrc = %zu", szsrc);
       break;
   }
-  return (cur_dest - dest);
+  return static_cast<size_t>(cur_dest - dest);
 }
 
 }  // namespace strings_internal
diff --git a/absl/strings/internal/ostringstream.cc b/absl/strings/internal/ostringstream.cc
index 05324c78..dc6cfe16 100644
--- a/absl/strings/internal/ostringstream.cc
+++ b/absl/strings/internal/ostringstream.cc
@@ -27,7 +27,7 @@ OStringStream::Buf::int_type OStringStream::overflow(int c) {
 
 std::streamsize OStringStream::xsputn(const char* s, std::streamsize n) {
   assert(s_);
-  s_->append(s, n);
+  s_->append(s, static_cast<size_t>(n));
   return n;
 }
 
diff --git a/absl/strings/internal/utf8.cc b/absl/strings/internal/utf8.cc
index 8fd8edc1..7ecb93df 100644
--- a/absl/strings/internal/utf8.cc
+++ b/absl/strings/internal/utf8.cc
@@ -25,25 +25,25 @@ size_t EncodeUTF8Char(char *buffer, char32_t utf8_char) {
     *buffer = static_cast<char>(utf8_char);
     return 1;
   } else if (utf8_char <= 0x7FF) {
-    buffer[1] = 0x80 | (utf8_char & 0x3F);
+    buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[0] = 0xC0 | utf8_char;
+    buffer[0] = static_cast<char>(0xC0 | utf8_char);
     return 2;
   } else if (utf8_char <= 0xFFFF) {
-    buffer[2] = 0x80 | (utf8_char & 0x3F);
+    buffer[2] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[1] = 0x80 | (utf8_char & 0x3F);
+    buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[0] = 0xE0 | utf8_char;
+    buffer[0] = static_cast<char>(0xE0 | utf8_char);
     return 3;
   } else {
-    buffer[3] = 0x80 | (utf8_char & 0x3F);
+    buffer[3] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[2] = 0x80 | (utf8_char & 0x3F);
+    buffer[2] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[1] = 0x80 | (utf8_char & 0x3F);
+    buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
     utf8_char >>= 6;
-    buffer[0] = 0xF0 | utf8_char;
+    buffer[0] = static_cast<char>(0xF0 | utf8_char);
     return 4;
   }
 }
-- 
cgit v1.2.3