author    Abseil Team <absl-team@google.com>    2021-05-12 17:01:06 -0700
committer vslashg <gfalcon@google.com>          2021-05-12 20:28:41 -0400
commit    ce42de10fbea616379826e91c7c23c16bffe6e61 (patch)
tree      84e546f054980b875f6be1a6eb3a9256a5b88ba5 /absl
parent    7ba826e50dff1878e6ecc6b9af44097c040c8968 (diff)
Export of internal Abseil changes
--
9fc37c11b9e46287acef00ee06ed9adcba54dd13 by Greg Falcon <gfalcon@google.com>:
Rename absl::hash_internal::HashState to absl::hash_internal::MixingHashState.
Before this change, we had two classes named HashState: absl::HashState, the public API used for type erasure, and absl::hash_internal::HashState, the internal concrete implementation ordinarily used.
The internal class used to be named `CityHashState`, but we renamed it to `HashState` when we changed the underlying hash implementation to wyhash. That inadvertently created a naming conflict that made the code much harder to read; this change undoes it.
PiperOrigin-RevId: 373481959
--
4aec55ffddebd085c239352a2e20721091f719a1 by Greg Falcon <gfalcon@google.com>:
Introduce absl::HashOf(), a convenience wrapper around absl::Hash that calculates hashes from the values of its arguments.
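For illustration (not part of the original change description), a minimal usage
sketch based on the semantics documented in the hash.h diff below; the variable
names are hypothetical:

    std::string name = "key";  // hypothetical example values
    int id = 42;
    // Combines the values of its arguments:
    //   absl::HashOf(name, id) == absl::HashOf(std::make_tuple(name, id))
    // Types matter as well as values: absl::HashOf(2) != absl::HashOf(2.0).
    size_t h = absl::HashOf(name, id);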
PiperOrigin-RevId: 373461406
--
86b5fd8db50bbc8bd0aa9258523527381fe0445d by Abseil Team <absl-team@google.com>:
Improve the speed of BlockingCounter by making its most common path lock-free.
With the new implementation, the fast path of BlockingCounter::DecrementCount()
is a single fetch_sub operation. This is usually much more efficient than the
previous implementation (a full mutex lock/unlock). In practice the waiter
thread is already blocked in Wait() by the time DecrementCount() is called,
which forces Mutex::Unlock() onto its slow path, since there is a waiter thread
it may need to wake up.
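The gist of the new fast path, condensed from the blocking_counter.cc diff
further down this page:

    bool BlockingCounter::DecrementCount() {
      // Fast path: a single atomic decrement, no mutex.
      int count = count_.fetch_sub(1, std::memory_order_acq_rel) - 1;
      ABSL_RAW_CHECK(count >= 0,
                     "BlockingCounter::DecrementCount() called too many times");
      if (count == 0) {
        // Only the final decrement takes the lock, to publish `done_` to the
        // thread blocked in Wait().
        MutexLock l(&lock_);
        done_ = true;
        return true;
      }
      return false;
    }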
PiperOrigin-RevId: 373394164
--
65c876be5eac0cd32583ff8535ede4109d39cf3f by Martijn Vels <mvels@google.com>:
Move the 'sample copied cord' logic into MaybeTrackCord().
This change moves the logic for deciding whether a copied cord should remain
sampled from Cord into CordzInfo::MaybeTrackCord, and updates the documentation
for the latter method.
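Condensed from the cordz_info.cc diff below, the moved logic now lives in one
non-inlined helper:

    void CordzInfo::MaybeTrackCordImpl(InlineData& cord, const InlineData& src,
                                       MethodIdentifier method) {
      if (src.is_profiled()) {
        TrackCord(cord, src, method);  // the copy stays sampled
      } else if (cord.is_profiled()) {
        cord.cordz_info()->Untrack();  // assigned from an unsampled cord:
        cord.clear_cordz_info();       // stop sampling this cord
      }
    }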
PiperOrigin-RevId: 373363168
--
e84410bd0aada293a81dfb82656c952e209e21fb by Martijn Vels <mvels@google.com>:
Add check for the first call to cordz_should_profile() for each thread.
This prevents the first cord created on a newly started thread from always
being sampled, which is a 'bad' kind of determinism for sampling.
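The mechanism, condensed from the cordz_functions.cc diff below: a negative
sentinel marks a thread whose sampling stride has not been drawn yet, and the
first call re-rolls against the freshly drawn stride instead of sampling
unconditionally:

    // Special negative 'not initialized' per-thread value.
    ABSL_CONST_INIT thread_local int64_t cordz_next_sample = kInitCordzNextSample;

    // Inside cordz_should_profile_slow():
    if (cordz_next_sample <= 0) {
      const bool initialized = cordz_next_sample != kInitCordzNextSample;
      cordz_next_sample = exponential_biased_generator.GetStride(mean_interval);
      // First call on this thread: retry with the new stride rather than
      // deterministically sampling the first cord.
      return initialized || cordz_should_profile();
    }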
PiperOrigin-RevId: 373229768
--
bf09c589dc099ac8f4af780bf7e609c53c27574c by Samuel Benzaquen <sbenza@google.com>:
Refactor the Flags structure into an enum.
This gives us more control over the representation and allows for easier
merging during parsing.
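For example (condensed from the extension.h diff below), the enum
representation lets two flag sets merge with a single bitwise OR:

    enum class Flags : uint8_t {
      kBasic = 0,
      kLeft = 1 << 0,
      kShowPos = 1 << 1,
      // kSignCol, kAlt, kZero, kNonBasic elided; see extension.h below.
    };

    constexpr Flags operator|(Flags a, Flags b) {
      return static_cast<Flags>(static_cast<uint8_t>(a) |
                                static_cast<uint8_t>(b));
    }

    constexpr bool FlagsContains(Flags haystack, Flags needle) {
      return (static_cast<uint8_t>(haystack) & static_cast<uint8_t>(needle)) ==
             static_cast<uint8_t>(needle);
    }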
PiperOrigin-RevId: 373163038
--
b947b0c51083b7b6508284b5d31819596c91729e by Derek Mauro <dmauro@google.com>:
Fixes warnings about shadowed variables
Fixes #956
PiperOrigin-RevId: 373158133
GitOrigin-RevId: 9fc37c11b9e46287acef00ee06ed9adcba54dd13
Change-Id: I91f35699f9bf439d1a870c6493946a310afe088c
Diffstat (limited to 'absl')
26 files changed, 501 insertions(+), 320 deletions(-)
diff --git a/absl/copts/GENERATED_AbseilCopts.cmake b/absl/copts/GENERATED_AbseilCopts.cmake
index 18a1e5c3..22a25eba 100644
--- a/absl/copts/GENERATED_AbseilCopts.cmake
+++ b/absl/copts/GENERATED_AbseilCopts.cmake
@@ -77,7 +77,7 @@ list(APPEND ABSL_LLVM_FLAGS
     "-Woverlength-strings"
     "-Wpointer-arith"
     "-Wself-assign"
-    "-Wshadow"
+    "-Wshadow-all"
     "-Wstring-conversion"
     "-Wtautological-overlap-compare"
     "-Wundef"
diff --git a/absl/copts/GENERATED_copts.bzl b/absl/copts/GENERATED_copts.bzl
index d2bd5608..0c7a91bd 100644
--- a/absl/copts/GENERATED_copts.bzl
+++ b/absl/copts/GENERATED_copts.bzl
@@ -78,7 +78,7 @@ ABSL_LLVM_FLAGS = [
     "-Woverlength-strings",
     "-Wpointer-arith",
     "-Wself-assign",
-    "-Wshadow",
+    "-Wshadow-all",
     "-Wstring-conversion",
     "-Wtautological-overlap-compare",
     "-Wundef",
diff --git a/absl/copts/copts.py b/absl/copts/copts.py
index ce30df89..7268c680 100644
--- a/absl/copts/copts.py
+++ b/absl/copts/copts.py
@@ -93,7 +93,7 @@ COPT_VARS = {
         "-Woverlength-strings",
         "-Wpointer-arith",
         "-Wself-assign",
-        "-Wshadow",
+        "-Wshadow-all",
         "-Wstring-conversion",
         "-Wtautological-overlap-compare",
         "-Wundef",
diff --git a/absl/hash/hash.h b/absl/hash/hash.h
index 5de132ca..69c44fde 100644
--- a/absl/hash/hash.h
+++ b/absl/hash/hash.h
@@ -73,6 +73,8 @@
 #ifndef ABSL_HASH_HASH_H_
 #define ABSL_HASH_HASH_H_
 
+#include <tuple>
+
 #include "absl/hash/internal/hash.h"
 
 namespace absl {
@@ -214,6 +216,26 @@ ABSL_NAMESPACE_BEGIN
 template <typename T>
 using Hash = absl::hash_internal::Hash<T>;
 
+// HashOf
+//
+// absl::HashOf() is a helper that generates a hash from the values of its
+// arguments. It dispatches to absl::Hash directly, as follows:
+//  * HashOf(t) == absl::Hash<T>{}(t)
+//  * HashOf(a, b, c) == HashOf(std::make_tuple(a, b, c))
+//
+// HashOf(a1, a2, ...) == HashOf(b1, b2, ...) is guaranteed when
+//  * The argument lists have pairwise identical C++ types
+//  * a1 == b1 && a2 == b2 && ...
+//
+// The requirement that the arguments match in both type and value is critical.
+// It means that `a == b` does not necessarily imply `HashOf(a) == HashOf(b)` if
+// `a` and `b` have different types. For example, `HashOf(2) != HashOf(2.0)`.
+template <typename... Types>
+size_t HashOf(const Types&... values) {
+  auto tuple = std::tie(values...);
+  return absl::Hash<decltype(tuple)>{}(tuple);
+}
+
 // HashState
 //
 // A type erased version of the hash state concept, for use in user-defined
diff --git a/absl/hash/hash_test.cc b/absl/hash/hash_test.cc
index 1d2e6cf0..02b7733d 100644
--- a/absl/hash/hash_test.cc
+++ b/absl/hash/hash_test.cc
@@ -973,4 +973,26 @@ TEST(HashTest, DoesNotUseImplicitConversionsToBool) {
             absl::Hash<ValueWithBoolConversion>()(ValueWithBoolConversion{1}));
 }
 
+TEST(HashOf, MatchesHashForSingleArgument) {
+  std::string s = "forty two";
+  int i = 42;
+  double d = 42.0;
+  std::tuple<int, int> t{4, 2};
+
+  EXPECT_EQ(absl::HashOf(s), absl::Hash<std::string>{}(s));
+  EXPECT_EQ(absl::HashOf(i), absl::Hash<int>{}(i));
+  EXPECT_EQ(absl::HashOf(d), absl::Hash<double>{}(d));
+  EXPECT_EQ(absl::HashOf(t), (absl::Hash<std::tuple<int, int>>{}(t)));
+}
+
+TEST(HashOf, MatchesHashOfTupleForMultipleArguments) {
+  std::string hello = "hello";
+  std::string world = "world";
+
+  EXPECT_EQ(absl::HashOf(), absl::HashOf(std::make_tuple()));
+  EXPECT_EQ(absl::HashOf(hello), absl::HashOf(std::make_tuple(hello)));
+  EXPECT_EQ(absl::HashOf(hello, world),
+            absl::HashOf(std::make_tuple(hello, world)));
+}
+
 }  // namespace
diff --git a/absl/hash/internal/hash.cc b/absl/hash/internal/hash.cc
index 1433eb9d..06f53a59 100644
--- a/absl/hash/internal/hash.cc
+++ b/absl/hash/internal/hash.cc
@@ -18,9 +18,8 @@ namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace hash_internal {
 
-uint64_t HashState::CombineLargeContiguousImpl32(uint64_t state,
-                                                 const unsigned char* first,
-                                                 size_t len) {
+uint64_t MixingHashState::CombineLargeContiguousImpl32(
+    uint64_t state, const unsigned char* first, size_t len) {
   while (len >= PiecewiseChunkSize()) {
     state = Mix(state,
                 absl::hash_internal::CityHash32(reinterpret_cast<const char*>(first),
@@ -33,9 +32,8 @@ uint64_t HashState::CombineLargeContiguousImpl32(uint64_t state,
                                std::integral_constant<int, 4>{});
 }
 
-uint64_t HashState::CombineLargeContiguousImpl64(uint64_t state,
-                                                 const unsigned char* first,
-                                                 size_t len) {
+uint64_t MixingHashState::CombineLargeContiguousImpl64(
+    uint64_t state, const unsigned char* first, size_t len) {
   while (len >= PiecewiseChunkSize()) {
     state = Mix(state, Hash64(first, PiecewiseChunkSize()));
     len -= PiecewiseChunkSize();
@@ -46,7 +44,7 @@ uint64_t HashState::CombineLargeContiguousImpl64(uint64_t state,
                                std::integral_constant<int, 8>{});
 }
 
-ABSL_CONST_INIT const void* const HashState::kSeed = &kSeed;
+ABSL_CONST_INIT const void* const MixingHashState::kSeed = &kSeed;
 
 // The salt array used by Wyhash. This array is NOT the mechanism used to make
 // absl::Hash non-deterministic between program invocations. See `Seed()` for
@@ -61,7 +59,7 @@ constexpr uint64_t kWyhashSalt[5] = {
     uint64_t{0x452821E638D01377},
 };
 
-uint64_t HashState::WyhashImpl(const unsigned char* data, size_t len) {
+uint64_t MixingHashState::WyhashImpl(const unsigned char* data, size_t len) {
   return Wyhash(data, len, Seed(), kWyhashSalt);
 }
 
diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h
index 7fb0af0b..69dbbc6b 100644
--- a/absl/hash/internal/hash.h
+++ b/absl/hash/internal/hash.h
@@ -379,7 +379,7 @@ template <typename H, typename... Ts>
 // This SFINAE gets MSVC confused under some conditions. Let's just disable it
 // for now.
     H
-#else  // _MSC_VER
+#else   // _MSC_VER
     typename std::enable_if<absl::conjunction<is_hashable<Ts>...>::value,
                             H>::type
 #endif  // _MSC_VER
 AbslHashValue(H hash_state, const std::tuple<Ts...>& t) {
@@ -714,8 +714,8 @@ template <typename T>
 struct is_hashable
     : std::integral_constant<bool, HashSelect::template Apply<T>::value> {};
 
-// HashState
-class ABSL_DLL HashState : public HashStateBase<HashState> {
+// MixingHashState
+class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
   // absl::uint128 is not an alias or a thin wrapper around the intrinsic.
   // We use the intrinsic when available to improve performance.
 #ifdef ABSL_HAVE_INTRINSIC_INT128
@@ -734,22 +734,23 @@ class ABSL_DLL HashState : public HashStateBase<HashState> {
  public:
   // Move only
-  HashState(HashState&&) = default;
-  HashState& operator=(HashState&&) = default;
+  MixingHashState(MixingHashState&&) = default;
+  MixingHashState& operator=(MixingHashState&&) = default;
 
-  // HashState::combine_contiguous()
+  // MixingHashState::combine_contiguous()
   //
   // Fundamental base case for hash recursion: mixes the given range of bytes
   // into the hash state.
-  static HashState combine_contiguous(HashState hash_state,
-                                      const unsigned char* first, size_t size) {
-    return HashState(
+  static MixingHashState combine_contiguous(MixingHashState hash_state,
+                                            const unsigned char* first,
+                                            size_t size) {
+    return MixingHashState(
         CombineContiguousImpl(hash_state.state_, first, size,
                               std::integral_constant<int, sizeof(size_t)>{}));
   }
-  using HashState::HashStateBase::combine_contiguous;
+  using MixingHashState::HashStateBase::combine_contiguous;
 
-  // HashState::hash()
+  // MixingHashState::hash()
   //
   // For performance reasons in non-opt mode, we specialize this for
   // integral types.
@@ -761,24 +762,24 @@ class ABSL_DLL HashState : public HashStateBase<HashState> {
     return static_cast<size_t>(Mix(Seed(), static_cast<uint64_t>(value)));
   }
 
-  // Overload of HashState::hash()
+  // Overload of MixingHashState::hash()
   template <typename T, absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>
   static size_t hash(const T& value) {
-    return static_cast<size_t>(combine(HashState{}, value).state_);
+    return static_cast<size_t>(combine(MixingHashState{}, value).state_);
   }
 
  private:
   // Invoked only once for a given argument; that plus the fact that this is
   // move-only ensures that there is only one non-moved-from object.
-  HashState() : state_(Seed()) {}
+  MixingHashState() : state_(Seed()) {}
 
   // Workaround for MSVC bug.
   // We make the type copyable to fix the calling convention, even though we
   // never actually copy it. Keep it private to not affect the public API of the
   // type.
-  HashState(const HashState&) = default;
+  MixingHashState(const MixingHashState&) = default;
 
-  explicit HashState(uint64_t state) : state_(state) {}
+  explicit MixingHashState(uint64_t state) : state_(state) {}
 
   // Implementation of the base case for combine_contiguous where we actually
   // mix the bytes into the state.
@@ -793,7 +794,6 @@ class ABSL_DLL HashState : public HashStateBase<HashState> {
                                         std::integral_constant<int, 8>
                                         /* sizeof_size_t */);
-
   // Slow dispatch path for calls to CombineContiguousImpl with a size argument
   // larger than PiecewiseChunkSize(). Has the same effect as calling
   // CombineContiguousImpl() repeatedly with the chunk stride size.
@@ -911,8 +911,8 @@ class ABSL_DLL HashState : public HashStateBase<HashState> {
   uint64_t state_;
 };
 
-// HashState::CombineContiguousImpl()
-inline uint64_t HashState::CombineContiguousImpl(
+// MixingHashState::CombineContiguousImpl()
+inline uint64_t MixingHashState::CombineContiguousImpl(
     uint64_t state, const unsigned char* first, size_t len,
     std::integral_constant<int, 4> /* sizeof_size_t */) {
   // For large values we use CityHash, for small ones we just use a
@@ -934,8 +934,8 @@ inline uint64_t HashState::CombineContiguousImpl(
   return Mix(state, v);
 }
 
-// Overload of HashState::CombineContiguousImpl()
-inline uint64_t HashState::CombineContiguousImpl(
+// Overload of MixingHashState::CombineContiguousImpl()
+inline uint64_t MixingHashState::CombineContiguousImpl(
     uint64_t state, const unsigned char* first, size_t len,
     std::integral_constant<int, 8> /* sizeof_size_t */) {
   // For large values we use Wyhash or CityHash depending on the platform, for
@@ -976,7 +976,9 @@ struct PoisonedHash : private AggregateBarrier {
 
 template <typename T>
 struct HashImpl {
-  size_t operator()(const T& value) const { return HashState::hash(value); }
+  size_t operator()(const T& value) const {
+    return MixingHashState::hash(value);
+  }
 };
 
 template <typename T>
diff --git a/absl/status/internal/status_internal.h b/absl/status/internal/status_internal.h
index ccafd702..ac12940a 100644
--- a/absl/status/internal/status_internal.h
+++ b/absl/status/internal/status_internal.h
@@ -47,12 +47,12 @@ using Payloads = absl::InlinedVector<Payload, 1>;
 
 // Reference-counted representation of Status data.
 struct StatusRep {
-  StatusRep(absl::StatusCode code, absl::string_view message,
-            std::unique_ptr<status_internal::Payloads> payloads)
+  StatusRep(absl::StatusCode code_arg, absl::string_view message_arg,
+            std::unique_ptr<status_internal::Payloads> payloads_arg)
       : ref(int32_t{1}),
-        code(code),
-        message(message),
-        payloads(std::move(payloads)) {}
+        code(code_arg),
+        message(message_arg),
+        payloads(std::move(payloads_arg)) {}
 
   std::atomic<int32_t> ref;
   absl::StatusCode code;
diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc
index 5dad781e..f5aa6e47 100644
--- a/absl/strings/cord.cc
+++ b/absl/strings/cord.cc
@@ -536,24 +536,10 @@ void Cord::InlineRep::AssignSlow(const Cord::InlineRep& src) {
     return;
   }
 
-  // See b/187581164: unsample cord if already sampled
-  // TODO(b/117940323): continuously 'assigned to' cords would reach 100%
-  // sampling probability. Imagine a cord x in some cache:
-  //   cache.SetCord(const Cord& foo) {
-  //     x = foo;
-  //   }
-  // CordzInfo::MaybeTrackCord does:
-  //   x.profiled = foo.profiled | x.profiled | random(cordz_mean_interval)
-  // Which means it will on the long run converge to 'always samples'
-  // The real fix is in CordzMaybeTrackCord, but the below is a low risk
-  // forward fix for b/187581164 and similar BT benchmark regressions.
-  if (ABSL_PREDICT_FALSE(is_profiled())) {
-    cordz_info()->Untrack();
-    clear_cordz_info();
-  }
-
   CordRep* tree = as_tree();
   if (CordRep* src_tree = src.tree()) {
+    // Leave any existing `cordz_info` in place, and let MaybeTrackCord()
+    // decide if this cord should be (or remains to be) sampled or not.
     data_.set_tree(CordRep::Ref(src_tree));
     CordzInfo::MaybeTrackCord(data_, src.data_, method);
   } else {
diff --git a/absl/strings/cordz_test.cc b/absl/strings/cordz_test.cc
index 0e11f5c8..2b7d30b0 100644
--- a/absl/strings/cordz_test.cc
+++ b/absl/strings/cordz_test.cc
@@ -69,6 +69,7 @@ absl::string_view MakeString(TestCordSize size) {
 
 // Returns a cord with a sampled method of kAppendString.
 absl::Cord MakeAppendStringCord(TestCordSize size) {
+  CordzSamplingIntervalHelper always(1);
   absl::Cord cord;
   cord.Append(MakeString(size));
   return cord;
@@ -136,21 +137,21 @@ TEST_P(CordzStringTest, ConstructString) {
   }
 }
 
-TEST(CordzTest, CopyConstruct) {
+TEST(CordzTest, CopyConstructFromUnsampled) {
   CordzSamplingIntervalHelper sample_every{1};
   Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
   Cord cord(src);
-  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorCord));
+  EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
 }
 
 TEST(CordzTest, CopyConstructFromSampled) {
-  CordzSamplingIntervalHelper sample_every{1};
-  Cord src(MakeString(TestCordSize::kLarge));
+  CordzSamplingIntervalHelper sample_never{99999};
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
   Cord cord(src);
   ASSERT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorCord));
   CordzStatistics stats = GetCordzInfoForTesting(cord)->GetCordzStatistics();
-  EXPECT_THAT(stats.parent_method, Eq(Method::kConstructorString));
-  EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(1));
+  EXPECT_THAT(stats.parent_method, Eq(Method::kAppendString));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kAppendString), Eq(1));
 }
 
 TEST(CordzTest, MoveConstruct) {
@@ -160,12 +161,12 @@ TEST(CordzTest, MoveConstruct) {
   EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kConstructorString));
 }
 
-TEST_P(CordzUpdateTest, AssignCord) {
+TEST_P(CordzUpdateTest, AssignUnsampledCord) {
   Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
+  const CordzInfo* info = GetCordzInfoForTesting(cord());
   cord() = src;
-  EXPECT_THAT(cord(), HasValidCordzInfoOf(Method::kAssignCord));
-  CordzStatistics stats = GetCordzInfoForTesting(cord())->GetCordzStatistics();
-  EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(0));
+  EXPECT_THAT(GetCordzInfoForTesting(cord()), Eq(nullptr));
+  EXPECT_FALSE(CordzInfoIsListed(info));
 }
 
 TEST_P(CordzUpdateTest, AssignSampledCord) {
@@ -178,10 +179,22 @@ TEST_P(CordzUpdateTest, AssignSampledCord) {
   EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(0));
 }
 
-TEST(CordzUpdateTest, AssignSampledCordToUnsampledCord) {
-  CordzSamplingIntervalHelper sample_every{1};
+TEST(CordzUpdateTest, AssignSampledCordToInlined) {
+  CordzSamplingIntervalHelper sample_never{99999};
+  Cord cord;
   Cord src = MakeAppendStringCord(TestCordSize::kLarge);
+  cord = src;
+  ASSERT_THAT(cord, HasValidCordzInfoOf(Method::kAssignCord));
+  CordzStatistics stats = GetCordzInfoForTesting(cord)->GetCordzStatistics();
+  EXPECT_THAT(stats.parent_method, Eq(Method::kAppendString));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kAppendString), Eq(1));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(0));
+}
+
+TEST(CordzUpdateTest, AssignSampledCordToUnsampledCord) {
+  CordzSamplingIntervalHelper sample_never{99999};
   Cord cord = UnsampledCord(MakeString(TestCordSize::kLarge));
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
   cord = src;
   ASSERT_THAT(cord, HasValidCordzInfoOf(Method::kAssignCord));
   CordzStatistics stats = GetCordzInfoForTesting(cord)->GetCordzStatistics();
@@ -190,6 +203,26 @@ TEST(CordzUpdateTest, AssignSampledCordToUnsampledCord) {
   EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(0));
 }
 
+TEST(CordzUpdateTest, AssignUnsampledCordToSampledCordWithoutSampling) {
+  CordzSamplingIntervalHelper sample_never{99999};
+  Cord cord = MakeAppendStringCord(TestCordSize::kLarge);
+  const CordzInfo* info = GetCordzInfoForTesting(cord);
+  Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
+  cord = src;
+  EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
+  EXPECT_FALSE(CordzInfoIsListed(info));
+}
+
+TEST(CordzUpdateTest, AssignUnsampledCordToSampledCordWithSampling) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord cord = MakeAppendStringCord(TestCordSize::kLarge);
+  const CordzInfo* info = GetCordzInfoForTesting(cord);
+  Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
+  cord = src;
+  EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
+  EXPECT_FALSE(CordzInfoIsListed(info));
+}
+
 TEST(CordzUpdateTest, AssignSampledCordToSampledCord) {
   CordzSamplingIntervalHelper sample_every{1};
   Cord src = MakeAppendStringCord(TestCordSize::kLarge);
@@ -202,7 +235,19 @@ TEST(CordzUpdateTest, AssignSampledCordToSampledCord) {
   EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(0));
 }
 
-TEST(CordzTest, AssignInlinedCord) {
+TEST(CordzUpdateTest, AssignUnsampledCordToSampledCord) {
+  CordzSamplingIntervalHelper sample_every{1};
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
+  Cord cord(MakeString(TestCordSize::kLarge));
+  cord = src;
+  ASSERT_THAT(cord, HasValidCordzInfoOf(Method::kAssignCord));
+  CordzStatistics stats = GetCordzInfoForTesting(cord)->GetCordzStatistics();
+  EXPECT_THAT(stats.parent_method, Eq(Method::kAppendString));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kAppendString), Eq(1));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(0));
+}
+
+TEST(CordzTest, AssignInlinedCordToSampledCord) {
   CordzSampleToken token;
   CordzSamplingIntervalHelper sample_every{1};
   Cord cord(MakeString(TestCordSize::kLarge));
@@ -389,28 +434,28 @@ TEST(CordzTest, RemoveSuffix) {
   EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
 }
 
-TEST(CordzTest, SubCord) {
+TEST(CordzTest, SubCordFromUnsampledCord) {
   CordzSamplingIntervalHelper sample_every{1};
   Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
   Cord cord = src.Subcord(10, src.size() / 2);
-  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kSubCord));
-}
-
-TEST(CordzTest, SmallSubCord) {
-  CordzSamplingIntervalHelper sample_every{1};
-  Cord src = UnsampledCord(MakeString(TestCordSize::kLarge));
-  Cord cord = src.Subcord(10, kMaxInline + 1);
-  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kSubCord));
+  EXPECT_THAT(GetCordzInfoForTesting(cord), Eq(nullptr));
 }
 
 TEST(CordzTest, SubCordFromSampledCord) {
-  CordzSamplingIntervalHelper sample_every{1};
-  Cord src(MakeString(TestCordSize::kLarge));
+  CordzSamplingIntervalHelper sample_never{99999};
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
   Cord cord = src.Subcord(10, src.size() / 2);
-  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kSubCord));
+  ASSERT_THAT(cord, HasValidCordzInfoOf(Method::kSubCord));
   CordzStatistics stats = GetCordzInfoForTesting(cord)->GetCordzStatistics();
-  EXPECT_THAT(stats.parent_method, Eq(Method::kConstructorString));
-  EXPECT_THAT(stats.update_tracker.Value(Method::kConstructorString), Eq(1));
+  EXPECT_THAT(stats.parent_method, Eq(Method::kAppendString));
+  EXPECT_THAT(stats.update_tracker.Value(Method::kAppendString), Eq(1));
+}
+
+TEST(CordzTest, SmallSubCord) {
+  CordzSamplingIntervalHelper sample_never{99999};
+  Cord src = MakeAppendStringCord(TestCordSize::kLarge);
+  Cord cord = src.Subcord(10, kMaxInline + 1);
+  EXPECT_THAT(cord, HasValidCordzInfoOf(Method::kSubCord));
 }
 
 }  // namespace
diff --git a/absl/strings/internal/cord_rep_ring.cc b/absl/strings/internal/cord_rep_ring.cc
index 09951290..f78c94e1 100644
--- a/absl/strings/internal/cord_rep_ring.cc
+++ b/absl/strings/internal/cord_rep_ring.cc
@@ -32,15 +32,6 @@ namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace cord_internal {
 
-// See https://bugs.llvm.org/show_bug.cgi?id=48477
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wshadow"
-#if __has_warning("-Wshadow-field")
-#pragma clang diagnostic ignored "-Wshadow-field"
-#endif
-#endif
-
 namespace {
 
 using index_type = CordRepRing::index_type;
@@ -450,12 +441,12 @@ Span<char> CordRepRing::GetPrependBuffer(size_t size) {
 }
 
 CordRepRing* CordRepRing::CreateFromLeaf(CordRep* child, size_t offset,
-                                         size_t length, size_t extra) {
+                                         size_t len, size_t extra) {
   CordRepRing* rep = CordRepRing::New(1, extra);
   rep->head_ = 0;
   rep->tail_ = rep->advance(0);
-  rep->length = length;
-  rep->entry_end_pos()[0] = length;
+  rep->length = len;
+  rep->entry_end_pos()[0] = len;
   rep->entry_child()[0] = child;
   rep->entry_data_offset()[0] = static_cast<offset_type>(offset);
   return Validate(rep);
@@ -463,16 +454,16 @@ CordRepRing* CordRepRing::CreateFromLeaf(CordRep* child, size_t offset,
 
 CordRepRing* CordRepRing::CreateSlow(CordRep* child, size_t extra) {
   CordRepRing* rep = nullptr;
-  Consume(child, [&](CordRep* child, size_t offset, size_t length) {
-    if (IsFlatOrExternal(child)) {
-      rep = rep ? AppendLeaf(rep, child, offset, length)
-                : CreateFromLeaf(child, offset, length, extra);
+  Consume(child, [&](CordRep* child_arg, size_t offset, size_t len) {
+    if (IsFlatOrExternal(child_arg)) {
+      rep = rep ? AppendLeaf(rep, child_arg, offset, len)
+                : CreateFromLeaf(child_arg, offset, len, extra);
     } else if (rep) {
-      rep = AddRing<AddMode::kAppend>(rep, child->ring(), offset, length);
-    } else if (offset == 0 && child->length == length) {
-      rep = Mutable(child->ring(), extra);
+      rep = AddRing<AddMode::kAppend>(rep, child_arg->ring(), offset, len);
+    } else if (offset == 0 && child_arg->length == len) {
+      rep = Mutable(child_arg->ring(), extra);
     } else {
-      rep = SubRing(child->ring(), offset, length, extra);
+      rep = SubRing(child_arg->ring(), offset, len, extra);
     }
   });
   return Validate(rep, nullptr, __LINE__);
@@ -491,18 +482,18 @@ CordRepRing* CordRepRing::Create(CordRep* child, size_t extra) {
 
 template <CordRepRing::AddMode mode>
 CordRepRing* CordRepRing::AddRing(CordRepRing* rep, CordRepRing* ring,
-                                  size_t offset, size_t length) {
+                                  size_t offset, size_t len) {
   assert(offset < ring->length);
   constexpr bool append = mode == AddMode::kAppend;
   Position head = ring->Find(offset);
-  Position tail = ring->FindTail(head.index, offset + length);
+  Position tail = ring->FindTail(head.index, offset + len);
   const index_type entries = ring->entries(head.index, tail.index);
 
   rep = Mutable(rep, entries);
 
   // The delta for making ring[head].end_pos into 'len - offset'
   const pos_type delta_length =
-      (append ? rep->begin_pos_ + rep->length : rep->begin_pos_ - length) -
+      (append ? rep->begin_pos_ + rep->length : rep->begin_pos_ - len) -
       ring->entry_begin_pos(head.index) - head.offset;
 
   // Start filling at `tail`, or `entries` before `head`
@@ -543,36 +534,36 @@ CordRepRing* CordRepRing::AddRing(CordRepRing* rep, CordRepRing* ring,
   }
 
   // Commit changes
-  rep->length += length;
+  rep->length += len;
   if (append) {
     rep->tail_ = filler.pos();
   } else {
     rep->head_ = filler.head();
-    rep->begin_pos_ -= length;
+    rep->begin_pos_ -= len;
   }
 
   return Validate(rep);
 }
 
 CordRepRing* CordRepRing::AppendSlow(CordRepRing* rep, CordRep* child) {
-  Consume(child, [&rep](CordRep* child, size_t offset, size_t length) {
-    if (child->tag == RING) {
-      rep = AddRing<AddMode::kAppend>(rep, child->ring(), offset, length);
+  Consume(child, [&rep](CordRep* child_arg, size_t offset, size_t len) {
+    if (child_arg->tag == RING) {
+      rep = AddRing<AddMode::kAppend>(rep, child_arg->ring(), offset, len);
     } else {
-      rep = AppendLeaf(rep, child, offset, length);
+      rep = AppendLeaf(rep, child_arg, offset, len);
     }
   });
   return rep;
 }
 
 CordRepRing* CordRepRing::AppendLeaf(CordRepRing* rep, CordRep* child,
-                                     size_t offset, size_t length) {
+                                     size_t offset, size_t len) {
   rep = Mutable(rep, 1);
   index_type back = rep->tail_;
   const pos_type begin_pos = rep->begin_pos_ + rep->length;
   rep->tail_ = rep->advance(rep->tail_);
-  rep->length += length;
-  rep->entry_end_pos()[back] = begin_pos + length;
+  rep->length += len;
+  rep->entry_end_pos()[back] = begin_pos + len;
   rep->entry_child()[back] = child;
   rep->entry_data_offset()[back] = static_cast<offset_type>(offset);
   return Validate(rep, nullptr, __LINE__);
@@ -590,24 +581,24 @@ CordRepRing* CordRepRing::Append(CordRepRing* rep, CordRep* child) {
 }
 
 CordRepRing* CordRepRing::PrependSlow(CordRepRing* rep, CordRep* child) {
-  RConsume(child, [&](CordRep* child, size_t offset, size_t length) {
-    if (IsFlatOrExternal(child)) {
-      rep = PrependLeaf(rep, child, offset, length);
+  RConsume(child, [&](CordRep* child_arg, size_t offset, size_t len) {
+    if (IsFlatOrExternal(child_arg)) {
+      rep = PrependLeaf(rep, child_arg, offset, len);
     } else {
-      rep = AddRing<AddMode::kPrepend>(rep, child->ring(), offset, length);
+      rep = AddRing<AddMode::kPrepend>(rep, child_arg->ring(), offset, len);
     }
   });
   return Validate(rep);
 }
 
 CordRepRing* CordRepRing::PrependLeaf(CordRepRing* rep, CordRep* child,
-                                      size_t offset, size_t length) {
+                                      size_t offset, size_t len) {
   rep = Mutable(rep, 1);
   index_type head = rep->retreat(rep->head_);
   pos_type end_pos = rep->begin_pos_;
   rep->head_ = head;
-  rep->length += length;
-  rep->begin_pos_ -= length;
+  rep->length += len;
+  rep->begin_pos_ -= len;
   rep->entry_end_pos()[head] = end_pos;
   rep->entry_child()[head] = child;
   rep->entry_data_offset()[head] = static_cast<offset_type>(offset);
@@ -787,18 +778,18 @@ char CordRepRing::GetCharacter(size_t offset) const {
 }
 
 CordRepRing* CordRepRing::SubRing(CordRepRing* rep, size_t offset,
-                                  size_t length, size_t extra) {
+                                  size_t len, size_t extra) {
   assert(offset <= rep->length);
-  assert(offset <= rep->length - length);
+  assert(offset <= rep->length - len);
 
-  if (length == 0) {
+  if (len == 0) {
     CordRep::Unref(rep);
     return nullptr;
   }
 
   // Find position of first byte
   Position head = rep->Find(offset);
-  Position tail = rep->FindTail(head.index, offset + length);
+  Position tail = rep->FindTail(head.index, offset + len);
   const size_t new_entries = rep->entries(head.index, tail.index);
 
   if (rep->refcount.IsOne() && extra <= (rep->capacity() - new_entries)) {
@@ -815,7 +806,7 @@ CordRepRing* CordRepRing::SubRing(CordRepRing* rep, size_t offset,
   }
 
   // Adjust begin_pos and length
-  rep->length = length;
+  rep->length = len;
   rep->begin_pos_ += offset;
 
   // Adjust head and tail blocks
@@ -889,10 +880,6 @@ CordRepRing* CordRepRing::RemoveSuffix(CordRepRing* rep, size_t len,
   return Validate(rep);
 }
 
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-
 }  // namespace cord_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/strings/internal/cord_rep_ring.h b/absl/strings/internal/cord_rep_ring.h
index 830f2b2a..2082a565 100644
--- a/absl/strings/internal/cord_rep_ring.h
+++ b/absl/strings/internal/cord_rep_ring.h
@@ -30,15 +30,6 @@ namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace cord_internal {
 
-// See https://bugs.llvm.org/show_bug.cgi?id=48477
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wshadow"
-#if __has_warning("-Wshadow-field")
-#pragma clang diagnostic ignored "-Wshadow-field"
-#endif
-#endif
-
 // All operations modifying a ring buffer are implemented as static methods
 // requiring a CordRepRing instance with a reference adopted by the method.
 //
@@ -210,23 +201,23 @@ class CordRepRing : public CordRep {
   // referencing up to `size` capacity directly before the existing data.
   Span<char> GetPrependBuffer(size_t size);
 
-  // Returns a cord ring buffer containing `length` bytes of data starting at
+  // Returns a cord ring buffer containing `len` bytes of data starting at
   // `offset`. If the input is not shared, this function will remove all head
   // and tail child nodes outside of the requested range, and adjust the new
   // head and tail nodes as required. If the input is shared, this function
   // returns a new instance sharing some or all of the nodes from the input.
-  static CordRepRing* SubRing(CordRepRing* r, size_t offset, size_t length,
+  static CordRepRing* SubRing(CordRepRing* r, size_t offset, size_t len,
                               size_t extra = 0);
 
-  // Returns a cord ring buffer with the first `length` bytes removed.
+  // Returns a cord ring buffer with the first `len` bytes removed.
   // If the input is not shared, this function will remove all head child nodes
   // fully inside the first `length` bytes, and adjust the new head as required.
   // If the input is shared, this function returns a new instance sharing some
   // or all of the nodes from the input.
-  static CordRepRing* RemoveSuffix(CordRepRing* r, size_t length,
+  static CordRepRing* RemoveSuffix(CordRepRing* r, size_t len,
                                    size_t extra = 0);
 
-  // Returns a cord ring buffer with the last `length` bytes removed.
+  // Returns a cord ring buffer with the last `len` bytes removed.
   // If the input is not shared, this function will remove all head child nodes
   // fully inside the first `length` bytes, and adjust the new head as required.
   // If the input is shared, this function returns a new instance sharing some
@@ -242,12 +233,12 @@ class CordRepRing : public CordRep {
   // function returns false, and `fragment` is left unchanged.
   bool IsFlat(absl::string_view* fragment) const;
 
-  // Returns true if the data starting at `offset` with length `length` is
+  // Returns true if the data starting at `offset` with length `len` is
   // managed by this instance inside a single contiguous buffer, in which case
   // the (optional) output parameter `fragment` is set to the contiguous memory
   // starting at offset `offset` with length `length`. Otherwise, the function
   // returns false, and `fragment` is left unchanged.
-  bool IsFlat(size_t offset, size_t length, absl::string_view* fragment) const;
+  bool IsFlat(size_t offset, size_t len, absl::string_view* fragment) const;
 
   // Testing only: set capacity to requested capacity.
   void SetCapacityForTesting(size_t capacity);
@@ -473,10 +464,10 @@ class CordRepRing : public CordRep {
                                  size_t length, size_t extra);
 
   // Appends or prepends (depending on AddMode) the ring buffer in `ring' to
-  // `rep` starting at `offset` with length `length`.
+  // `rep` starting at `offset` with length `len`.
   template <AddMode mode>
   static CordRepRing* AddRing(CordRepRing* rep, CordRepRing* ring,
-                              size_t offset, size_t length);
+                              size_t offset, size_t len);
 
   // Increases the data offset for entry `index` by `n`.
   void AddDataOffset(index_type index, size_t n);
@@ -596,12 +587,12 @@ inline bool CordRepRing::IsFlat(absl::string_view* fragment) const {
   return false;
 }
 
-inline bool CordRepRing::IsFlat(size_t offset, size_t length,
+inline bool CordRepRing::IsFlat(size_t offset, size_t len,
                                 absl::string_view* fragment) const {
   const Position pos = Find(offset);
   const absl::string_view data = entry_data(pos.index);
-  if (data.length() >= length && data.length() - length >= pos.offset) {
-    if (fragment) *fragment = data.substr(pos.offset, length);
+  if (data.length() >= len && data.length() - len >= pos.offset) {
+    if (fragment) *fragment = data.substr(pos.offset, len);
     return true;
   }
   return false;
@@ -609,10 +600,6 @@ inline bool CordRepRing::IsFlat(size_t offset, size_t length,
 
 std::ostream& operator<<(std::ostream& s, const CordRepRing& rep);
 
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-
 }  // namespace cord_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/strings/internal/cordz_functions.cc b/absl/strings/internal/cordz_functions.cc
index 6ad864f1..f30080f8 100644
--- a/absl/strings/internal/cordz_functions.cc
+++ b/absl/strings/internal/cordz_functions.cc
@@ -44,7 +44,10 @@ std::atomic<int> g_cordz_mean_interval(50000);
 
 #ifdef ABSL_INTERNAL_CORDZ_ENABLED
 
-ABSL_CONST_INIT thread_local int64_t cordz_next_sample = 0;
+// Special negative 'not initialized' per thread value for cordz_next_sample.
+static constexpr int64_t kInitCordzNextSample = -1;
+
+ABSL_CONST_INIT thread_local int64_t cordz_next_sample = kInitCordzNextSample;
 
 // kIntervalIfDisabled is the number of profile-eligible events need to occur
 // before the code will confirm that cordz is still disabled.
@@ -77,8 +80,11 @@ ABSL_ATTRIBUTE_NOINLINE bool cordz_should_profile_slow() {
   }
 
   if (cordz_next_sample <= 0) {
+    // If first check on current thread, check cordz_should_profile()
+    // again using the created (initial) stride in cordz_next_sample.
+    const bool initialized = cordz_next_sample != kInitCordzNextSample;
     cordz_next_sample = exponential_biased_generator.GetStride(mean_interval);
-    return true;
+    return initialized || cordz_should_profile();
   }
 
   --cordz_next_sample;
diff --git a/absl/strings/internal/cordz_functions_test.cc b/absl/strings/internal/cordz_functions_test.cc
index f2cefae3..350623c1 100644
--- a/absl/strings/internal/cordz_functions_test.cc
+++ b/absl/strings/internal/cordz_functions_test.cc
@@ -14,6 +14,8 @@
 
 #include "absl/strings/internal/cordz_functions.h"
 
+#include <thread>  // NOLINT we need real clean new threads
+
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "absl/base/config.h"
@@ -63,6 +65,22 @@ TEST(CordzFunctionsTest, ShouldProfileAlways) {
   set_cordz_mean_interval(orig_sample_rate);
 }
 
+TEST(CordzFunctionsTest, DoesNotAlwaysSampleFirstCord) {
+  // Set large enough interval such that the chance of 'tons' of threads
+  // randomly sampling the first call is infinitely small.
+  set_cordz_mean_interval(10000);
+  int tries = 0;
+  bool sampled = false;
+  do {
+    ++tries;
+    ASSERT_THAT(tries, Le(1000));
+    std::thread thread([&sampled] {
+      sampled = cordz_should_profile();
+    });
+    thread.join();
+  } while (sampled);
+}
+
 TEST(CordzFunctionsTest, ShouldProfileRate) {
   static constexpr int kDesiredMeanInterval = 1000;
   static constexpr int kSamples = 10000;
diff --git a/absl/strings/internal/cordz_info.cc b/absl/strings/internal/cordz_info.cc
index a6b045ff..a3a0b9c0 100644
--- a/absl/strings/internal/cordz_info.cc
+++ b/absl/strings/internal/cordz_info.cc
@@ -293,6 +293,16 @@ void CordzInfo::TrackCord(InlineData& cord, const InlineData& src,
   cordz_info->Track();
 }
 
+void CordzInfo::MaybeTrackCordImpl(InlineData& cord, const InlineData& src,
+                                   MethodIdentifier method) {
+  if (src.is_profiled()) {
+    TrackCord(cord, src, method);
+  } else if (cord.is_profiled()) {
+    cord.cordz_info()->Untrack();
+    cord.clear_cordz_info();
+  }
+}
+
 CordzInfo::MethodIdentifier CordzInfo::GetParentMethod(const CordzInfo* src) {
   if (src == nullptr) return MethodIdentifier::kUnknown;
   return src->parent_method_ != MethodIdentifier::kUnknown ? src->parent_method_
diff --git a/absl/strings/internal/cordz_info.h b/absl/strings/internal/cordz_info.h
index 29237930..13aaee17 100644
--- a/absl/strings/internal/cordz_info.h
+++ b/absl/strings/internal/cordz_info.h
@@ -77,13 +77,31 @@ class ABSL_LOCKABLE CordzInfo : public CordzHandle {
   static void MaybeTrackCord(InlineData& cord, MethodIdentifier method);
 
   // Maybe sample the cord identified by 'cord' for method 'method'.
-  // `src` identifies a 'parent' cord which content is copied into the current
-  // cord, typically the input cord for an assign emthod or copy constructor.
-  // Invokes the corresponding `TrackCord` method if either cord is sampled, or
-  // if `cord` is randomly picked for sampling. Possible scenarios:
-  // * `src` is sampled: `cord` will be set to sampled if not already sampled.
-  //   Parent stack and update stats of `src` are copied into `cord`
-  // * `src` is not sampled: `cord` may be randomly picked for sampling.
+  // `src` identifies a 'parent' cord which is assigned to `cord`, typically the
+  // input cord for a copy constructor, or an assign method such as `operator=`
+  // `cord` will be sampled if (and only if) `src` is sampled.
+  // If `cord` is currently being sampled and `src` is not being sampled, then
+  // this function will stop sampling the cord and reset the cord's cordz_info.
+  //
+  // Previously this function defined that `cord` will be sampled if either
+  // `src` is sampled, or if `cord` is randomly picked for sampling. However,
+  // this can cause issues, as there may be paths where some cord is assigned an
+  // indirect copy of it's own value. As such a 'string of copies' would then
+  // remain sampled (`src.is_profiled`), then assigning such a cord back to
+  // 'itself' creates a cycle where the cord will converge to 'always sampled`.
+  //
+  // For example:
+  //
+  //   Cord x;
+  //   for (...) {
+  //     // Copy ctor --> y.is_profiled := x.is_profiled | random(...)
+  //     Cord y = x;
+  //     ...
+  //     // Assign x = y --> x.is_profiled = y.is_profiled | random(...)
+  //     // ==> x.is_profiled |= random(...)
+  //     // ==> x converges to 'always profiled'
+  //     x = y;
+  //   }
   static void MaybeTrackCord(InlineData& cord, const InlineData& src,
                              MethodIdentifier method);
 
@@ -201,6 +219,12 @@ class ABSL_LOCKABLE CordzInfo : public CordzHandle {
 #endif
   }
 
+  // Non-inlined implementation of `MaybeTrackCord`, which is executed if
+  // either `src` is sampled or `cord` is sampled, and either untracks or
+  // tracks `cord` as documented per `MaybeTrackCord`.
+  static void MaybeTrackCordImpl(InlineData& cord, const InlineData& src,
+                                 MethodIdentifier method);
+
   ABSL_CONST_INIT static List global_list_;
   List* const list_ = &global_list_;
 
@@ -232,9 +256,8 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeTrackCord(
 
 inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CordzInfo::MaybeTrackCord(
     InlineData& cord, const InlineData& src, MethodIdentifier method) {
-  if (ABSL_PREDICT_FALSE(InlineData::is_either_profiled(cord, src)) ||
-      ABSL_PREDICT_FALSE(cordz_should_profile())) {
-    TrackCord(cord, src, method);
+  if (ABSL_PREDICT_FALSE(InlineData::is_either_profiled(cord, src))) {
+    MaybeTrackCordImpl(cord, src, method);
   }
 }
diff --git a/absl/strings/internal/cordz_info_test.cc b/absl/strings/internal/cordz_info_test.cc
index 59a8c525..b98343ae 100644
--- a/absl/strings/internal/cordz_info_test.cc
+++ b/absl/strings/internal/cordz_info_test.cc
@@ -74,19 +74,49 @@ TEST(CordzInfoTest, TrackCord) {
   info->Untrack();
 }
 
-TEST(CordzInfoTest, MaybeTrackCordOnSampledCord) {
-  TestCordData data1;
-  CordzInfo::TrackCord(data1.data, kTrackCordMethod);
-  CordzInfo* info1 = data1.data.cordz_info();
-  TestCordData data2;
-  CordzInfo::MaybeTrackCord(data2.data, data1.data, kTrackCordMethod);
-  CordzInfo* info2 = data2.data.cordz_info();
-  ASSERT_THAT(info2, Ne(nullptr));
-  EXPECT_THAT(info2->GetCordRepForTesting(), Eq(data2.rep.rep));
-  info2->Untrack();
-  info1->Untrack();
+TEST(CordzInfoTest, MaybeTrackChildCordWithoutSampling) {
+  CordzSamplingIntervalHelper sample_none(99999);
+  TestCordData parent, child;
+  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
+  EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
+}
+
+TEST(CordzInfoTest, MaybeTrackChildCordWithSampling) {
+  CordzSamplingIntervalHelper sample_all(1);
+  TestCordData parent, child;
+  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
+  EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
 }
 
+TEST(CordzInfoTest, MaybeTrackChildCordWithoutSamplingParentSampled) {
+  CordzSamplingIntervalHelper sample_none(99999);
+  TestCordData parent, child;
+  CordzInfo::TrackCord(parent.data, kTrackCordMethod);
+  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
+  CordzInfo* parent_info = parent.data.cordz_info();
+  CordzInfo* child_info = child.data.cordz_info();
+  ASSERT_THAT(child_info, Ne(nullptr));
+  EXPECT_THAT(child_info->GetCordRepForTesting(), Eq(child.rep.rep));
+  EXPECT_THAT(child_info->GetParentStack(), parent_info->GetStack());
+  parent_info->Untrack();
+  child_info->Untrack();
+}
+
+TEST(CordzInfoTest, MaybeTrackChildCordWithoutSamplingChildSampled) {
+  CordzSamplingIntervalHelper sample_none(99999);
+  TestCordData parent, child;
+  CordzInfo::TrackCord(child.data, kTrackCordMethod);
+  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
+  EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
+}
+
+TEST(CordzInfoTest, MaybeTrackChildCordWithSamplingChildSampled) {
+  CordzSamplingIntervalHelper sample_all(1);
+  TestCordData parent, child;
+  CordzInfo::TrackCord(child.data, kTrackCordMethod);
+  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
+  EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
+}
 
 TEST(CordzInfoTest, UntrackCord) {
   TestCordData data;
diff --git a/absl/strings/internal/str_format/bind.cc b/absl/strings/internal/str_format/bind.cc
index 4e68b90b..c988ba8f 100644
--- a/absl/strings/internal/str_format/bind.cc
+++ b/absl/strings/internal/str_format/bind.cc
@@ -58,7 +58,7 @@ inline bool ArgContext::Bind(const UnboundConversion* unbound,
     if (static_cast<size_t>(arg_position - 1) >= pack_.size()) return false;
     arg = &pack_[arg_position - 1];  // 1-based
 
-  if (!unbound->flags.basic) {
+  if (unbound->flags != Flags::kBasic) {
     int width = unbound->width.value();
     bool force_left = false;
     if (unbound->width.is_from_arg()) {
@@ -84,9 +84,8 @@ inline bool ArgContext::Bind(const UnboundConversion* unbound,
     FormatConversionSpecImplFriend::SetPrecision(precision, bound);
 
     if (force_left) {
-      Flags flags = unbound->flags;
-      flags.left = true;
-      FormatConversionSpecImplFriend::SetFlags(flags, bound);
+      FormatConversionSpecImplFriend::SetFlags(unbound->flags | Flags::kLeft,
+                                               bound);
     } else {
       FormatConversionSpecImplFriend::SetFlags(unbound->flags, bound);
     }
diff --git a/absl/strings/internal/str_format/extension.cc b/absl/strings/internal/str_format/extension.cc
index bb0d96cf..484f6ebf 100644
--- a/absl/strings/internal/str_format/extension.cc
+++ b/absl/strings/internal/str_format/extension.cc
@@ -23,13 +23,13 @@ namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace str_format_internal {
 
-std::string Flags::ToString() const {
+std::string FlagsToString(Flags v) {
   std::string s;
-  s.append(left ? "-" : "");
-  s.append(show_pos ? "+" : "");
-  s.append(sign_col ? " " : "");
-  s.append(alt ? "#" : "");
-  s.append(zero ? "0" : "");
+  s.append(FlagsContains(v, Flags::kLeft) ? "-" : "");
+  s.append(FlagsContains(v, Flags::kShowPos) ? "+" : "");
+  s.append(FlagsContains(v, Flags::kSignCol) ? " " : "");
+  s.append(FlagsContains(v, Flags::kAlt) ? "#" : "");
+  s.append(FlagsContains(v, Flags::kZero) ? "0" : "");
   return s;
 }
diff --git a/absl/strings/internal/str_format/extension.h b/absl/strings/internal/str_format/extension.h
index a9b9e137..55cbb56d 100644
--- a/absl/strings/internal/str_format/extension.h
+++ b/absl/strings/internal/str_format/extension.h
@@ -128,19 +128,33 @@ class FormatSinkImpl {
   char buf_[1024];
 };
 
-struct Flags {
-  bool basic : 1;     // fastest conversion: no flags, width, or precision
-  bool left : 1;      // "-"
-  bool show_pos : 1;  // "+"
-  bool sign_col : 1;  // " "
-  bool alt : 1;       // "#"
-  bool zero : 1;      // "0"
-  std::string ToString() const;
-  friend std::ostream& operator<<(std::ostream& os, const Flags& v) {
-    return os << v.ToString();
-  }
+enum class Flags : uint8_t {
+  kBasic = 0,
+  kLeft = 1 << 0,
+  kShowPos = 1 << 1,
+  kSignCol = 1 << 2,
+  kAlt = 1 << 3,
+  kZero = 1 << 4,
+  // This is not a real flag. It just exists to turn off kBasic when no other
+  // flags are set. This is for when width/precision are specified.
+  kNonBasic = 1 << 5,
 };
 
+constexpr Flags operator|(Flags a, Flags b) {
+  return static_cast<Flags>(static_cast<uint8_t>(a) | static_cast<uint8_t>(b));
+}
+
+constexpr bool FlagsContains(Flags haystack, Flags needle) {
+  return (static_cast<uint8_t>(haystack) & static_cast<uint8_t>(needle)) ==
+         static_cast<uint8_t>(needle);
+}
+
+std::string FlagsToString(Flags v);
+
+inline std::ostream& operator<<(std::ostream& os, Flags v) {
+  return os << FlagsToString(v);
+}
+
 // clang-format off
 #define ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(X_VAL, X_SEP) \
   /* text */ \
@@ -257,12 +271,16 @@ struct FormatConversionSpecImplFriend;
 class FormatConversionSpecImpl {
  public:
   // Width and precison are not specified, no flags are set.
-  bool is_basic() const { return flags_.basic; }
-  bool has_left_flag() const { return flags_.left; }
-  bool has_show_pos_flag() const { return flags_.show_pos; }
-  bool has_sign_col_flag() const { return flags_.sign_col; }
-  bool has_alt_flag() const { return flags_.alt; }
-  bool has_zero_flag() const { return flags_.zero; }
+  bool is_basic() const { return flags_ == Flags::kBasic; }
+  bool has_left_flag() const { return FlagsContains(flags_, Flags::kLeft); }
+  bool has_show_pos_flag() const {
+    return FlagsContains(flags_, Flags::kShowPos);
+  }
+  bool has_sign_col_flag() const {
+    return FlagsContains(flags_, Flags::kSignCol);
+  }
+  bool has_alt_flag() const { return FlagsContains(flags_, Flags::kAlt); }
+  bool has_zero_flag() const { return FlagsContains(flags_, Flags::kZero); }
 
   FormatConversionChar conversion_char() const {
     // Keep this field first in the struct . It generates better code when
@@ -306,7 +324,7 @@ struct FormatConversionSpecImplFriend final {
     conv->precision_ = p;
   }
   static std::string FlagsToString(const FormatConversionSpecImpl& spec) {
-    return spec.flags_.ToString();
+    return str_format_internal::FlagsToString(spec.flags_);
  }
 };
diff --git a/absl/strings/internal/str_format/parser.cc b/absl/strings/internal/str_format/parser.cc
index f308d023..2c9c07da 100644
--- a/absl/strings/internal/str_format/parser.cc
+++ b/absl/strings/internal/str_format/parser.cc
@@ -34,60 +34,67 @@ namespace str_format_internal {
 
 using CC = FormatConversionCharInternal;
 using LM = LengthMod;
 
+// Abbreviations to fit in the table below.
+constexpr auto f_sign = Flags::kSignCol;
+constexpr auto f_alt = Flags::kAlt;
+constexpr auto f_pos = Flags::kShowPos;
+constexpr auto f_left = Flags::kLeft;
+constexpr auto f_zero = Flags::kZero;
+
 ABSL_CONST_INIT const ConvTag kTags[256] = {
-    {}, {}, {}, {}, {}, {}, {}, {},                    // 00-07
-    {}, {}, {}, {}, {}, {}, {}, {},                    // 08-0f
-    {}, {}, {}, {}, {}, {}, {}, {},                    // 10-17
-    {}, {}, {}, {}, {}, {}, {}, {},                    // 18-1f
-    {}, {}, {}, {}, {}, {}, {}, {},                    // 20-27
-    {}, {}, {}, {}, {}, {}, {}, {},                    // 28-2f
-    {}, {}, {}, {}, {}, {}, {}, {},                    // 30-37
-    {}, {}, {}, {}, {}, {}, {}, {},                    // 38-3f
-    {}, CC::A, {}, {}, {}, CC::E, CC::F, CC::G,        // @ABCDEFG
-    {}, {}, {}, {}, LM::L, {}, {}, {},                 // HIJKLMNO
-    {}, {}, {}, {}, {}, {}, {}, {},                    // PQRSTUVW
-    CC::X, {}, {}, {}, {}, {}, {}, {},                 // XYZ[\]^_
-    {}, CC::a, {}, CC::c, CC::d, CC::e, CC::f, CC::g,  // `abcdefg
-    LM::h, CC::i, LM::j, {}, LM::l, {}, CC::n, CC::o,  // hijklmno
-    CC::p, LM::q, {}, CC::s, LM::t, CC::u, {}, {},     // pqrstuvw
-    CC::x, {}, LM::z, {}, {}, {}, {}, {},              // xyz{|}!
-    {}, {}, {}, {}, {}, {}, {}, {},                    // 80-87
-    {}, {}, {}, {}, {}, {}, {}, {},                    // 88-8f
-    {}, {}, {}, {}, {}, {}, {}, {},                    // 90-97
-    {}, {}, {}, {}, {}, {}, {}, {},                    // 98-9f
-    {}, {}, {}, {}, {}, {}, {}, {},                    // a0-a7
-    {}, {}, {}, {}, {}, {}, {}, {},                    // a8-af
-    {}, {}, {}, {}, {}, {}, {}, {},                    // b0-b7
-    {}, {}, {}, {}, {}, {}, {}, {},                    // b8-bf
-    {}, {}, {}, {}, {}, {}, {}, {},                    // c0-c7
-    {}, {}, {}, {}, {}, {}, {}, {},                    // c8-cf
-    {}, {}, {}, {}, {}, {}, {}, {},                    // d0-d7
-    {}, {}, {}, {}, {}, {}, {}, {},                    // d8-df
-    {}, {}, {}, {}, {}, {}, {}, {},                    // e0-e7
-    {}, {}, {}, {}, {}, {}, {}, {},                    // e8-ef
-    {}, {}, {}, {}, {}, {}, {}, {},                    // f0-f7
-    {}, {}, {}, {}, {}, {}, {}, {},                    // f8-ff
+    {},     {},    {},    {},    {},    {},     {},    {},     // 00-07
+    {},     {},    {},    {},    {},    {},     {},    {},     // 08-0f
+    {},     {},    {},    {},    {},    {},     {},    {},     // 10-17
+    {},     {},    {},    {},    {},    {},     {},    {},     // 18-1f
+    f_sign, {},    {},    f_alt, {},    {},     {},    {},     // !"#$%&'
+    {},     {},    {},    f_pos, {},    f_left, {},    {},     // ()*+,-./
+    f_zero, {},    {},    {},    {},    {},     {},    {},     // 01234567
+    {},     {},    {},    {},    {},    {},     {},    {},     // 89:;<=>?
+    {},     CC::A, {},    {},    {},    CC::E,  CC::F, CC::G,  // @ABCDEFG
+    {},     {},    {},    {},    LM::L, {},     {},    {},     // HIJKLMNO
+    {},     {},    {},    {},    {},    {},     {},    {},     // PQRSTUVW
+    CC::X,  {},    {},    {},    {},    {},     {},    {},     // XYZ[\]^_
+    {},     CC::a, {},    CC::c, CC::d, CC::e,  CC::f, CC::g,  // `abcdefg
+    LM::h,  CC::i, LM::j, {},    LM::l, {},     CC::n, CC::o,  // hijklmno
+    CC::p,  LM::q, {},    CC::s, LM::t, CC::u,  {},    {},     // pqrstuvw
+    CC::x,  {},    LM::z, {},    {},    {},     {},    {},     // xyz{|}!
+    {},     {},    {},    {},    {},    {},     {},    {},     // 80-87
+    {},     {},    {},    {},    {},    {},     {},    {},     // 88-8f
+    {},     {},    {},    {},    {},    {},     {},    {},     // 90-97
+    {},     {},    {},    {},    {},    {},     {},    {},     // 98-9f
+    {},     {},    {},    {},    {},    {},     {},    {},     // a0-a7
+    {},     {},    {},    {},    {},    {},     {},    {},     // a8-af
+    {},     {},    {},    {},    {},    {},     {},    {},     // b0-b7
+    {},     {},    {},    {},    {},    {},     {},    {},     // b8-bf
+    {},     {},    {},    {},    {},    {},     {},    {},     // c0-c7
+    {},     {},    {},    {},    {},    {},     {},    {},     // c8-cf
+    {},     {},    {},    {},    {},    {},     {},    {},     // d0-d7
+    {},     {},    {},    {},    {},    {},     {},    {},     // d8-df
+    {},     {},    {},    {},    {},    {},     {},    {},     // e0-e7
+    {},     {},    {},    {},    {},    {},     {},    {},     // e8-ef
+    {},     {},    {},    {},    {},    {},     {},    {},     // f0-f7
+    {},     {},    {},    {},    {},    {},     {},    {},     // f8-ff
 };
 
 namespace {
 
 bool CheckFastPathSetting(const UnboundConversion& conv) {
-  bool should_be_basic = !conv.flags.left &&      //
-                         !conv.flags.show_pos &&  //
-                         !conv.flags.sign_col &&  //
-                         !conv.flags.alt &&       //
-                         !conv.flags.zero &&      //
-                         (conv.width.value() == -1) &&
-                         (conv.precision.value() == -1);
-  if (should_be_basic != conv.flags.basic) {
+  bool width_precision_needed =
+      conv.width.value() >= 0 || conv.precision.value() >= 0;
+  if (width_precision_needed && conv.flags == Flags::kBasic) {
     fprintf(stderr,
             "basic=%d left=%d show_pos=%d sign_col=%d alt=%d zero=%d "
             "width=%d precision=%d\n",
-            conv.flags.basic, conv.flags.left, conv.flags.show_pos,
-            conv.flags.sign_col, conv.flags.alt, conv.flags.zero,
-            conv.width.value(), conv.precision.value());
+            conv.flags == Flags::kBasic ? 1 : 0,
+            FlagsContains(conv.flags, Flags::kLeft) ? 1 : 0,
+            FlagsContains(conv.flags, Flags::kShowPos) ? 1 : 0,
+            FlagsContains(conv.flags, Flags::kSignCol) ? 1 : 0,
+            FlagsContains(conv.flags, Flags::kAlt) ? 1 : 0,
+            FlagsContains(conv.flags, Flags::kZero) ? 1 : 0, conv.width.value(),
+            conv.precision.value());
+    return false;
   }
-  return should_be_basic == conv.flags.basic;
+  return true;
 }
 
 template <bool is_positional>
@@ -131,40 +138,21 @@ const char *ConsumeConversion(const char *pos, const char *const end,
   ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
 
   // We should start with the basic flag on.
-  assert(conv->flags.basic);
+  assert(conv->flags == Flags::kBasic);
 
   // Any non alpha character makes this conversion not basic.
   // This includes flags (-+ #0), width (1-9, *) or precision (.).
   // All conversion characters and length modifiers are alpha characters.
   if (c < 'A') {
-    conv->flags.basic = false;
-
-    for (; c <= '0';) {
-      // FIXME: We might be able to speed this up reusing the lookup table from
-      // above. It might require changing Flags to be a plain integer where we
-      // can |= a value.
-      switch (c) {
-        case '-':
-          conv->flags.left = true;
-          break;
-        case '+':
-          conv->flags.show_pos = true;
-          break;
-        case ' ':
-          conv->flags.sign_col = true;
-          break;
-        case '#':
-          conv->flags.alt = true;
-          break;
-        case '0':
-          conv->flags.zero = true;
-          break;
-        default:
-          goto flags_done;
+    while (c <= '0') {
+      auto tag = GetTagForChar(c);
+      if (tag.is_flags()) {
+        conv->flags = conv->flags | tag.as_flags();
+        ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
+      } else {
+        break;
       }
-      ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
     }
-flags_done:
 
     if (c <= '9') {
       if (c >= '0') {
@@ -173,12 +161,12 @@ flags_done:
         if (ABSL_PREDICT_FALSE(*next_arg != 0)) return nullptr;
         // Positional conversion.
         *next_arg = -1;
-        conv->flags = Flags();
-        conv->flags.basic = true;
         return ConsumeConversion<true>(original_pos, end, conv, next_arg);
       }
+      conv->flags = conv->flags | Flags::kNonBasic;
       conv->width.set_value(maybe_width);
     } else if (c == '*') {
+      conv->flags = conv->flags | Flags::kNonBasic;
       ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
       if (is_positional) {
        if (ABSL_PREDICT_FALSE(c < '1' || c > '9')) return nullptr;
@@ -192,6 +180,7 @@ flags_done:
     }
 
     if (c == '.') {
+      conv->flags = conv->flags | Flags::kNonBasic;
      ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR();
      if (std::isdigit(c)) {
        conv->precision.set_value(parse_digits());
diff --git a/absl/strings/internal/str_format/parser.h b/absl/strings/internal/str_format/parser.h
index 6504dd3d..ad8646ed 100644
--- a/absl/strings/internal/str_format/parser.h
+++ b/absl/strings/internal/str_format/parser.h
@@ -41,10 +41,7 @@ std::string LengthModToString(LengthMod v);
 
 // The analyzed properties of a single specified conversion.
 struct UnboundConversion {
-  UnboundConversion()
-      : flags() /* This is required to zero all the fields of flags. */ {
-    flags.basic = true;
-  }
+  UnboundConversion() {}
 
   class InputValue {
    public:
@@ -79,7 +76,7 @@ struct UnboundConversion {
   InputValue width;
   InputValue precision;
 
-  Flags flags;
+  Flags flags = Flags::kBasic;
   LengthMod length_mod = LengthMod::none;
   FormatConversionChar conv = FormatConversionCharInternal::kNone;
 };
@@ -93,32 +90,43 @@ const char* ConsumeUnboundConversion(const char* p, const char* end,
                                      UnboundConversion* conv, int* next_arg);
 
 // Helper tag class for the table below.
-// It allows fast `char -> ConversionChar/LengthMod` checking and
+// It allows fast `char -> ConversionChar/LengthMod/Flags` checking and
 // conversions.
 class ConvTag {
  public:
   constexpr ConvTag(FormatConversionChar conversion_char)  // NOLINT
-      : tag_(static_cast<int8_t>(conversion_char)) {}
-  // We invert the length modifiers to make them negative so that we can easily
-  // test for them.
+      : tag_(static_cast<uint8_t>(conversion_char)) {}
   constexpr ConvTag(LengthMod length_mod)  // NOLINT
-      : tag_(~static_cast<std::int8_t>(length_mod)) {}
-  // Everything else is -128, which is negative to make is_conv() simpler.
-  constexpr ConvTag() : tag_(-128) {}
+      : tag_(0x80 | static_cast<uint8_t>(length_mod)) {}
+  constexpr ConvTag(Flags flags)  // NOLINT
+      : tag_(0xc0 | static_cast<uint8_t>(flags)) {}
+  constexpr ConvTag() : tag_(0xFF) {}
+
+  bool is_conv() const { return (tag_ & 0x80) == 0; }
+  bool is_length() const { return (tag_ & 0xC0) == 0x80; }
+  bool is_flags() const { return (tag_ & 0xE0) == 0xC0; }
 
-  bool is_conv() const { return tag_ >= 0; }
-  bool is_length() const { return tag_ < 0 && tag_ != -128; }
   FormatConversionChar as_conv() const {
     assert(is_conv());
+    assert(!is_length());
+    assert(!is_flags());
     return static_cast<FormatConversionChar>(tag_);
   }
   LengthMod as_length() const {
+    assert(!is_conv());
     assert(is_length());
-    return static_cast<LengthMod>(~tag_);
+    assert(!is_flags());
+    return static_cast<LengthMod>(tag_ & 0x3F);
+  }
+  Flags as_flags() const {
+    assert(!is_conv());
+    assert(!is_length());
+    assert(is_flags());
+    return static_cast<Flags>(tag_ & 0x1F);
   }
 
  private:
-  std::int8_t tag_;
+  uint8_t tag_;
 };
 
 extern const ConvTag kTags[256];
diff --git a/absl/strings/internal/str_format/parser_test.cc b/absl/strings/internal/str_format/parser_test.cc
index a5fa1c79..fe0d2963 100644
--- a/absl/strings/internal/str_format/parser_test.cc
+++ b/absl/strings/internal/str_format/parser_test.cc
@@ -270,15 +270,22 @@ TEST_F(ConsumeUnboundConversionTest, Flags) {
       for (int k = 0; k < kNumFlags; ++k)
         if ((i >> k) & 1) fmt += kAllFlags[k];
       // flag order shouldn't matter
-      if (rev == 1) { std::reverse(fmt.begin(), fmt.end()); }
+      if (rev == 1) {
+        std::reverse(fmt.begin(), fmt.end());
+      }
       fmt += 'd';
       SCOPED_TRACE(fmt);
       EXPECT_TRUE(Run(fmt.c_str()));
-      EXPECT_EQ(fmt.find('-') == std::string::npos, !o.flags.left);
-      EXPECT_EQ(fmt.find('+') == std::string::npos, !o.flags.show_pos);
-      EXPECT_EQ(fmt.find(' ') == std::string::npos, !o.flags.sign_col);
-      EXPECT_EQ(fmt.find('#') == std::string::npos, !o.flags.alt);
-      EXPECT_EQ(fmt.find('0') == std::string::npos, !o.flags.zero);
+      EXPECT_EQ(fmt.find('-') == std::string::npos,
+                !FlagsContains(o.flags, Flags::kLeft));
+      EXPECT_EQ(fmt.find('+') == std::string::npos,
+                !FlagsContains(o.flags, Flags::kShowPos));
+      EXPECT_EQ(fmt.find(' ') == std::string::npos,
+                !FlagsContains(o.flags, Flags::kSignCol));
+      EXPECT_EQ(fmt.find('#') == std::string::npos,
+                !FlagsContains(o.flags, Flags::kAlt));
+      EXPECT_EQ(fmt.find('0') == std::string::npos,
+                !FlagsContains(o.flags, Flags::kZero));
     }
   }
 }
@@ -288,14 +295,14 @@ TEST_F(ConsumeUnboundConversionTest, BasicFlag) {
   for (const char* fmt : {"d", "llx", "G", "1$X"}) {
     SCOPED_TRACE(fmt);
     EXPECT_TRUE(Run(fmt));
-    EXPECT_TRUE(o.flags.basic);
+    EXPECT_EQ(o.flags, Flags::kBasic);
   }
 
   // Flag is off
   for (const char* fmt : {"3d", ".llx", "-G", "1$#X"}) {
     SCOPED_TRACE(fmt);
     EXPECT_TRUE(Run(fmt));
-    EXPECT_FALSE(o.flags.basic);
+    EXPECT_NE(o.flags, Flags::kBasic);
   }
 }
diff --git a/absl/synchronization/blocking_counter.cc b/absl/synchronization/blocking_counter.cc
index 3cea7aed..d2f82da3 100644
--- a/absl/synchronization/blocking_counter.cc
+++ b/absl/synchronization/blocking_counter.cc
@@ -14,41 +14,51 @@
 
 #include "absl/synchronization/blocking_counter.h"
 
+#include <atomic>
+
 #include "absl/base/internal/raw_logging.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 
-// Return whether int *arg is zero.
-static bool IsZero(void *arg) {
-  return 0 == *reinterpret_cast<int *>(arg);
+namespace {
+
+// Return whether int *arg is true.
+bool IsDone(void *arg) { return *reinterpret_cast<bool *>(arg); }
+
+}  // namespace
+
+BlockingCounter::BlockingCounter(int initial_count)
+    : count_(initial_count),
+      num_waiting_(0),
+      done_{initial_count == 0 ? true : false} {
+  ABSL_RAW_CHECK(initial_count >= 0, "BlockingCounter initial_count negative");
 }
 
 bool BlockingCounter::DecrementCount() {
-  MutexLock l(&lock_);
-  count_--;
-  if (count_ < 0) {
-    ABSL_RAW_LOG(
-        FATAL,
-        "BlockingCounter::DecrementCount() called too many times. count=%d",
-        count_);
+  int count = count_.fetch_sub(1, std::memory_order_acq_rel) - 1;
+  ABSL_RAW_CHECK(count >= 0,
+                 "BlockingCounter::DecrementCount() called too many times");
+  if (count == 0) {
+    MutexLock l(&lock_);
+    done_ = true;
+    return true;
   }
-  return count_ == 0;
+  return false;
 }
 
 void BlockingCounter::Wait() {
   MutexLock l(&this->lock_);
-  ABSL_RAW_CHECK(count_ >= 0, "BlockingCounter underflow");
 
   // only one thread may call Wait(). To support more than one thread,
   // implement a counter num_to_exit, like in the Barrier class.
   ABSL_RAW_CHECK(num_waiting_ == 0, "multiple threads called Wait()");
   num_waiting_++;
 
-  this->lock_.Await(Condition(IsZero, &this->count_));
+  this->lock_.Await(Condition(IsDone, &this->done_));
 
-  // At this point, We know that all threads executing DecrementCount have
-  // released the lock, and so will not touch this object again.
+  // At this point, we know that all threads executing DecrementCount
+  // will not touch this object again.
   // Therefore, the thread calling this method is free to delete the object
   // after we return from this method.
 }
diff --git a/absl/synchronization/blocking_counter.h b/absl/synchronization/blocking_counter.h
index 1f53f9f2..1908fdb1 100644
--- a/absl/synchronization/blocking_counter.h
+++ b/absl/synchronization/blocking_counter.h
@@ -20,6 +20,8 @@
 #ifndef ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
 #define ABSL_SYNCHRONIZATION_BLOCKING_COUNTER_H_
 
+#include <atomic>
+
 #include "absl/base/thread_annotations.h"
 #include "absl/synchronization/mutex.h"
 
@@ -60,8 +62,7 @@ ABSL_NAMESPACE_BEGIN
 //
 class BlockingCounter {
  public:
-  explicit BlockingCounter(int initial_count)
-      : count_(initial_count), num_waiting_(0) {}
+  explicit BlockingCounter(int initial_count);
 
   BlockingCounter(const BlockingCounter&) = delete;
   BlockingCounter& operator=(const BlockingCounter&) = delete;
@@ -89,8 +90,9 @@ class BlockingCounter {
 
  private:
   Mutex lock_;
-  int count_ ABSL_GUARDED_BY(lock_);
+  std::atomic<int> count_;
   int num_waiting_ ABSL_GUARDED_BY(lock_);
+  bool done_ ABSL_GUARDED_BY(lock_);
 };
 
 ABSL_NAMESPACE_END
diff --git a/absl/synchronization/blocking_counter_test.cc b/absl/synchronization/blocking_counter_test.cc
index 2926224a..06885f57 100644
--- a/absl/synchronization/blocking_counter_test.cc
+++ b/absl/synchronization/blocking_counter_test.cc
@@ -63,6 +63,18 @@ TEST(BlockingCounterTest, BasicFunctionality) {
   }
 }
 
+TEST(BlockingCounterTest, WaitZeroInitialCount) {
+  BlockingCounter counter(0);
+  counter.Wait();
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(BlockingCounterTest, WaitNegativeInitialCount) {
+  EXPECT_DEATH(BlockingCounter counter(-1),
+               "BlockingCounter initial_count negative");
+}
+#endif
+
 }  // namespace
 ABSL_NAMESPACE_END
 }  // namespace absl