diff options
Diffstat (limited to 'absl/flags/internal')
-rw-r--r-- | absl/flags/internal/flag.cc | 147 | ||||
-rw-r--r-- | absl/flags/internal/flag.h | 222 | ||||
-rw-r--r-- | absl/flags/internal/usage_test.cc | 6 |
3 files changed, 315 insertions, 60 deletions
diff --git a/absl/flags/internal/flag.cc b/absl/flags/internal/flag.cc index 65d0e58f..981f19fd 100644 --- a/absl/flags/internal/flag.cc +++ b/absl/flags/internal/flag.cc @@ -22,14 +22,17 @@ #include <array> #include <atomic> +#include <cstring> #include <memory> -#include <new> #include <string> #include <typeinfo> +#include <vector> +#include "absl/base/attributes.h" #include "absl/base/call_once.h" #include "absl/base/casts.h" #include "absl/base/config.h" +#include "absl/base/const_init.h" #include "absl/base/dynamic_annotations.h" #include "absl/base/optimization.h" #include "absl/flags/config.h" @@ -44,10 +47,9 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace flags_internal { -// The help message indicating that the commandline flag has been -// 'stripped'. It will not show up when doing "-help" and its -// variants. The flag is stripped if ABSL_FLAGS_STRIP_HELP is set to 1 -// before including absl/flags/flag.h +// The help message indicating that the commandline flag has been stripped. It +// will not show up when doing "-help" and its variants. The flag is stripped +// if ABSL_FLAGS_STRIP_HELP is set to 1 before including absl/flags/flag.h const char kStrippedFlagHelp[] = "\001\002\003\004 (unknown) \004\003\002\001"; namespace { @@ -78,9 +80,32 @@ class MutexRelock { absl::Mutex& mu_; }; +// This is a freelist of leaked flag values and guard for its access. +// When we can't guarantee it is safe to reuse the memory for flag values, +// we move the memory to the freelist where it lives indefinitely, so it can +// still be safely accessed. This also prevents leak checkers from complaining +// about the leaked memory that can no longer be accessed through any pointer. 
+ABSL_CONST_INIT absl::Mutex s_freelist_guard(absl::kConstInit); +ABSL_CONST_INIT std::vector<void*>* s_freelist = nullptr; + +void AddToFreelist(void* p) { + absl::MutexLock l(&s_freelist_guard); + if (!s_freelist) { + s_freelist = new std::vector<void*>; + } + s_freelist->push_back(p); +} + } // namespace /////////////////////////////////////////////////////////////////////////////// + +uint64_t NumLeakedFlagValues() { + absl::MutexLock l(&s_freelist_guard); + return s_freelist == nullptr ? 0u : s_freelist->size(); +} + +/////////////////////////////////////////////////////////////////////////////// // Persistent state of the flag data. class FlagImpl; @@ -97,7 +122,7 @@ class FlagState : public flags_internal::FlagStateInterface { counter_(counter) {} ~FlagState() override { - if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kAlignedBuffer && + if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kHeapAllocated && flag_impl_.ValueStorageKind() != FlagValueStorageKind::kSequenceLocked) return; flags_internal::Delete(flag_impl_.op_, value_.heap_allocated); @@ -140,6 +165,33 @@ void DynValueDeleter::operator()(void* ptr) const { Delete(op, ptr); } +MaskedPointer::MaskedPointer(ptr_t rhs, bool is_candidate) : ptr_(rhs) { + if (is_candidate) { + ApplyMask(kUnprotectedReadCandidate); + } +} + +bool MaskedPointer::IsUnprotectedReadCandidate() const { + return CheckMask(kUnprotectedReadCandidate); +} + +bool MaskedPointer::HasBeenRead() const { return CheckMask(kHasBeenRead); } + +void MaskedPointer::Set(FlagOpFn op, const void* src, bool is_candidate) { + flags_internal::Copy(op, src, Ptr()); + if (is_candidate) { + ApplyMask(kUnprotectedReadCandidate); + } +} +void MaskedPointer::MarkAsRead() { ApplyMask(kHasBeenRead); } + +void MaskedPointer::ApplyMask(mask_t mask) { + ptr_ = reinterpret_cast<ptr_t>(reinterpret_cast<mask_t>(ptr_) | mask); +} +bool MaskedPointer::CheckMask(mask_t mask) const { + return (reinterpret_cast<mask_t>(ptr_) & mask) != 0; +} + 
void FlagImpl::Init() { new (&data_guard_) absl::Mutex; @@ -174,11 +226,16 @@ void FlagImpl::Init() { (*default_value_.gen_func)(AtomicBufferValue()); break; } - case FlagValueStorageKind::kAlignedBuffer: + case FlagValueStorageKind::kHeapAllocated: // For this storage kind the default_value_ always points to gen_func // during initialization. assert(def_kind == FlagDefaultKind::kGenFunc); - (*default_value_.gen_func)(AlignedBufferValue()); + // Flag value initially points to the internal buffer. + MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire); + (*default_value_.gen_func)(ptr_value.Ptr()); + // Default value is a candidate for an unprotected read. + PtrStorage().store(MaskedPointer(ptr_value.Ptr(), true), + std::memory_order_release); break; } seq_lock_.MarkInitialized(); @@ -234,7 +291,7 @@ std::unique_ptr<void, DynValueDeleter> FlagImpl::MakeInitValue() const { return {res, DynValueDeleter{op_}}; } -void FlagImpl::StoreValue(const void* src) { +void FlagImpl::StoreValue(const void* src, ValueSource source) { switch (ValueStorageKind()) { case FlagValueStorageKind::kValueAndInitBit: case FlagValueStorageKind::kOneWordAtomic: { @@ -249,8 +306,27 @@ void FlagImpl::StoreValue(const void* src) { seq_lock_.Write(AtomicBufferValue(), src, Sizeof(op_)); break; } - case FlagValueStorageKind::kAlignedBuffer: - Copy(op_, src, AlignedBufferValue()); + case FlagValueStorageKind::kHeapAllocated: + MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire); + + if (ptr_value.IsUnprotectedReadCandidate() && ptr_value.HasBeenRead()) { + // If current value is a candidate for an unprotected read and if it was + // already read at least once, follow up reads (if any) are done without + // mutex protection. 
We can't guarantee it is safe to reuse this memory + // since it may have been accessed by another thread concurrently, so + // instead we move the memory to a freelist so it can still be safely + // accessed, and allocate a new one for the new value. + AddToFreelist(ptr_value.Ptr()); + ptr_value = MaskedPointer(Clone(op_, src), source == kCommandLine); + } else { + // Current value either was set programmatically or was never read. + // We can reuse the memory since all accesses to this value (if any) + // were protected by mutex. That said, if a new value comes from command + // line it now becomes a candidate for an unprotected read. + ptr_value.Set(op_, src, source == kCommandLine); + } + + PtrStorage().store(ptr_value, std::memory_order_release); seq_lock_.IncrementModificationCount(); break; } @@ -305,9 +381,10 @@ std::string FlagImpl::CurrentValue() const { ReadSequenceLockedData(cloned.get()); return flags_internal::Unparse(op_, cloned.get()); } - case FlagValueStorageKind::kAlignedBuffer: { + case FlagValueStorageKind::kHeapAllocated: { absl::MutexLock l(guard); - return flags_internal::Unparse(op_, AlignedBufferValue()); + return flags_internal::Unparse( + op_, PtrStorage().load(std::memory_order_acquire).Ptr()); } } @@ -370,10 +447,12 @@ std::unique_ptr<FlagStateInterface> FlagImpl::SaveState() { return absl::make_unique<FlagState>(*this, cloned, modified, on_command_line, ModificationCount()); } - case FlagValueStorageKind::kAlignedBuffer: { + case FlagValueStorageKind::kHeapAllocated: { return absl::make_unique<FlagState>( - *this, flags_internal::Clone(op_, AlignedBufferValue()), modified, - on_command_line, ModificationCount()); + *this, + flags_internal::Clone( + op_, PtrStorage().load(std::memory_order_acquire).Ptr()), + modified, on_command_line, ModificationCount()); } } return nullptr; @@ -388,11 +467,11 @@ bool FlagImpl::RestoreState(const FlagState& flag_state) { switch (ValueStorageKind()) { case FlagValueStorageKind::kValueAndInitBit: case 
FlagValueStorageKind::kOneWordAtomic: - StoreValue(&flag_state.value_.one_word); + StoreValue(&flag_state.value_.one_word, kProgrammaticChange); break; case FlagValueStorageKind::kSequenceLocked: - case FlagValueStorageKind::kAlignedBuffer: - StoreValue(flag_state.value_.heap_allocated); + case FlagValueStorageKind::kHeapAllocated: + StoreValue(flag_state.value_.heap_allocated, kProgrammaticChange); break; } @@ -411,11 +490,6 @@ StorageT* FlagImpl::OffsetValue() const { return reinterpret_cast<StorageT*>(p + offset); } -void* FlagImpl::AlignedBufferValue() const { - assert(ValueStorageKind() == FlagValueStorageKind::kAlignedBuffer); - return OffsetValue<void>(); -} - std::atomic<uint64_t>* FlagImpl::AtomicBufferValue() const { assert(ValueStorageKind() == FlagValueStorageKind::kSequenceLocked); return OffsetValue<std::atomic<uint64_t>>(); @@ -427,6 +501,11 @@ std::atomic<int64_t>& FlagImpl::OneWordValue() const { return OffsetValue<FlagOneWordValue>()->value; } +std::atomic<MaskedPointer>& FlagImpl::PtrStorage() const { + assert(ValueStorageKind() == FlagValueStorageKind::kHeapAllocated); + return OffsetValue<FlagMaskedPointerValue>()->value; +} + // Attempts to parse supplied `value` string using parsing routine in the `flag` // argument. If parsing successful, this function replaces the dst with newly // parsed value. In case if any error is encountered in either step, the error @@ -460,9 +539,17 @@ void FlagImpl::Read(void* dst) const { ReadSequenceLockedData(dst); break; } - case FlagValueStorageKind::kAlignedBuffer: { + case FlagValueStorageKind::kHeapAllocated: { absl::MutexLock l(guard); - flags_internal::CopyConstruct(op_, AlignedBufferValue(), dst); + MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire); + + flags_internal::CopyConstruct(op_, ptr_value.Ptr(), dst); + + // For unprotected read candidates, mark that the value has been read. 
+ if (ptr_value.IsUnprotectedReadCandidate() && !ptr_value.HasBeenRead()) { + ptr_value.MarkAsRead(); + PtrStorage().store(ptr_value, std::memory_order_release); + } break; } } @@ -513,7 +600,7 @@ void FlagImpl::Write(const void* src) { } } - StoreValue(src); + StoreValue(src, kProgrammaticChange); } // Sets the value of the flag based on specified string `value`. If the flag @@ -534,7 +621,7 @@ bool FlagImpl::ParseFrom(absl::string_view value, FlagSettingMode set_mode, auto tentative_value = TryParse(value, err); if (!tentative_value) return false; - StoreValue(tentative_value.get()); + StoreValue(tentative_value.get(), source); if (source == kCommandLine) { on_command_line_ = true; @@ -555,7 +642,7 @@ bool FlagImpl::ParseFrom(absl::string_view value, FlagSettingMode set_mode, auto tentative_value = TryParse(value, err); if (!tentative_value) return false; - StoreValue(tentative_value.get()); + StoreValue(tentative_value.get(), source); break; } case SET_FLAGS_DEFAULT: { @@ -573,7 +660,7 @@ bool FlagImpl::ParseFrom(absl::string_view value, FlagSettingMode set_mode, if (!modified_) { // Need to set both default value *and* current, in this case. - StoreValue(default_value_.dynamic_value); + StoreValue(default_value_.dynamic_value, source); modified_ = false; } break; diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h index 2e6e6b87..a0be31d9 100644 --- a/absl/flags/internal/flag.h +++ b/absl/flags/internal/flag.h @@ -22,7 +22,6 @@ #include <atomic> #include <cstring> #include <memory> -#include <new> #include <string> #include <type_traits> #include <typeinfo> @@ -296,11 +295,8 @@ constexpr FlagDefaultArg DefaultArg(char) { } /////////////////////////////////////////////////////////////////////////////// -// Flag current value auxiliary structs. - -constexpr int64_t UninitializedFlagValue() { - return static_cast<int64_t>(0xababababababababll); -} +// Flag storage selector traits. 
Each trait indicates what kind of storage kind +// to use for the flag value. template <typename T> using FlagUseValueAndInitBitStorage = @@ -322,9 +318,11 @@ enum class FlagValueStorageKind : uint8_t { kValueAndInitBit = 0, kOneWordAtomic = 1, kSequenceLocked = 2, - kAlignedBuffer = 3, + kHeapAllocated = 3, }; +// This constexpr function returns the storage kind for the given flag value +// type. template <typename T> static constexpr FlagValueStorageKind StorageKind() { return FlagUseValueAndInitBitStorage<T>::value @@ -333,14 +331,24 @@ static constexpr FlagValueStorageKind StorageKind() { ? FlagValueStorageKind::kOneWordAtomic : FlagUseSequenceLockStorage<T>::value ? FlagValueStorageKind::kSequenceLocked - : FlagValueStorageKind::kAlignedBuffer; + : FlagValueStorageKind::kHeapAllocated; } +// This is a base class for the storage classes used by kOneWordAtomic and +// kValueAndInitBit storage kinds. It literally just stores the one word value +// as an atomic. By default, it is initialized to a magic value that is unlikely +// a valid value for the flag value type. struct FlagOneWordValue { + constexpr static int64_t Uninitialized() { + return static_cast<int64_t>(0xababababababababll); + } + + constexpr FlagOneWordValue() : value(Uninitialized()) {} constexpr explicit FlagOneWordValue(int64_t v) : value(v) {} std::atomic<int64_t> value; }; +// This class represents a memory layout used by kValueAndInitBit storage kind. template <typename T> struct alignas(8) FlagValueAndInitBit { T value; @@ -349,16 +357,91 @@ struct alignas(8) FlagValueAndInitBit { uint8_t init; }; +// This class implements an aligned pointer with two options stored via masks +// in unused bits of the pointer value (due to alignment requirement). +// - IsUnprotectedReadCandidate - indicates that the value can be switched to +// unprotected read without a lock. +// - HasBeenRead - indicates that the value has been read at least once. 
+// - AllowsUnprotectedRead - combination of the two options above and indicates +// that the value can now be read without a lock. +// Further details of these options and their use is covered in the description +// of the FlagValue<T, FlagValueStorageKind::kHeapAllocated> specialization. +class MaskedPointer { + public: + using mask_t = uintptr_t; + using ptr_t = void*; + + static constexpr int RequiredAlignment() { return 4; } + + constexpr explicit MaskedPointer(ptr_t rhs) : ptr_(rhs) {} + MaskedPointer(ptr_t rhs, bool is_candidate); + + void* Ptr() const { + return reinterpret_cast<void*>(reinterpret_cast<mask_t>(ptr_) & + kPtrValueMask); + } + bool AllowsUnprotectedRead() const { + return (reinterpret_cast<mask_t>(ptr_) & kAllowsUnprotectedRead) == + kAllowsUnprotectedRead; + } + bool IsUnprotectedReadCandidate() const; + bool HasBeenRead() const; + + void Set(FlagOpFn op, const void* src, bool is_candidate); + void MarkAsRead(); + + private: + // Masks + // Indicates that the flag value either default or originated from command + // line. + static constexpr mask_t kUnprotectedReadCandidate = 0x1u; + // Indicates that flag has been read. + static constexpr mask_t kHasBeenRead = 0x2u; + static constexpr mask_t kAllowsUnprotectedRead = + kUnprotectedReadCandidate | kHasBeenRead; + static constexpr mask_t kPtrValueMask = ~kAllowsUnprotectedRead; + + void ApplyMask(mask_t mask); + bool CheckMask(mask_t mask) const; + + ptr_t ptr_; +}; + +// This class implements a type erased storage of the heap allocated flag value. +// It is used as a base class for the storage class for kHeapAllocated storage +// kind. The initial_buffer is expected to have an alignment of at least +// MaskedPointer::RequiredAlignment(), so that the bits used by the +// MaskedPointer to store masks are set to 0. This guarantees that value starts +// in an uninitialized state. 
+struct FlagMaskedPointerValue { + constexpr explicit FlagMaskedPointerValue(MaskedPointer::ptr_t initial_buffer) + : value(MaskedPointer(initial_buffer)) {} + + std::atomic<MaskedPointer> value; +}; + +// This is the forward declaration for the template that represents a storage +// for the flag values. This template is expected to be explicitly specialized +// for each storage kind and it does not have a generic default +// implementation. template <typename T, FlagValueStorageKind Kind = flags_internal::StorageKind<T>()> struct FlagValue; +// This specialization represents the storage of flag values types with the +// kValueAndInitBit storage kind. It is based on the FlagOneWordValue class +// and relies on memory layout in FlagValueAndInitBit<T> to indicate that the +// value has been initialized or not. template <typename T> struct FlagValue<T, FlagValueStorageKind::kValueAndInitBit> : FlagOneWordValue { constexpr FlagValue() : FlagOneWordValue(0) {} bool Get(const SequenceLock&, T& dst) const { int64_t storage = value.load(std::memory_order_acquire); if (ABSL_PREDICT_FALSE(storage == 0)) { + // This assert is to ensure that the initialization inside FlagImpl::Init + // is able to set init member correctly. + static_assert(offsetof(FlagValueAndInitBit<T>, init) == sizeof(T), + "Unexpected memory layout of FlagValueAndInitBit"); return false; } dst = absl::bit_cast<FlagValueAndInitBit<T>>(storage).value; @@ -366,12 +449,16 @@ struct FlagValue<T, FlagValueStorageKind::kValueAndInitBit> : FlagOneWordValue { } }; +// This specialization represents the storage of flag values types with the +// kOneWordAtomic storage kind. It is based on the FlagOneWordValue class +// and relies on the magic uninitialized state of default constructed instance of +// FlagOneWordValue to indicate that the value has been initialized or not. 
template <typename T> struct FlagValue<T, FlagValueStorageKind::kOneWordAtomic> : FlagOneWordValue { - constexpr FlagValue() : FlagOneWordValue(UninitializedFlagValue()) {} + constexpr FlagValue() : FlagOneWordValue() {} bool Get(const SequenceLock&, T& dst) const { int64_t one_word_val = value.load(std::memory_order_acquire); - if (ABSL_PREDICT_FALSE(one_word_val == UninitializedFlagValue())) { + if (ABSL_PREDICT_FALSE(one_word_val == FlagOneWordValue::Uninitialized())) { return false; } std::memcpy(&dst, static_cast<const void*>(&one_word_val), sizeof(T)); @@ -379,6 +466,12 @@ struct FlagValue<T, FlagValueStorageKind::kOneWordAtomic> : FlagOneWordValue { } }; +// This specialization represents the storage of flag values types with the +// kSequenceLocked storage kind. This storage is used by trivially copyable +// types with size greater than 8 bytes. This storage relies on uninitialized +// state of the SequenceLock to indicate that the value has been initialized or +// not. This storage also provides lock-free read access to the underlying +// value once it is initialized. template <typename T> struct FlagValue<T, FlagValueStorageKind::kSequenceLocked> { bool Get(const SequenceLock& lock, T& dst) const { @@ -392,11 +485,62 @@ struct FlagValue<T, FlagValueStorageKind::kSequenceLocked> { std::atomic<uint64_t>) std::atomic<uint64_t> value_words[kNumWords]; }; +// This specialization represents the storage of flag values types with the +// kHeapAllocated storage kind. This is a storage of last resort and is used +// if none of other storage kinds are applicable. +// +// Generally speaking the values with this storage kind can't be accessed +// atomically and thus can't be read without holding a lock. If we would ever +// want to avoid the lock, we'd need to leak the old value every time new flag +// value is being set (since we are in danger of having a race condition +// otherwise). 
+// +// Instead of doing that, this implementation attempts to cater to some common +// use cases by allowing at most 2 values to be leaked - default value and +// value set from the command line. +// +// This specialization provides an initial buffer for the first flag value. This +// is where the default value is going to be stored. We attempt to reuse this +// buffer if possible, including storing the value set from the command line +// there. +// +// As long as we only read this value, we can access it without a lock (in +// practice we still use the lock for the very first read to be able to set +// "has been read" option on this flag). +// +// If flag is specified on the command line we store the parsed value either +// in the internal buffer (if the default value has never been read) or we leak the +// default value and allocate the new storage for the parsed value. This value is +// also a candidate for an unprotected read. If flag is set programmatically +// after the command line is parsed, the storage for this value is going to be +// leaked. Note that in both scenarios we are not going to have a real leak. +// Instead we'll store the leaked value pointers in the internal freelist to +// avoid triggering the memory leak checker complaints. +// +// If the flag is ever set programmatically, it stops being the candidate for an +// unprotected read, and any follow up access to the flag value requires a lock. +// Note that if the value is set programmatically before the command line is +// parsed, we can switch back to enabling unprotected reads for that value. template <typename T> -struct FlagValue<T, FlagValueStorageKind::kAlignedBuffer> { - bool Get(const SequenceLock&, T&) const { return false; } +struct FlagValue<T, FlagValueStorageKind::kHeapAllocated> + : FlagMaskedPointerValue { + // We const initialize the value with unmasked pointer to the internal buffer, + // making sure it is not a candidate for unprotected read. 
This way we can + // ensure Init is done before any access to the flag value. + constexpr FlagValue() : FlagMaskedPointerValue(&buffer[0]) {} + + bool Get(const SequenceLock&, T& dst) const { + MaskedPointer ptr_value = value.load(std::memory_order_acquire); - alignas(T) char value[sizeof(T)]; + if (ABSL_PREDICT_TRUE(ptr_value.AllowsUnprotectedRead())) { + ::new (static_cast<void*>(&dst)) T(*static_cast<T*>(ptr_value.Ptr())); + return true; + } + return false; + } + + alignas(MaskedPointer::RequiredAlignment()) alignas( + T) char buffer[sizeof(T)]{}; }; /////////////////////////////////////////////////////////////////////////////// @@ -425,6 +569,13 @@ struct DynValueDeleter { class FlagState; +// These are only used as constexpr global objects. +// They do not use a virtual destructor to simplify their implementation. +// They are not destroyed except at program exit, so leaks do not matter. +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wnon-virtual-dtor" +#endif class FlagImpl final : public CommandLineFlag { public: constexpr FlagImpl(const char* name, const char* filename, FlagOpFn op, @@ -477,7 +628,7 @@ class FlagImpl final : public CommandLineFlag { // Used in read/write operations to validate source/target has correct type. // For example if flag is declared as absl::Flag<int> FLAGS_foo, a call to // absl::GetFlag(FLAGS_foo) validates that the type of FLAGS_foo is indeed - // int. To do that we pass the "assumed" type id (which is deduced from type + // int. To do that we pass the assumed type id (which is deduced from type // int) as an argument `type_id`, which is in turn is validated against the // type id stored in flag object by flag definition statement. void AssertValidType(FlagFastTypeId type_id, @@ -498,17 +649,13 @@ class FlagImpl final : public CommandLineFlag { void Init(); // Offset value access methods. One per storage kind. 
These methods to not - // respect const correctness, so be very carefull using them. + // respect const correctness, so be very careful using them. // This is a shared helper routine which encapsulates most of the magic. Since // it is only used inside the three routines below, which are defined in // flag.cc, we can define it in that file as well. template <typename StorageT> StorageT* OffsetValue() const; - // This is an accessor for a value stored in an aligned buffer storage - // used for non-trivially-copyable data types. - // Returns a mutable pointer to the start of a buffer. - void* AlignedBufferValue() const; // The same as above, but used for sequencelock-protected storage. std::atomic<uint64_t>* AtomicBufferValue() const; @@ -517,13 +664,16 @@ class FlagImpl final : public CommandLineFlag { // mutable reference to an atomic value. std::atomic<int64_t>& OneWordValue() const; + std::atomic<MaskedPointer>& PtrStorage() const; + // Attempts to parse supplied `value` string. If parsing is successful, // returns new value. Otherwise returns nullptr. std::unique_ptr<void, DynValueDeleter> TryParse(absl::string_view value, std::string& err) const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); // Stores the flag value based on the pointer to the source. - void StoreValue(const void* src) ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + void StoreValue(const void* src, ValueSource source) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); // Copy the flag data, protected by `seq_lock_` into `dst`. // @@ -579,7 +729,7 @@ class FlagImpl final : public CommandLineFlag { const char* const name_; // The file name where ABSL_FLAG resides. const char* const filename_; - // Type-specific operations "vtable". + // Type-specific operations vtable. const FlagOpFn op_; // Help message literal or function to generate it. const FlagHelpMsg help_; @@ -624,6 +774,9 @@ class FlagImpl final : public CommandLineFlag { // problems. 
alignas(absl::Mutex) mutable char data_guard_[sizeof(absl::Mutex)]; }; +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic pop +#endif /////////////////////////////////////////////////////////////////////////////// // The Flag object parameterized by the flag's value type. This class implements @@ -711,16 +864,21 @@ class FlagImplPeer { // Implementation of Flag value specific operations routine. template <typename T> void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) { + struct AlignedSpace { + alignas(MaskedPointer::RequiredAlignment()) alignas(T) char buf[sizeof(T)]; + }; + using Allocator = std::allocator<AlignedSpace>; switch (op) { case FlagOp::kAlloc: { - std::allocator<T> alloc; - return std::allocator_traits<std::allocator<T>>::allocate(alloc, 1); + Allocator alloc; + return std::allocator_traits<Allocator>::allocate(alloc, 1); } case FlagOp::kDelete: { T* p = static_cast<T*>(v2); p->~T(); - std::allocator<T> alloc; - std::allocator_traits<std::allocator<T>>::deallocate(alloc, p, 1); + Allocator alloc; + std::allocator_traits<Allocator>::deallocate( + alloc, reinterpret_cast<AlignedSpace*>(p), 1); return nullptr; } case FlagOp::kCopy: @@ -754,8 +912,7 @@ void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) { // Round sizeof(FlagImp) to a multiple of alignof(FlagValue<T>) to get the // offset of the data. 
size_t round_to = alignof(FlagValue<T>); - size_t offset = - (sizeof(FlagImpl) + round_to - 1) / round_to * round_to; + size_t offset = (sizeof(FlagImpl) + round_to - 1) / round_to * round_to; return reinterpret_cast<void*>(offset); } } @@ -770,7 +927,8 @@ struct FlagRegistrarEmpty {}; template <typename T, bool do_register> class FlagRegistrar { public: - explicit FlagRegistrar(Flag<T>& flag, const char* filename) : flag_(flag) { + constexpr explicit FlagRegistrar(Flag<T>& flag, const char* filename) + : flag_(flag) { if (do_register) flags_internal::RegisterCommandLineFlag(flag_.impl_, filename); } @@ -780,15 +938,19 @@ class FlagRegistrar { return *this; } - // Make the registrar "die" gracefully as an empty struct on a line where + // Makes the registrar die gracefully as an empty struct on a line where // registration happens. Registrar objects are intended to live only as // temporary. - operator FlagRegistrarEmpty() const { return {}; } // NOLINT + constexpr operator FlagRegistrarEmpty() const { return {}; } // NOLINT private: Flag<T>& flag_; // Flag being registered (not owned). 
}; +/////////////////////////////////////////////////////////////////////////////// +// Test only API +uint64_t NumLeakedFlagValues(); + } // namespace flags_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/flags/internal/usage_test.cc b/absl/flags/internal/usage_test.cc index 6847386f..9b6d730c 100644 --- a/absl/flags/internal/usage_test.cc +++ b/absl/flags/internal/usage_test.cc @@ -22,6 +22,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "absl/flags/config.h" #include "absl/flags/flag.h" #include "absl/flags/internal/parse.h" #include "absl/flags/internal/program_name.h" @@ -97,6 +98,11 @@ class UsageReportingTest : public testing::Test { flags::SetFlagsHelpMatchSubstr(""); flags::SetFlagsHelpFormat(flags::HelpFormat::kHumanReadable); } + void SetUp() override { +#if ABSL_FLAGS_STRIP_NAMES + GTEST_SKIP() << "This test requires flag names to be present"; +#endif + } private: absl::FlagSaver flag_saver_; |