Diffstat (limited to 'absl/flags')
-rw-r--r--  absl/flags/BUILD.bazel       |   1
-rw-r--r--  absl/flags/CMakeLists.txt    |   1
-rw-r--r--  absl/flags/flag_test.cc      | 200
-rw-r--r--  absl/flags/internal/flag.cc  | 147
-rw-r--r--  absl/flags/internal/flag.h   | 203
5 files changed, 470 insertions(+), 82 deletions(-)
diff --git a/absl/flags/BUILD.bazel b/absl/flags/BUILD.bazel
index cc513a09..7a8ec7e6 100644
--- a/absl/flags/BUILD.bazel
+++ b/absl/flags/BUILD.bazel
@@ -390,6 +390,7 @@ cc_test(
":flag",
":flag_internal",
":marshalling",
+ ":parse",
":reflection",
"//absl/base:core_headers",
"//absl/base:malloc_internal",
diff --git a/absl/flags/CMakeLists.txt b/absl/flags/CMakeLists.txt
index 48cc8325..7376d117 100644
--- a/absl/flags/CMakeLists.txt
+++ b/absl/flags/CMakeLists.txt
@@ -340,6 +340,7 @@ absl_cc_test(
absl::flags_config
absl::flags_internal
absl::flags_marshalling
+ absl::flags_parse
absl::flags_reflection
absl::int128
absl::optional
diff --git a/absl/flags/flag_test.cc b/absl/flags/flag_test.cc
index 8bc7cdfd..8af5bf7f 100644
--- a/absl/flags/flag_test.cc
+++ b/absl/flags/flag_test.cc
@@ -31,6 +31,7 @@
#include "absl/flags/declare.h"
#include "absl/flags/internal/flag.h"
#include "absl/flags/marshalling.h"
+#include "absl/flags/parse.h"
#include "absl/flags/reflection.h"
#include "absl/flags/usage_config.h"
#include "absl/numeric/int128.h"
@@ -126,9 +127,9 @@ TEST_F(FlagTest, Traits) {
#endif
EXPECT_EQ(flags::StorageKind<std::string>(),
- flags::FlagValueStorageKind::kAlignedBuffer);
+ flags::FlagValueStorageKind::kHeapAllocated);
EXPECT_EQ(flags::StorageKind<std::vector<std::string>>(),
- flags::FlagValueStorageKind::kAlignedBuffer);
+ flags::FlagValueStorageKind::kHeapAllocated);
EXPECT_EQ(flags::StorageKind<absl::int128>(),
flags::FlagValueStorageKind::kSequenceLocked);
@@ -267,7 +268,7 @@ TEST_F(FlagTest, TestFlagDeclaration) {
#if ABSL_FLAGS_STRIP_NAMES
// The intent of this helper struct and an expression below is to make sure that
// in the configuration where ABSL_FLAGS_STRIP_NAMES=1 registrar construction
-// (in cases of of no Tail calls like OnUpdate) is constexpr and thus can and
+// (in cases of no Tail calls like OnUpdate) is constexpr and thus can and
// should be completely optimized away, thus avoiding the cost/overhead of
// static initializers.
struct VerifyConsteval {
@@ -515,8 +516,10 @@ TEST_F(FlagTest, TestDefault) {
struct NonTriviallyCopyableAggregate {
NonTriviallyCopyableAggregate() = default;
+ // NOLINTNEXTLINE
NonTriviallyCopyableAggregate(const NonTriviallyCopyableAggregate& rhs)
: value(rhs.value) {}
+ // NOLINTNEXTLINE
NonTriviallyCopyableAggregate& operator=(
const NonTriviallyCopyableAggregate& rhs) {
value = rhs.value;
@@ -975,51 +978,194 @@ bool AbslParseFlag(absl::string_view, SmallAlignUDT*, std::string*) {
}
std::string AbslUnparseFlag(const SmallAlignUDT&) { return ""; }
-// User-defined type with small size, but not trivially copyable.
+} // namespace
+
+ABSL_FLAG(SmallAlignUDT, test_flag_sa_udt, {}, "help");
+
+namespace {
+
+TEST_F(FlagTest, TestSmallAlignUDT) {
+ EXPECT_EQ(flags::StorageKind<SmallAlignUDT>(),
+ flags::FlagValueStorageKind::kSequenceLocked);
+ SmallAlignUDT value = absl::GetFlag(FLAGS_test_flag_sa_udt);
+ EXPECT_EQ(value.c, 'A');
+ EXPECT_EQ(value.s, 12);
+
+ value.c = 'B';
+ value.s = 45;
+ absl::SetFlag(&FLAGS_test_flag_sa_udt, value);
+ value = absl::GetFlag(FLAGS_test_flag_sa_udt);
+ EXPECT_EQ(value.c, 'B');
+ EXPECT_EQ(value.s, 45);
+}
+} // namespace
+
+// --------------------------------------------------------------------
+
+namespace {
+
+// User-defined non-trivially copyable type.
+template <int id>
struct NonTriviallyCopyableUDT {
- NonTriviallyCopyableUDT() : c('A') {}
- NonTriviallyCopyableUDT(const NonTriviallyCopyableUDT& rhs) : c(rhs.c) {}
+ NonTriviallyCopyableUDT() : c('A') { s_num_instance++; }
+ NonTriviallyCopyableUDT(const NonTriviallyCopyableUDT& rhs) : c(rhs.c) {
+ s_num_instance++;
+ }
NonTriviallyCopyableUDT& operator=(const NonTriviallyCopyableUDT& rhs) {
c = rhs.c;
return *this;
}
+ ~NonTriviallyCopyableUDT() { s_num_instance--; }
+ static uint64_t s_num_instance;
char c;
};
-bool AbslParseFlag(absl::string_view, NonTriviallyCopyableUDT*, std::string*) {
+template <int id>
+uint64_t NonTriviallyCopyableUDT<id>::s_num_instance = 0;
+
+template <int id>
+bool AbslParseFlag(absl::string_view txt, NonTriviallyCopyableUDT<id>* f,
+ std::string*) {
+ f->c = txt.empty() ? '\0' : txt[0];
return true;
}
-std::string AbslUnparseFlag(const NonTriviallyCopyableUDT&) { return ""; }
+template <int id>
+std::string AbslUnparseFlag(const NonTriviallyCopyableUDT<id>&) {
+ return "";
+}
+template <int id, typename F>
+void TestExpectedLeaks(
+ F&& f, uint64_t num_leaks,
+ absl::optional<uint64_t> num_new_instances = absl::nullopt) {
+ if (!num_new_instances.has_value()) num_new_instances = num_leaks;
+
+ auto num_leaked_before = flags::NumLeakedFlagValues();
+ auto num_instances_before = NonTriviallyCopyableUDT<id>::s_num_instance;
+ f();
+ EXPECT_EQ(num_leaked_before + num_leaks, flags::NumLeakedFlagValues());
+ EXPECT_EQ(num_instances_before + num_new_instances.value(),
+ NonTriviallyCopyableUDT<id>::s_num_instance);
+}
} // namespace
-ABSL_FLAG(SmallAlignUDT, test_flag_sa_udt, {}, "help");
-ABSL_FLAG(NonTriviallyCopyableUDT, test_flag_ntc_udt, {}, "help");
+ABSL_FLAG(NonTriviallyCopyableUDT<1>, test_flag_ntc_udt1, {}, "help");
+ABSL_FLAG(NonTriviallyCopyableUDT<2>, test_flag_ntc_udt2, {}, "help");
+ABSL_FLAG(NonTriviallyCopyableUDT<3>, test_flag_ntc_udt3, {}, "help");
+ABSL_FLAG(NonTriviallyCopyableUDT<4>, test_flag_ntc_udt4, {}, "help");
+ABSL_FLAG(NonTriviallyCopyableUDT<5>, test_flag_ntc_udt5, {}, "help");
namespace {
-TEST_F(FlagTest, TestSmallAlignUDT) {
- SmallAlignUDT value = absl::GetFlag(FLAGS_test_flag_sa_udt);
- EXPECT_EQ(value.c, 'A');
- EXPECT_EQ(value.s, 12);
+TEST_F(FlagTest, TestNonTriviallyCopyableGetSetSet) {
+ EXPECT_EQ(flags::StorageKind<NonTriviallyCopyableUDT<1>>(),
+ flags::FlagValueStorageKind::kHeapAllocated);
+
+ TestExpectedLeaks<1>(
+ [&] {
+ NonTriviallyCopyableUDT<1> value =
+ absl::GetFlag(FLAGS_test_flag_ntc_udt1);
+ EXPECT_EQ(value.c, 'A');
+ },
+ 0);
+
+ TestExpectedLeaks<1>(
+ [&] {
+ NonTriviallyCopyableUDT<1> value;
+ value.c = 'B';
+ absl::SetFlag(&FLAGS_test_flag_ntc_udt1, value);
+ EXPECT_EQ(value.c, 'B');
+ },
+ 1);
+
+ TestExpectedLeaks<1>(
+ [&] {
+ NonTriviallyCopyableUDT<1> value;
+ value.c = 'C';
+ absl::SetFlag(&FLAGS_test_flag_ntc_udt1, value);
+ },
+ 0);
+}
- value.c = 'B';
- value.s = 45;
- absl::SetFlag(&FLAGS_test_flag_sa_udt, value);
- value = absl::GetFlag(FLAGS_test_flag_sa_udt);
- EXPECT_EQ(value.c, 'B');
- EXPECT_EQ(value.s, 45);
+TEST_F(FlagTest, TestNonTriviallyCopyableParseSet) {
+ TestExpectedLeaks<2>(
+ [&] {
+ const char* in_argv[] = {"testbin", "--test_flag_ntc_udt2=A"};
+ absl::ParseCommandLine(2, const_cast<char**>(in_argv));
+ },
+ 0);
+
+ TestExpectedLeaks<2>(
+ [&] {
+ NonTriviallyCopyableUDT<2> value;
+ value.c = 'B';
+ absl::SetFlag(&FLAGS_test_flag_ntc_udt2, value);
+ EXPECT_EQ(value.c, 'B');
+ },
+ 0);
}
-TEST_F(FlagTest, TestNonTriviallyCopyableUDT) {
- NonTriviallyCopyableUDT value = absl::GetFlag(FLAGS_test_flag_ntc_udt);
- EXPECT_EQ(value.c, 'A');
+TEST_F(FlagTest, TestNonTriviallyCopyableSet) {
+ TestExpectedLeaks<3>(
+ [&] {
+ NonTriviallyCopyableUDT<3> value;
+ value.c = 'B';
+ absl::SetFlag(&FLAGS_test_flag_ntc_udt3, value);
+ EXPECT_EQ(value.c, 'B');
+ },
+ 0);
+}
- value.c = 'B';
- absl::SetFlag(&FLAGS_test_flag_ntc_udt, value);
- value = absl::GetFlag(FLAGS_test_flag_ntc_udt);
- EXPECT_EQ(value.c, 'B');
+// One new instance created during initialization and stored in the flag.
+auto premain_utd4_get =
+ (TestExpectedLeaks<4>([] { (void)absl::GetFlag(FLAGS_test_flag_ntc_udt4); },
+ 0, 1),
+ false);
+
+TEST_F(FlagTest, TestNonTriviallyCopyableGetBeforeMainParseGet) {
+ TestExpectedLeaks<4>(
+ [&] {
+ const char* in_argv[] = {"testbin", "--test_flag_ntc_udt4=C"};
+ absl::ParseCommandLine(2, const_cast<char**>(in_argv));
+ },
+ 1);
+
+ TestExpectedLeaks<4>(
+ [&] {
+ NonTriviallyCopyableUDT<4> value =
+ absl::GetFlag(FLAGS_test_flag_ntc_udt4);
+ EXPECT_EQ(value.c, 'C');
+ },
+ 0);
+}
+
+// One new instance created during initialization, which is reused since it was
+// never read.
+auto premain_utd5_set = (TestExpectedLeaks<5>(
+ [] {
+ NonTriviallyCopyableUDT<5> value;
+ value.c = 'B';
+ absl::SetFlag(&FLAGS_test_flag_ntc_udt5, value);
+ },
+ 0, 1),
+ false);
+
+TEST_F(FlagTest, TestNonTriviallyCopyableSetParseGet) {
+ TestExpectedLeaks<5>(
+ [&] {
+ const char* in_argv[] = {"testbin", "--test_flag_ntc_udt5=C"};
+ absl::ParseCommandLine(2, const_cast<char**>(in_argv));
+ },
+ 0);
+
+ TestExpectedLeaks<5>(
+ [&] {
+ NonTriviallyCopyableUDT<5> value =
+ absl::GetFlag(FLAGS_test_flag_ntc_udt5);
+ EXPECT_EQ(value.c, 'C');
+ },
+ 0);
}
} // namespace
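
Note on the premain_utd4_get and premain_utd5_set initializers above: they use
the comma operator so that a call made purely for its side effects can seed a
namespace-scope variable during dynamic initialization, i.e. before main() and
before absl::ParseCommandLine runs. A minimal standalone sketch of the trick
(hypothetical names, not part of this patch):

#include <cstdio>

void RunBeforeMain() { std::puts("runs during dynamic initialization"); }

// RunBeforeMain() executes while premain_hook is initialized; the variable
// itself merely stores the trailing false.
auto premain_hook = (RunBeforeMain(), false);

int main() { std::puts("main runs afterwards"); }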
diff --git a/absl/flags/internal/flag.cc b/absl/flags/internal/flag.cc
index 65d0e58f..981f19fd 100644
--- a/absl/flags/internal/flag.cc
+++ b/absl/flags/internal/flag.cc
@@ -22,14 +22,17 @@
#include <array>
#include <atomic>
+#include <cstring>
#include <memory>
-#include <new>
#include <string>
#include <typeinfo>
+#include <vector>
+#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/base/casts.h"
#include "absl/base/config.h"
+#include "absl/base/const_init.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/optimization.h"
#include "absl/flags/config.h"
@@ -44,10 +47,9 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
-// The help message indicating that the commandline flag has been
-// 'stripped'. It will not show up when doing "-help" and its
-// variants. The flag is stripped if ABSL_FLAGS_STRIP_HELP is set to 1
-// before including absl/flags/flag.h
+// The help message indicating that the commandline flag has been stripped. It
+// will not show up when doing "-help" and its variants. The flag is stripped
+// if ABSL_FLAGS_STRIP_HELP is set to 1 before including absl/flags/flag.h
const char kStrippedFlagHelp[] = "\001\002\003\004 (unknown) \004\003\002\001";
namespace {
@@ -78,9 +80,32 @@ class MutexRelock {
absl::Mutex& mu_;
};
+// This is a freelist of leaked flag values and a guard for its access.
+// When we can't guarantee it is safe to reuse the memory for flag values,
+// we move the memory to the freelist, where it lives indefinitely, so it can
+// still be safely accessed. Keeping these pointers reachable through the
+// freelist also prevents leak checkers from complaining about memory that
+// would otherwise no longer be reachable through any pointer.
+ABSL_CONST_INIT absl::Mutex s_freelist_guard(absl::kConstInit);
+ABSL_CONST_INIT std::vector<void*>* s_freelist = nullptr;
+
+void AddToFreelist(void* p) {
+ absl::MutexLock l(&s_freelist_guard);
+ if (!s_freelist) {
+ s_freelist = new std::vector<void*>;
+ }
+ s_freelist->push_back(p);
+}
+
} // namespace
///////////////////////////////////////////////////////////////////////////////
+
+uint64_t NumLeakedFlagValues() {
+ absl::MutexLock l(&s_freelist_guard);
+ return s_freelist == nullptr ? 0u : s_freelist->size();
+}
+
+///////////////////////////////////////////////////////////////////////////////
// Persistent state of the flag data.
class FlagImpl;
@@ -97,7 +122,7 @@ class FlagState : public flags_internal::FlagStateInterface {
counter_(counter) {}
~FlagState() override {
- if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kAlignedBuffer &&
+ if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kHeapAllocated &&
flag_impl_.ValueStorageKind() != FlagValueStorageKind::kSequenceLocked)
return;
flags_internal::Delete(flag_impl_.op_, value_.heap_allocated);
@@ -140,6 +165,33 @@ void DynValueDeleter::operator()(void* ptr) const {
Delete(op, ptr);
}
+MaskedPointer::MaskedPointer(ptr_t rhs, bool is_candidate) : ptr_(rhs) {
+ if (is_candidate) {
+ ApplyMask(kUnprotectedReadCandidate);
+ }
+}
+
+bool MaskedPointer::IsUnprotectedReadCandidate() const {
+ return CheckMask(kUnprotectedReadCandidate);
+}
+
+bool MaskedPointer::HasBeenRead() const { return CheckMask(kHasBeenRead); }
+
+void MaskedPointer::Set(FlagOpFn op, const void* src, bool is_candidate) {
+ flags_internal::Copy(op, src, Ptr());
+ if (is_candidate) {
+ ApplyMask(kUnprotectedReadCandidate);
+ }
+}
+void MaskedPointer::MarkAsRead() { ApplyMask(kHasBeenRead); }
+
+void MaskedPointer::ApplyMask(mask_t mask) {
+ ptr_ = reinterpret_cast<ptr_t>(reinterpret_cast<mask_t>(ptr_) | mask);
+}
+bool MaskedPointer::CheckMask(mask_t mask) const {
+ return (reinterpret_cast<mask_t>(ptr_) & mask) != 0;
+}
+
void FlagImpl::Init() {
new (&data_guard_) absl::Mutex;
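
For intuition, ApplyMask and CheckMask above are conventional pointer tagging:
since flag values are aligned to at least 4 bytes, the two low bits of the
pointer are always zero and are free to carry the two options. A standalone
sketch of the arithmetic (illustrative only, not part of this patch):

#include <cassert>
#include <cstdint>

int main() {
  alignas(4) static int value = 42;   // alignment >= 4 => low two bits are 0
  auto tagged = reinterpret_cast<uintptr_t>(&value);
  tagged |= 0x1u;                   // kUnprotectedReadCandidate
  tagged |= 0x2u;                   // kHasBeenRead
  assert((tagged & 0x3u) == 0x3u);  // both set => unprotected reads allowed
  // Strip the mask bits to recover the usable pointer, as Ptr() does.
  int* p = reinterpret_cast<int*>(tagged & ~uintptr_t{0x3u});
  assert(*p == 42);
  return 0;
}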
@@ -174,11 +226,16 @@ void FlagImpl::Init() {
(*default_value_.gen_func)(AtomicBufferValue());
break;
}
- case FlagValueStorageKind::kAlignedBuffer:
+ case FlagValueStorageKind::kHeapAllocated:
// For this storage kind the default_value_ always points to gen_func
// during initialization.
assert(def_kind == FlagDefaultKind::kGenFunc);
- (*default_value_.gen_func)(AlignedBufferValue());
+ // Flag value initially points to the internal buffer.
+ MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire);
+ (*default_value_.gen_func)(ptr_value.Ptr());
+ // Default value is a candidate for an unprotected read.
+ PtrStorage().store(MaskedPointer(ptr_value.Ptr(), true),
+ std::memory_order_release);
break;
}
seq_lock_.MarkInitialized();
@@ -234,7 +291,7 @@ std::unique_ptr<void, DynValueDeleter> FlagImpl::MakeInitValue() const {
return {res, DynValueDeleter{op_}};
}
-void FlagImpl::StoreValue(const void* src) {
+void FlagImpl::StoreValue(const void* src, ValueSource source) {
switch (ValueStorageKind()) {
case FlagValueStorageKind::kValueAndInitBit:
case FlagValueStorageKind::kOneWordAtomic: {
@@ -249,8 +306,27 @@ void FlagImpl::StoreValue(const void* src) {
seq_lock_.Write(AtomicBufferValue(), src, Sizeof(op_));
break;
}
- case FlagValueStorageKind::kAlignedBuffer:
- Copy(op_, src, AlignedBufferValue());
+ case FlagValueStorageKind::kHeapAllocated:
+ MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire);
+
+ if (ptr_value.IsUnprotectedReadCandidate() && ptr_value.HasBeenRead()) {
+      // If the current value is a candidate for an unprotected read and was
+      // already read at least once, follow-up reads (if any) are done without
+      // mutex protection. We can't guarantee it is safe to reuse this memory
+      // since it may be accessed by another thread concurrently, so instead
+      // we move the memory to the freelist, where it can still be safely
+      // accessed, and allocate new storage for the new value.
+ AddToFreelist(ptr_value.Ptr());
+ ptr_value = MaskedPointer(Clone(op_, src), source == kCommandLine);
+ } else {
+      // The current value either was set programmatically or was never read.
+      // We can reuse the memory since all accesses to this value (if any)
+      // were protected by a mutex. That said, if the new value comes from the
+      // command line, it now becomes a candidate for an unprotected read.
+ ptr_value.Set(op_, src, source == kCommandLine);
+ }
+
+ PtrStorage().store(ptr_value, std::memory_order_release);
seq_lock_.IncrementModificationCount();
break;
}
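
The branch above reduces to a small policy: old storage may be reused only
while no unprotected reader could have observed it. A simplified standalone
model of the decision (hypothetical, for illustration only):

#include <iostream>

// Mirrors the two MaskedPointer options consulted by StoreValue.
struct OldValueState {
  bool unprotected_read_candidate;
  bool has_been_read;
};

const char* StorePolicy(OldValueState s) {
  return s.unprotected_read_candidate && s.has_been_read
             ? "retire old storage to the freelist, allocate anew"
             : "overwrite old storage in place";
}

int main() {
  std::cout << StorePolicy({true, true}) << "\n";   // lock-free reads began
  std::cout << StorePolicy({true, false}) << "\n";  // candidate, never read
  std::cout << StorePolicy({false, false}) << "\n"; // set programmatically
}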
@@ -305,9 +381,10 @@ std::string FlagImpl::CurrentValue() const {
ReadSequenceLockedData(cloned.get());
return flags_internal::Unparse(op_, cloned.get());
}
- case FlagValueStorageKind::kAlignedBuffer: {
+ case FlagValueStorageKind::kHeapAllocated: {
absl::MutexLock l(guard);
- return flags_internal::Unparse(op_, AlignedBufferValue());
+ return flags_internal::Unparse(
+ op_, PtrStorage().load(std::memory_order_acquire).Ptr());
}
}
@@ -370,10 +447,12 @@ std::unique_ptr<FlagStateInterface> FlagImpl::SaveState() {
return absl::make_unique<FlagState>(*this, cloned, modified,
on_command_line, ModificationCount());
}
- case FlagValueStorageKind::kAlignedBuffer: {
+ case FlagValueStorageKind::kHeapAllocated: {
return absl::make_unique<FlagState>(
- *this, flags_internal::Clone(op_, AlignedBufferValue()), modified,
- on_command_line, ModificationCount());
+ *this,
+ flags_internal::Clone(
+ op_, PtrStorage().load(std::memory_order_acquire).Ptr()),
+ modified, on_command_line, ModificationCount());
}
}
return nullptr;
@@ -388,11 +467,11 @@ bool FlagImpl::RestoreState(const FlagState& flag_state) {
switch (ValueStorageKind()) {
case FlagValueStorageKind::kValueAndInitBit:
case FlagValueStorageKind::kOneWordAtomic:
- StoreValue(&flag_state.value_.one_word);
+ StoreValue(&flag_state.value_.one_word, kProgrammaticChange);
break;
case FlagValueStorageKind::kSequenceLocked:
- case FlagValueStorageKind::kAlignedBuffer:
- StoreValue(flag_state.value_.heap_allocated);
+ case FlagValueStorageKind::kHeapAllocated:
+ StoreValue(flag_state.value_.heap_allocated, kProgrammaticChange);
break;
}
@@ -411,11 +490,6 @@ StorageT* FlagImpl::OffsetValue() const {
return reinterpret_cast<StorageT*>(p + offset);
}
-void* FlagImpl::AlignedBufferValue() const {
- assert(ValueStorageKind() == FlagValueStorageKind::kAlignedBuffer);
- return OffsetValue<void>();
-}
-
std::atomic<uint64_t>* FlagImpl::AtomicBufferValue() const {
assert(ValueStorageKind() == FlagValueStorageKind::kSequenceLocked);
return OffsetValue<std::atomic<uint64_t>>();
@@ -427,6 +501,11 @@ std::atomic<int64_t>& FlagImpl::OneWordValue() const {
return OffsetValue<FlagOneWordValue>()->value;
}
+std::atomic<MaskedPointer>& FlagImpl::PtrStorage() const {
+ assert(ValueStorageKind() == FlagValueStorageKind::kHeapAllocated);
+ return OffsetValue<FlagMaskedPointerValue>()->value;
+}
+
// Attempts to parse supplied `value` string using parsing routine in the `flag`
// argument. If parsing successful, this function replaces the dst with newly
// parsed value. In case if any error is encountered in either step, the error
@@ -460,9 +539,17 @@ void FlagImpl::Read(void* dst) const {
ReadSequenceLockedData(dst);
break;
}
- case FlagValueStorageKind::kAlignedBuffer: {
+ case FlagValueStorageKind::kHeapAllocated: {
absl::MutexLock l(guard);
- flags_internal::CopyConstruct(op_, AlignedBufferValue(), dst);
+ MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire);
+
+ flags_internal::CopyConstruct(op_, ptr_value.Ptr(), dst);
+
+      // For unprotected read candidates, mark that the value has been read.
+ if (ptr_value.IsUnprotectedReadCandidate() && !ptr_value.HasBeenRead()) {
+ ptr_value.MarkAsRead();
+ PtrStorage().store(ptr_value, std::memory_order_release);
+ }
break;
}
}
@@ -513,7 +600,7 @@ void FlagImpl::Write(const void* src) {
}
}
- StoreValue(src);
+ StoreValue(src, kProgrammaticChange);
}
// Sets the value of the flag based on specified string `value`. If the flag
@@ -534,7 +621,7 @@ bool FlagImpl::ParseFrom(absl::string_view value, FlagSettingMode set_mode,
auto tentative_value = TryParse(value, err);
if (!tentative_value) return false;
- StoreValue(tentative_value.get());
+ StoreValue(tentative_value.get(), source);
if (source == kCommandLine) {
on_command_line_ = true;
@@ -555,7 +642,7 @@ bool FlagImpl::ParseFrom(absl::string_view value, FlagSettingMode set_mode,
auto tentative_value = TryParse(value, err);
if (!tentative_value) return false;
- StoreValue(tentative_value.get());
+ StoreValue(tentative_value.get(), source);
break;
}
case SET_FLAGS_DEFAULT: {
@@ -573,7 +660,7 @@ bool FlagImpl::ParseFrom(absl::string_view value, FlagSettingMode set_mode,
if (!modified_) {
// Need to set both default value *and* current, in this case.
- StoreValue(default_value_.dynamic_value);
+ StoreValue(default_value_.dynamic_value, source);
modified_ = false;
}
break;
diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h
index 602531b1..a0be31d9 100644
--- a/absl/flags/internal/flag.h
+++ b/absl/flags/internal/flag.h
@@ -295,11 +295,8 @@ constexpr FlagDefaultArg DefaultArg(char) {
}
///////////////////////////////////////////////////////////////////////////////
-// Flag current value auxiliary structs.
-
-constexpr int64_t UninitializedFlagValue() {
- return static_cast<int64_t>(0xababababababababll);
-}
+// Flag storage selector traits. Each trait indicates which storage kind to
+// use for the flag value.
template <typename T>
using FlagUseValueAndInitBitStorage =
@@ -321,9 +318,11 @@ enum class FlagValueStorageKind : uint8_t {
kValueAndInitBit = 0,
kOneWordAtomic = 1,
kSequenceLocked = 2,
- kAlignedBuffer = 3,
+ kHeapAllocated = 3,
};
+// This constexpr function returns the storage kind for the given flag value
+// type.
template <typename T>
static constexpr FlagValueStorageKind StorageKind() {
return FlagUseValueAndInitBitStorage<T>::value
@@ -332,14 +331,24 @@ static constexpr FlagValueStorageKind StorageKind() {
? FlagValueStorageKind::kOneWordAtomic
: FlagUseSequenceLockStorage<T>::value
? FlagValueStorageKind::kSequenceLocked
- : FlagValueStorageKind::kAlignedBuffer;
+ : FlagValueStorageKind::kHeapAllocated;
}
+// This is a base class for the storage classes used by kOneWordAtomic and
+// kValueAndInitBit storage kinds. It literally just stores the one word value
+// as an atomic. By default, it is initialized to a magic value that is unlikely
+// to be a valid value for the flag value type.
struct FlagOneWordValue {
+ constexpr static int64_t Uninitialized() {
+ return static_cast<int64_t>(0xababababababababll);
+ }
+
+ constexpr FlagOneWordValue() : value(Uninitialized()) {}
constexpr explicit FlagOneWordValue(int64_t v) : value(v) {}
std::atomic<int64_t> value;
};
+// This class represents the memory layout used by the kValueAndInitBit
+// storage kind.
template <typename T>
struct alignas(8) FlagValueAndInitBit {
T value;
@@ -348,16 +357,91 @@ struct alignas(8) FlagValueAndInitBit {
uint8_t init;
};
+// This class implements an aligned pointer with two options stored via masks
+// in unused bits of the pointer value (due to alignment requirement).
+// - IsUnprotectedReadCandidate - indicates that the value can be switched to
+// unprotected read without a lock.
+// - HasBeenRead - indicates that the value has been read at least once.
+// - AllowsUnprotectedRead - combination of the two options above and indicates
+// that the value can now be read without a lock.
+// Further details of these options and their use are covered in the description
+// of the FlagValue<T, FlagValueStorageKind::kHeapAllocated> specialization.
+class MaskedPointer {
+ public:
+ using mask_t = uintptr_t;
+ using ptr_t = void*;
+
+ static constexpr int RequiredAlignment() { return 4; }
+
+ constexpr explicit MaskedPointer(ptr_t rhs) : ptr_(rhs) {}
+ MaskedPointer(ptr_t rhs, bool is_candidate);
+
+ void* Ptr() const {
+ return reinterpret_cast<void*>(reinterpret_cast<mask_t>(ptr_) &
+ kPtrValueMask);
+ }
+ bool AllowsUnprotectedRead() const {
+ return (reinterpret_cast<mask_t>(ptr_) & kAllowsUnprotectedRead) ==
+ kAllowsUnprotectedRead;
+ }
+ bool IsUnprotectedReadCandidate() const;
+ bool HasBeenRead() const;
+
+ void Set(FlagOpFn op, const void* src, bool is_candidate);
+ void MarkAsRead();
+
+ private:
+ // Masks
+  // Indicates that the flag value is either the default or originated from
+  // the command line.
+ static constexpr mask_t kUnprotectedReadCandidate = 0x1u;
+  // Indicates that the flag has been read.
+ static constexpr mask_t kHasBeenRead = 0x2u;
+ static constexpr mask_t kAllowsUnprotectedRead =
+ kUnprotectedReadCandidate | kHasBeenRead;
+ static constexpr mask_t kPtrValueMask = ~kAllowsUnprotectedRead;
+
+ void ApplyMask(mask_t mask);
+ bool CheckMask(mask_t mask) const;
+
+ ptr_t ptr_;
+};
+
+// This class implements type-erased storage of a heap-allocated flag value.
+// It is used as a base class for the storage class of the kHeapAllocated
+// storage kind. The initial_buffer is expected to have an alignment of at
+// least MaskedPointer::RequiredAlignment(), so that the bits used by
+// MaskedPointer to store masks are set to 0. This guarantees that value
+// starts in an uninitialized state.
+struct FlagMaskedPointerValue {
+ constexpr explicit FlagMaskedPointerValue(MaskedPointer::ptr_t initial_buffer)
+ : value(MaskedPointer(initial_buffer)) {}
+
+ std::atomic<MaskedPointer> value;
+};
+
+// This is the forward declaration for the template that represents storage
+// for flag values. This template is expected to be explicitly specialized for
+// each storage kind; it does not have a generic default implementation.
template <typename T,
FlagValueStorageKind Kind = flags_internal::StorageKind<T>()>
struct FlagValue;
+// This specialization represents the storage of flag value types with the
+// kValueAndInitBit storage kind. It is based on the FlagOneWordValue class
+// and relies on the memory layout of FlagValueAndInitBit<T> to indicate
+// whether the value has been initialized.
template <typename T>
struct FlagValue<T, FlagValueStorageKind::kValueAndInitBit> : FlagOneWordValue {
constexpr FlagValue() : FlagOneWordValue(0) {}
bool Get(const SequenceLock&, T& dst) const {
int64_t storage = value.load(std::memory_order_acquire);
if (ABSL_PREDICT_FALSE(storage == 0)) {
+ // This assert is to ensure that the initialization inside FlagImpl::Init
+      // is able to set the init member correctly.
+ static_assert(offsetof(FlagValueAndInitBit<T>, init) == sizeof(T),
+ "Unexpected memory layout of FlagValueAndInitBit");
return false;
}
dst = absl::bit_cast<FlagValueAndInitBit<T>>(storage).value;
@@ -365,12 +449,16 @@ struct FlagValue<T, FlagValueStorageKind::kValueAndInitBit> : FlagOneWordValue {
}
};
+// This specialization represents the storage of flag value types with the
+// kOneWordAtomic storage kind. It is based on the FlagOneWordValue class and
+// relies on the magic uninitialized state of a default constructed
+// FlagOneWordValue to indicate whether the value has been initialized.
template <typename T>
struct FlagValue<T, FlagValueStorageKind::kOneWordAtomic> : FlagOneWordValue {
- constexpr FlagValue() : FlagOneWordValue(UninitializedFlagValue()) {}
+ constexpr FlagValue() : FlagOneWordValue() {}
bool Get(const SequenceLock&, T& dst) const {
int64_t one_word_val = value.load(std::memory_order_acquire);
- if (ABSL_PREDICT_FALSE(one_word_val == UninitializedFlagValue())) {
+ if (ABSL_PREDICT_FALSE(one_word_val == FlagOneWordValue::Uninitialized())) {
return false;
}
std::memcpy(&dst, static_cast<const void*>(&one_word_val), sizeof(T));
@@ -378,6 +466,12 @@ struct FlagValue<T, FlagValueStorageKind::kOneWordAtomic> : FlagOneWordValue {
}
};
+// This specialization represents the storage of flag value types with the
+// kSequenceLocked storage kind. This storage is used for trivially copyable
+// types with size greater than 8 bytes. It relies on the uninitialized state
+// of the SequenceLock to indicate whether the value has been initialized, and
+// it provides lock-free read access to the underlying value once it is
+// initialized.
template <typename T>
struct FlagValue<T, FlagValueStorageKind::kSequenceLocked> {
bool Get(const SequenceLock& lock, T& dst) const {
@@ -391,11 +485,62 @@ struct FlagValue<T, FlagValueStorageKind::kSequenceLocked> {
std::atomic<uint64_t>) std::atomic<uint64_t> value_words[kNumWords];
};
+// This specialization represents the storage of flag value types with the
+// kHeapAllocated storage kind. This is the storage of last resort, used when
+// none of the other storage kinds is applicable.
+//
+// Generally speaking, values with this storage kind can't be accessed
+// atomically, and thus can't be read without holding a lock. If we ever want
+// to avoid the lock, we need to leak the old value every time a new flag
+// value is set (since otherwise we are in danger of a race condition).
+//
+// Instead of doing that, this implementation attempts to cater to some common
+// use cases by allowing at most 2 values to be leaked: the default value and
+// the value set from the command line.
+//
+// This specialization provides an initial buffer for the first flag value.
+// This is where the default value is going to be stored. We attempt to reuse
+// this buffer if possible, including storing the value set from the command
+// line there.
+//
+// As long as we only read this value, we can access it without a lock (in
+// practice we still use the lock for the very first read so we can set the
+// "has been read" option on this flag).
+//
+// If the flag is specified on the command line, we store the parsed value
+// either in the internal buffer (if the default value has never been read) or
+// we leak the default value and allocate new storage for the parsed value.
+// This value is also a candidate for an unprotected read. If the flag is set
+// programmatically after the command line is parsed, the storage for this
+// value is going to be leaked. Note that in both scenarios we do not have a
+// real leak: we store the leaked value pointers in the internal freelist to
+// avoid triggering complaints from the memory leak checker.
+//
+// If the flag is ever set programmatically, it stops being a candidate for an
+// unprotected read, and any follow-up access to the flag value requires a
+// lock. Note that if the value is set programmatically before the command
+// line is parsed, we can switch back to enabling unprotected reads for that
+// value.
template <typename T>
-struct FlagValue<T, FlagValueStorageKind::kAlignedBuffer> {
- bool Get(const SequenceLock&, T&) const { return false; }
+struct FlagValue<T, FlagValueStorageKind::kHeapAllocated>
+ : FlagMaskedPointerValue {
+  // We const initialize the value with an unmasked pointer to the internal
+  // buffer, making sure it is not a candidate for an unprotected read. This
+  // way we can ensure Init is done before any access to the flag value.
+ constexpr FlagValue() : FlagMaskedPointerValue(&buffer[0]) {}
+
+ bool Get(const SequenceLock&, T& dst) const {
+ MaskedPointer ptr_value = value.load(std::memory_order_acquire);
+
+ if (ABSL_PREDICT_TRUE(ptr_value.AllowsUnprotectedRead())) {
+ ::new (static_cast<void*>(&dst)) T(*static_cast<T*>(ptr_value.Ptr()));
+ return true;
+ }
+ return false;
+ }
- alignas(T) char value[sizeof(T)];
+ alignas(MaskedPointer::RequiredAlignment()) alignas(
+ T) char buffer[sizeof(T)]{};
};
///////////////////////////////////////////////////////////////////////////////
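
Taken together with StoreValue and Read in flag.cc, a flag of such a type
retires at most two values over a typical lifetime. A hedged walk-through,
assuming a hypothetical non-trivially copyable type T registered as
ABSL_FLAG(T, foo, {}, "help"); the same counts are asserted in flag_test.cc
above:

void LifecycleSketch(int argc, char** argv) {
  (void)absl::GetFlag(FLAGS_foo);      // default in internal buffer; first
                                       // read marks it kHasBeenRead
  absl::ParseCommandLine(argc, argv);  // --foo=x: default was read, so it is
                                       // retired; parsed value heap allocated
  (void)absl::GetFlag(FLAGS_foo);      // lock-free read of the parsed value
  absl::SetFlag(&FLAGS_foo, T{});      // parsed value was read, so it is
                                       // retired too (2 values on freelist)
  absl::SetFlag(&FLAGS_foo, T{});      // not read since the last set, so its
                                       // storage is reused (still 2)
}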
@@ -483,7 +628,7 @@ class FlagImpl final : public CommandLineFlag {
// Used in read/write operations to validate source/target has correct type.
// For example if flag is declared as absl::Flag<int> FLAGS_foo, a call to
// absl::GetFlag(FLAGS_foo) validates that the type of FLAGS_foo is indeed
- // int. To do that we pass the "assumed" type id (which is deduced from type
+ // int. To do that we pass the assumed type id (which is deduced from type
// int) as an argument `type_id`, which is in turn is validated against the
// type id stored in flag object by flag definition statement.
void AssertValidType(FlagFastTypeId type_id,
@@ -504,17 +649,13 @@ class FlagImpl final : public CommandLineFlag {
void Init();
// Offset value access methods. One per storage kind. These methods to not
- // respect const correctness, so be very carefull using them.
+ // respect const correctness, so be very careful using them.
// This is a shared helper routine which encapsulates most of the magic. Since
// it is only used inside the three routines below, which are defined in
// flag.cc, we can define it in that file as well.
template <typename StorageT>
StorageT* OffsetValue() const;
- // This is an accessor for a value stored in an aligned buffer storage
- // used for non-trivially-copyable data types.
- // Returns a mutable pointer to the start of a buffer.
- void* AlignedBufferValue() const;
// The same as above, but used for sequencelock-protected storage.
std::atomic<uint64_t>* AtomicBufferValue() const;
@@ -523,13 +664,16 @@ class FlagImpl final : public CommandLineFlag {
// mutable reference to an atomic value.
std::atomic<int64_t>& OneWordValue() const;
+ std::atomic<MaskedPointer>& PtrStorage() const;
+
// Attempts to parse supplied `value` string. If parsing is successful,
// returns new value. Otherwise returns nullptr.
std::unique_ptr<void, DynValueDeleter> TryParse(absl::string_view value,
std::string& err) const
ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
// Stores the flag value based on the pointer to the source.
- void StoreValue(const void* src) ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
+ void StoreValue(const void* src, ValueSource source)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
// Copy the flag data, protected by `seq_lock_` into `dst`.
//
@@ -585,7 +729,7 @@ class FlagImpl final : public CommandLineFlag {
const char* const name_;
// The file name where ABSL_FLAG resides.
const char* const filename_;
- // Type-specific operations "vtable".
+ // Type-specific operations vtable.
const FlagOpFn op_;
// Help message literal or function to generate it.
const FlagHelpMsg help_;
@@ -720,16 +864,21 @@ class FlagImplPeer {
// Implementation of Flag value specific operations routine.
template <typename T>
void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) {
+ struct AlignedSpace {
+ alignas(MaskedPointer::RequiredAlignment()) alignas(T) char buf[sizeof(T)];
+ };
+ using Allocator = std::allocator<AlignedSpace>;
switch (op) {
case FlagOp::kAlloc: {
- std::allocator<T> alloc;
- return std::allocator_traits<std::allocator<T>>::allocate(alloc, 1);
+ Allocator alloc;
+ return std::allocator_traits<Allocator>::allocate(alloc, 1);
}
case FlagOp::kDelete: {
T* p = static_cast<T*>(v2);
p->~T();
- std::allocator<T> alloc;
- std::allocator_traits<std::allocator<T>>::deallocate(alloc, p, 1);
+ Allocator alloc;
+ std::allocator_traits<Allocator>::deallocate(
+ alloc, reinterpret_cast<AlignedSpace*>(p), 1);
return nullptr;
}
case FlagOp::kCopy:
@@ -789,7 +938,7 @@ class FlagRegistrar {
return *this;
}
- // Make the registrar "die" gracefully as an empty struct on a line where
+ // Makes the registrar die gracefully as an empty struct on a line where
// registration happens. Registrar objects are intended to live only as
// temporary.
constexpr operator FlagRegistrarEmpty() const { return {}; } // NOLINT
@@ -798,6 +947,10 @@ class FlagRegistrar {
Flag<T>& flag_; // Flag being registered (not owned).
};
+///////////////////////////////////////////////////////////////////////////////
+// Test-only API.
+uint64_t NumLeakedFlagValues();
+
} // namespace flags_internal
ABSL_NAMESPACE_END
} // namespace absl