author    Abseil Team <absl-team@google.com>  2020-03-03 11:22:10 -0800
committer Andy Soffer <asoffer@google.com>    2020-03-03 17:32:55 -0500
commit    b19ba96766db08b1f32605cb4424a0e7ea0c7584 (patch)
tree      c4ba295b067b000b9d84410ec81e0095715641a5 /absl/flags/internal/flag.cc
parent    06f0e767d13d4d68071c4fc51e25724e0fc8bc74 (diff)
Export of internal Abseil changes
-- a3e58c1870a9626039f4d178d2d599319bd9f8a8 by Matt Kulukundis <kfm@google.com>:

Allow MakeCordFromExternal to take a zero arg releaser.
PiperOrigin-RevId: 298650274

-- 01897c4a9bb99f3dc329a794019498ad345ddebd by Samuel Benzaquen <sbenza@google.com>:

Reduce library bloat for absl::Flag by moving the definition of base virtual functions to a .cc file. This removes the duplicate symbols in user translation units and has the side effect of moving the vtable definition as well (per the key function rule).
PiperOrigin-RevId: 298617920

-- 190f0d3782c63aed01046886d7fbc1be5bca2de9 by Derek Mauro <dmauro@google.com>:

Import GitHub #596: Unbreak stacktrace code for UWP apps
PiperOrigin-RevId: 298600834

-- cd5cf6f8c87b35b85a9584e94da2a99057345b73 by Gennadiy Rozental <rogeeff@google.com>:

Use a union of a heap-allocated pointer, a one-word atomic, and a two-word atomic to represent flag values. Any type T that is trivially copyable with sizeof(T) <= 8 will be stored in an atomic int64_t. Any type T that is trivially copyable with 8 < sizeof(T) <= 16 will be stored in an atomic AlignedTwoWords. We also introduce a value storage kind to distinguish these cases.
PiperOrigin-RevId: 298497200

-- f8fe7bd53bfed601f002f521e34ab4bc083fc28b by Matthew Brown <matthewbr@google.com>:

Ensure a deep copy and proper equality on absl::Status::ErasePayload
PiperOrigin-RevId: 298482742

-- a5c9ccddf4b04f444e3f7e27dbc14faf1fcb5373 by Gennadiy Rozental <rogeeff@google.com>:

Change the ChunkIterator implementation to use a fixed-capacity collection of CordRep*. We can now assume that depth never exceeds 91, which makes the comparison operator exception safe. I have tested that with this CL we do not observe any overhead from chunk_end; the compiler optimized this iterator away completely.
PiperOrigin-RevId: 298458472

-- 327ea5e8910bc388b03389c730763f9823abfce5 by Abseil Team <absl-team@google.com>:

Minor cleanups in b-tree code:
- Rename some variables: fix issues of different param names between definition/declaration, move away from `x` as a default meaningless variable name.
- Make init_leaf/init_internal be non-static methods (they already take the node as the first parameter).
- In internal_emplace/try_shrink, update root/rightmost the same way as in insert_unique/insert_multi.
- Replace a TODO with a comment.
PiperOrigin-RevId: 298432836

-- 8020ce9ec8558ee712d9733ae3d660ac1d3ffe1a by Abseil Team <absl-team@google.com>:

Guard against an unnecessary copy in case the buffer is empty. This is important in cases where the user is explicitly tuning their chunks to match PiecewiseChunkSize().
PiperOrigin-RevId: 298366044

-- 89324441d1c0c697c90ba7d8fc63639805fcaa9d by Abseil Team <absl-team@google.com>:

Internal change
PiperOrigin-RevId: 298219363

GitOrigin-RevId: a3e58c1870a9626039f4d178d2d599319bd9f8a8
Change-Id: I28dffc684b6fd0292b94807b88ec6664d5d0e183
Diffstat (limited to 'absl/flags/internal/flag.cc')
-rw-r--r--  absl/flags/internal/flag.cc  107
1 file changed, 77 insertions(+), 30 deletions(-)
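
Before the diff itself: the commit message above describes the new value-storage scheme (trivially copyable types up to 8 bytes go into an atomic int64_t, up to 16 bytes into an atomic AlignedTwoWords, everything else onto the heap). The sketch below is illustrative only, not the actual Abseil implementation; StorageKind, StorageKindFor, and the simplified AlignedTwoWords struct are names made up for this example.

#include <cstdint>
#include <string>
#include <type_traits>

enum class StorageKind { kHeapAllocated, kOneWordAtomic, kTwoWordsAtomic };

// Simplified 16-byte, 16-aligned payload standing in for the internal
// AlignedTwoWords type mentioned in the commit message.
struct AlignedTwoWords {
  alignas(16) std::int64_t words[2];
};

// Map a value type to a storage kind following the rules described above:
// trivially copyable and <= 8 bytes -> one atomic word, trivially copyable
// and <= 16 bytes -> two atomic words, everything else -> heap pointer.
template <typename T>
constexpr StorageKind StorageKindFor() {
  return !std::is_trivially_copyable<T>::value
             ? StorageKind::kHeapAllocated
             : sizeof(T) <= sizeof(std::int64_t)
                   ? StorageKind::kOneWordAtomic
                   : sizeof(T) <= sizeof(AlignedTwoWords)
                         ? StorageKind::kTwoWordsAtomic
                         : StorageKind::kHeapAllocated;
}

static_assert(StorageKindFor<bool>() == StorageKind::kOneWordAtomic,
              "bool fits in one atomic word");
static_assert(StorageKindFor<std::string>() == StorageKind::kHeapAllocated,
              "std::string is not trivially copyable");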
diff --git a/absl/flags/internal/flag.cc b/absl/flags/internal/flag.cc
index 5a921e28..a944e16e 100644
--- a/absl/flags/internal/flag.cc
+++ b/absl/flags/internal/flag.cc
@@ -77,19 +77,33 @@ class MutexRelock {
void FlagImpl::Init() {
new (&data_guard_) absl::Mutex;
- absl::MutexLock lock(reinterpret_cast<absl::Mutex*>(&data_guard_));
-
- value_.dynamic = MakeInitValue().release();
- StoreAtomic();
+ // At this point the default_value_ always points to gen_func.
+ std::unique_ptr<void, DynValueDeleter> init_value(
+ (*default_value_.gen_func)(), DynValueDeleter{op_});
+ switch (ValueStorageKind()) {
+ case FlagValueStorageKind::kHeapAllocated:
+ value_.dynamic = init_value.release();
+ break;
+ case FlagValueStorageKind::kOneWordAtomic: {
+ int64_t atomic_value;
+ std::memcpy(&atomic_value, init_value.get(), Sizeof(op_));
+ value_.one_word_atomic.store(atomic_value, std::memory_order_release);
+ break;
+ }
+ case FlagValueStorageKind::kTwoWordsAtomic: {
+ AlignedTwoWords atomic_value{0, 0};
+ std::memcpy(&atomic_value, init_value.get(), Sizeof(op_));
+ value_.two_words_atomic.store(atomic_value, std::memory_order_release);
+ break;
+ }
+ }
}
-// Ensures that the lazily initialized data is initialized,
-// and returns pointer to the mutex guarding flags data.
absl::Mutex* FlagImpl::DataGuard() const {
absl::call_once(const_cast<FlagImpl*>(this)->init_control_, &FlagImpl::Init,
const_cast<FlagImpl*>(this));
- // data_guard_ is initialized.
+ // data_guard_ is initialized inside Init.
return reinterpret_cast<absl::Mutex*>(&data_guard_);
}
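
The hunk above replaces the old eager copy in Init() with a switch over the storage kind and keeps DataGuard() as the lazy-initialization entry point. Below is a minimal, self-contained sketch of that absl::call_once plus placement-new pattern; the LazyGuard class is hypothetical, and the byte-buffer declaration of data_guard_ reflects what the flag header is assumed to declare.

#include <new>

#include "absl/base/call_once.h"
#include "absl/synchronization/mutex.h"

class LazyGuard {
 public:
  absl::Mutex* DataGuard() const {
    // The first caller runs Init(); every other caller blocks until it is done.
    absl::call_once(const_cast<LazyGuard*>(this)->init_control_,
                    &LazyGuard::Init, const_cast<LazyGuard*>(this));
    return reinterpret_cast<absl::Mutex*>(&data_guard_);
  }

 private:
  // Construct the mutex in place inside raw storage; it is never destroyed,
  // so the guard stays usable for the lifetime of the program.
  void Init() { new (&data_guard_) absl::Mutex; }

  absl::once_flag init_control_;
  alignas(absl::Mutex) mutable char data_guard_[sizeof(absl::Mutex)];
};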
@@ -129,8 +143,24 @@ std::unique_ptr<void, DynValueDeleter> FlagImpl::MakeInitValue() const {
}
void FlagImpl::StoreValue(const void* src) {
- flags_internal::Copy(op_, src, value_.dynamic);
- StoreAtomic();
+ switch (ValueStorageKind()) {
+ case FlagValueStorageKind::kHeapAllocated:
+ Copy(op_, src, value_.dynamic);
+ break;
+ case FlagValueStorageKind::kOneWordAtomic: {
+ int64_t one_word_val;
+ std::memcpy(&one_word_val, src, Sizeof(op_));
+ value_.one_word_atomic.store(one_word_val, std::memory_order_release);
+ break;
+ }
+ case FlagValueStorageKind::kTwoWordsAtomic: {
+ AlignedTwoWords two_words_val{0, 0};
+ std::memcpy(&two_words_val, src, Sizeof(op_));
+ value_.two_words_atomic.store(two_words_val, std::memory_order_release);
+ break;
+ }
+ }
+
modified_ = true;
++counter_;
InvokeCallback();
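
StoreValue() above writes small trivially copyable values straight into an atomic word so that readers never need the mutex. A hedged sketch of that one-word release-store path, with illustrative names (StoreOneWord is not an Abseil function):

#include <atomic>
#include <cstdint>
#include <cstring>
#include <type_traits>

// Publish a small trivially copyable value into a one-word atomic slot.
template <typename T>
void StoreOneWord(std::atomic<std::int64_t>& slot, const T& value) {
  static_assert(std::is_trivially_copyable<T>::value,
                "T must be trivially copyable");
  static_assert(sizeof(T) <= sizeof(std::int64_t), "T must fit in one word");
  std::int64_t word = 0;                        // zero so padding bits are defined
  std::memcpy(&word, &value, sizeof(T));        // bit-copy the value into the word
  slot.store(word, std::memory_order_release);  // release-publish to lock-free readers
}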
@@ -165,9 +195,25 @@ std::string FlagImpl::DefaultValue() const {
}
std::string FlagImpl::CurrentValue() const {
- absl::MutexLock l(DataGuard());
+ DataGuard(); // Make sure flag initialized
+ switch (ValueStorageKind()) {
+ case FlagValueStorageKind::kHeapAllocated: {
+ absl::MutexLock l(DataGuard());
+ return flags_internal::Unparse(op_, value_.dynamic);
+ }
+ case FlagValueStorageKind::kOneWordAtomic: {
+ const auto one_word_val =
+ value_.one_word_atomic.load(std::memory_order_acquire);
+ return flags_internal::Unparse(op_, &one_word_val);
+ }
+ case FlagValueStorageKind::kTwoWordsAtomic: {
+ const auto two_words_val =
+ value_.two_words_atomic.load(std::memory_order_acquire);
+ return flags_internal::Unparse(op_, &two_words_val);
+ }
+ }
- return flags_internal::Unparse(op_, value_.dynamic);
+ return "";
}
void FlagImpl::SetCallback(const FlagCallbackFunc mutation_callback) {
@@ -244,26 +290,27 @@ std::unique_ptr<void, DynValueDeleter> FlagImpl::TryParse(
}
void FlagImpl::Read(void* dst) const {
- absl::ReaderMutexLock l(DataGuard());
+ DataGuard(); // Make sure flag initialized
+ switch (ValueStorageKind()) {
+ case FlagValueStorageKind::kHeapAllocated: {
+ absl::MutexLock l(DataGuard());
- flags_internal::CopyConstruct(op_, value_.dynamic, dst);
-}
-
-void FlagImpl::StoreAtomic() {
- size_t data_size = flags_internal::Sizeof(op_);
-
- if (data_size <= sizeof(int64_t)) {
- int64_t t = 0;
- std::memcpy(&t, value_.dynamic, data_size);
- value_.atomics.small_atomic.store(t, std::memory_order_release);
- }
-#if defined(ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD)
- else if (data_size <= sizeof(FlagsInternalTwoWordsType)) {
- FlagsInternalTwoWordsType t{0, 0};
- std::memcpy(&t, value_.dynamic, data_size);
- value_.atomics.big_atomic.store(t, std::memory_order_release);
+ flags_internal::CopyConstruct(op_, value_.dynamic, dst);
+ break;
+ }
+ case FlagValueStorageKind::kOneWordAtomic: {
+ const auto one_word_val =
+ value_.one_word_atomic.load(std::memory_order_acquire);
+ std::memcpy(dst, &one_word_val, Sizeof(op_));
+ break;
+ }
+ case FlagValueStorageKind::kTwoWordsAtomic: {
+ const auto two_words_val =
+ value_.two_words_atomic.load(std::memory_order_acquire);
+ std::memcpy(dst, &two_words_val, Sizeof(op_));
+ break;
+ }
}
-#endif
}
void FlagImpl::Write(const void* src) {
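
Read() above is the consuming half of the same protocol: an acquire load pairs with the release store, so a reader observes a fully written value before the bits are copied out. A companion sketch under the same assumptions (LoadOneWord is illustrative, T is trivially copyable and at most one word):

#include <atomic>
#include <cstdint>
#include <cstring>
#include <type_traits>

// Read back a value published by a release store into a one-word atomic slot.
template <typename T>
void LoadOneWord(const std::atomic<std::int64_t>& slot, T* dst) {
  static_assert(std::is_trivially_copyable<T>::value,
                "T must be trivially copyable");
  static_assert(sizeof(T) <= sizeof(std::int64_t), "T must fit in one word");
  const std::int64_t word = slot.load(std::memory_order_acquire);  // pairs with release
  std::memcpy(dst, &word, sizeof(T));  // copy the stored bits into the caller's object
}

On typical 64-bit targets this acquire/release pair compiles down to plain aligned loads and stores, which is what makes the lock-free read path cheap compared to taking DataGuard().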
@@ -339,7 +386,7 @@ bool FlagImpl::SetFromString(absl::string_view value, FlagSettingMode set_mode,
}
if (!modified_) {
- // Need to set both default value *and* current, in this case
+ // Need to set both default value *and* current, in this case.
StoreValue(default_value_.dynamic_value);
modified_ = false;
}