author     Abseil Team <absl-team@google.com>  2021-08-18 10:42:05 -0700
committer  Andy Getz <durandal@google.com>  2021-08-18 14:43:23 -0400
commit     189d55a57f57731d335fd84999d5dccf771b8e6b
tree       67a9cf189aeb7b270cfc35fda8da7ff8c7522211 /absl/strings/internal
parent     c1aa431c071af43d7935bc0d44d560ea851a7696
Export of internal Abseil changes
-- 84bcdcd9497d1ec989f50c8dee93f656507c7bd6 by Abseil Team <absl-team@google.com>:

Reduce length of the `flat_hash_map<std::string, V>` type name in order to
reduce binary bloat.

PiperOrigin-RevId: 391560997

-- 5f49bd435e066989851dc045c7786ef400413f66 by Greg Falcon <gfalcon@google.com>:

Claim a bit from the Cord refcount for future use. Also rename the
increasingly inaccurately named "Refcount" class to "RefcountAndFlags".

In optimized builds, this adds an extra mask instruction to decrement and
test operations, but no new branches. Future flags can be added at no extra
cost. Each additional flag will of course reduce the range of our refcount,
but even with the bit added, we still support refcounts of 500 million.

PiperOrigin-RevId: 391557567
GitOrigin-RevId: 84bcdcd9497d1ec989f50c8dee93f656507c7bd6
Change-Id: I051823bf5a9a42d4fa9200e39563ab585ecab331
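As a reading aid for the diff below, here is a minimal, self-contained sketch
of the bit layout this change introduces. It is not the Abseil source (which
lives in absl/strings/internal/cord_internal.h and has more operations); the
constants mirror the diff, and the class name is invented for the sketch.

#include <atomic>
#include <cassert>
#include <cstdint>

class RefcountAndFlagsSketch {
 public:
  constexpr RefcountAndFlagsSketch() : count_(kRefIncrement) {}

  void Increment() {
    count_.fetch_add(kRefIncrement, std::memory_order_relaxed);
  }

  // Returns true while other references remain. The AND with kRefcountMask
  // is the "one extra mask instruction" the message mentions; it adds no
  // new branches.
  bool Decrement() {
    int32_t refcount = count_.load(std::memory_order_acquire) & kRefcountMask;
    assert(refcount > 0 || (refcount & kImmortalFlag));
    return refcount != kRefIncrement &&
           (count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
            kRefcountMask) != kRefIncrement;
  }

  // Shift past the flag bits to recover the plain count. With 2 flag bits
  // in a signed 32-bit word, 29 bits of count remain: roughly 536 million
  // references, the "500 million" cited above.
  int32_t Get() const {
    return count_.load(std::memory_order_acquire) >> kNumFlags;
  }

 private:
  enum : int32_t {
    kNumFlags = 2,
    kImmortalFlag = 0x1,
    kReservedFlag = 0x2,             // claimed for future use by this change
    kRefIncrement = 1 << kNumFlags,  // each reference adds 4
    kRefcountMask = ~kReservedFlag,
  };
  std::atomic<int32_t> count_;
};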
Diffstat (limited to 'absl/strings/internal')
-rw-r--r--  absl/strings/internal/cord_internal.h   | 66
-rw-r--r--  absl/strings/internal/cord_rep_btree.h  |  2
2 files changed, 39 insertions(+), 29 deletions(-)
diff --git a/absl/strings/internal/cord_internal.h b/absl/strings/internal/cord_internal.h
index b7f3f4c0..7172b147 100644
--- a/absl/strings/internal/cord_internal.h
+++ b/absl/strings/internal/cord_internal.h
@@ -80,12 +80,13 @@ enum Constants {
kMaxBytesToCopy = 511
};
-// Wraps std::atomic for reference counting.
-class Refcount {
+// Compact class for tracking the reference count and state flags for CordRep
+// instances. Data is stored in an atomic int32_t for compactness and speed.
+class RefcountAndFlags {
public:
- constexpr Refcount() : count_{kRefIncrement} {}
+ constexpr RefcountAndFlags() : count_{kRefIncrement} {}
struct Immortal {};
- explicit constexpr Refcount(Immortal) : count_(kImmortalTag) {}
+ explicit constexpr RefcountAndFlags(Immortal) : count_(kImmortalFlag) {}
// Increments the reference count. Imposes no memory ordering.
inline void Increment() {
@@ -98,26 +99,27 @@ class Refcount {
// Returns false if there are no references outstanding; true otherwise.
// Inserts barriers to ensure that state written before this method returns
// false will be visible to a thread that just observed this method returning
- // false.
+ // false. Always returns false when the immortal bit is set.
inline bool Decrement() {
- int32_t refcount = count_.load(std::memory_order_acquire);
- assert(refcount > 0 || refcount & kImmortalTag);
+ int32_t refcount = count_.load(std::memory_order_acquire) & kRefcountMask;
+ assert(refcount > 0 || refcount & kImmortalFlag);
return refcount != kRefIncrement &&
- count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) !=
- kRefIncrement;
+ (count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
+ kRefcountMask) != kRefIncrement;
}
// Same as Decrement, but expects that the refcount is greater than 1.
inline bool DecrementExpectHighRefcount() {
int32_t refcount =
- count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel);
- assert(refcount > 0 || refcount & kImmortalTag);
+ count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
+ kRefcountMask;
+ assert(refcount > 0 || refcount & kImmortalFlag);
return refcount != kRefIncrement;
}
// Returns the current reference count using acquire semantics.
inline int32_t Get() const {
- return count_.load(std::memory_order_acquire) >> kImmortalShift;
+ return count_.load(std::memory_order_acquire) >> kNumFlags;
}
// Returns whether the atomic integer is 1.
@@ -127,26 +129,34 @@ class Refcount {
// This call performs the test for a reference count of one, and
// performs the memory barrier needed for the owning thread
// to act on the object, knowing that it has exclusive access to the
- // object.
+ // object. Always returns false when the immortal bit is set.
inline bool IsOne() {
- return count_.load(std::memory_order_acquire) == kRefIncrement;
+ return (count_.load(std::memory_order_acquire) & kRefcountMask) ==
+ kRefIncrement;
}
bool IsImmortal() const {
- return (count_.load(std::memory_order_relaxed) & kImmortalTag) != 0;
+ return (count_.load(std::memory_order_relaxed) & kImmortalFlag) != 0;
}
private:
- // We reserve the bottom bit to tag a reference count as immortal.
- // By making it `1` we ensure that we never reach `0` when adding/subtracting
- // `2`, thus it never looks as if it should be destroyed.
- // These are used for the StringConstant constructor where we do not increase
- // the refcount at construction time (due to constinit requirements) but we
- // will still decrease it at destruction time to avoid branching on Unref.
+ // We reserve the bottom bits for flags.
+ // kImmortalFlag indicates that this entity should never be collected; it is
+ // used for the StringConstant constructor to avoid collecting immutable
+ // constant cords.
+ // kReservedFlag is reserved for future use.
enum {
- kImmortalShift = 1,
- kRefIncrement = 1 << kImmortalShift,
- kImmortalTag = kRefIncrement - 1
+ kNumFlags = 2,
+
+ kImmortalFlag = 0x1,
+ kReservedFlag = 0x2,
+ kRefIncrement = (1 << kNumFlags),
+
+ // Bitmask to use when checking refcount by equality. This masks out
+ // all flags except kImmortalFlag, which is part of the refcount for
+ // purposes of equality. (A refcount of 0 or 1 does not count as 0 or 1
+ // if the immortal bit is set.)
+ kRefcountMask = ~kReservedFlag,
};
std::atomic<int32_t> count_;
@@ -195,13 +205,13 @@ static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive");
struct CordRep {
CordRep() = default;
- constexpr CordRep(Refcount::Immortal immortal, size_t l)
+ constexpr CordRep(RefcountAndFlags::Immortal immortal, size_t l)
: length(l), refcount(immortal), tag(EXTERNAL), storage{} {}
// The following three fields have to be less than 32 bytes since
// that is the smallest supported flat node size.
size_t length;
- Refcount refcount;
+ RefcountAndFlags refcount;
// If tag < FLAT, it represents CordRepKind and indicates the type of node.
// Otherwise, the node type is CordRepFlat and the tag is the encoded size.
uint8_t tag;
@@ -275,7 +285,7 @@ using ExternalReleaserInvoker = void (*)(CordRepExternal*);
struct CordRepExternal : public CordRep {
CordRepExternal() = default;
explicit constexpr CordRepExternal(absl::string_view str)
- : CordRep(Refcount::Immortal{}, str.size()),
+ : CordRep(RefcountAndFlags::Immortal{}, str.size()),
base(str.data()),
releaser_invoker(nullptr) {}
@@ -529,7 +539,7 @@ class InlineData {
// store the size in the last char of `as_chars_` shifted left + 1.
// Else we store it in a tree and store a pointer to that tree in
// `as_tree_.rep` and store a tag in `tagged_size`.
- union {
+ union {
char as_chars_[kMaxInline + 1];
AsTree as_tree_;
};
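Why the immortal bit can safely stay inside the masked value: kRefcountMask
clears only kReservedFlag, so an immortal count always has its low bit set,
while kRefIncrement (4) does not. The equality tests in Decrement() and
IsOne() can therefore never fire for an immortal object, no matter how many
times it is unreferenced. A small standalone check of that arithmetic, with
the constants copied from the diff above:

#include <cassert>
#include <cstdint>

int main() {
  constexpr int32_t kNumFlags = 2;
  constexpr int32_t kImmortalFlag = 0x1;
  constexpr int32_t kReservedFlag = 0x2;
  constexpr int32_t kRefIncrement = 1 << kNumFlags;  // 4
  constexpr int32_t kRefcountMask = ~kReservedFlag;

  // An immortal object starts at kImmortalFlag (0b001), not kRefIncrement.
  int32_t count = kImmortalFlag;
  for (int i = 0; i < 1000; ++i) {
    // The masked value keeps bit 0 set, and kRefIncrement has bit 0 clear,
    // so the "last reference" comparison can never succeed.
    assert((count & kRefcountMask) != kRefIncrement);
    count -= kRefIncrement;  // simulate one Unref
  }
  return 0;
}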
diff --git a/absl/strings/internal/cord_rep_btree.h b/absl/strings/internal/cord_rep_btree.h
index 8f000cab..303f4580 100644
--- a/absl/strings/internal/cord_rep_btree.h
+++ b/absl/strings/internal/cord_rep_btree.h
@@ -623,7 +623,7 @@ inline void CordRepBtree::Destroy(CordRepBtree* tree) {
inline CordRepBtree* CordRepBtree::CopyRaw() const {
auto* tree = static_cast<CordRepBtree*>(::operator new(sizeof(CordRepBtree)));
memcpy(static_cast<void*>(tree), this, sizeof(CordRepBtree));
- new (&tree->refcount) Refcount;
+ new (&tree->refcount) RefcountAndFlags;
return tree;
}
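The one-line change in CopyRaw is a mechanical consequence of the rename, but
the pattern is worth noting: the node is cloned bytewise, then the refcount
member is re-initialized with placement new so the copy starts at a single
reference rather than inheriting the source's count and flag bits. A minimal
sketch of that pattern, with hypothetical Node and CloneNode names standing
in for the CordRepBtree types:

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <new>

struct Node {
  std::atomic<int32_t> refcount{1};  // plain count in this sketch
  size_t length = 0;
};

// Hypothetical helper mirroring CordRepBtree::CopyRaw: raw-byte-copy the
// node, then placement-new the refcount so the clone starts fresh at one
// reference, regardless of the count or flags the source node carried.
Node* CloneNode(const Node* src) {
  auto* copy = static_cast<Node*>(::operator new(sizeof(Node)));
  std::memcpy(static_cast<void*>(copy), src, sizeof(Node));
  new (&copy->refcount) std::atomic<int32_t>(1);
  return copy;
}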