diff options
 absl/base/internal/spinlock.h   | 8 ++++++++
 absl/container/internal/btree.h | 6 +++---
 2 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
index ac40daff..6d8d8ddd 100644
--- a/absl/base/internal/spinlock.h
+++ b/absl/base/internal/spinlock.h
@@ -120,6 +120,14 @@ class ABSL_LOCKABLE SpinLock {
     return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
   }
 
+  // Return immediately if this thread holds the SpinLock exclusively.
+  // Otherwise, report an error by crashing with a diagnostic.
+  inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
+    if (!IsHeld()) {
+      ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
+    }
+  }
+
  protected:
   // These should not be exported except for testing.
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index 29603379..26bd5e14 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -238,7 +238,7 @@ struct common_params {
   using allocator_type = Alloc;
   using key_type = Key;
-  using size_type = std::make_signed<size_t>::type;
+  using size_type = size_t;
   using difference_type = ptrdiff_t;
 
   using slot_policy = SlotPolicy;
@@ -1497,7 +1497,7 @@ inline void btree_node<P>::emplace_value(const size_type i,
   set_finish(finish() + 1);
 
   if (!leaf() && finish() > i + 1) {
-    for (int j = finish(); j > i + 1; --j) {
+    for (field_type j = finish(); j > i + 1; --j) {
       set_child(j, child(j - 1));
     }
     clear_child(i + 1);
@@ -2124,7 +2124,7 @@ auto btree<P>::erase_range(iterator begin, iterator end)
     return {0, begin};
   }
 
-  if (count == size_) {
+  if (static_cast<size_type>(count) == size_) {
     clear();
     return {count, this->end()};
   }