Diffstat (limited to 'absl/strings')
-rw-r--r--  absl/strings/CMakeLists.txt              1
-rw-r--r--  absl/strings/internal/cordz_handle.cc   36
-rw-r--r--  absl/strings/internal/cordz_handle.h    48
3 files changed, 55 insertions, 30 deletions
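
The diffs below fold CordzHandle's static `mutex_` and `dq_tail_` into a single constant-initialized `Queue` (`global_queue_`), give every handle a `queue_` pointer to that global, and add a debug-only `ODRCheck()` so a handle constructed against one copy of the Cord internals trips an `ABSL_RAW_CHECK` if it reaches another copy; the CMakeLists change adds the `absl::raw_logging_internal` dependency that check needs. A minimal standalone sketch of the hardening pattern, with hypothetical names that are not part of the Abseil sources:

#include <atomic>
#include <cassert>

// Hypothetical class illustrating the "remember which global I was built
// against" pattern; not Abseil code.
class Guarded {
 public:
  Guarded() = default;

  void Use() const {
    // If two copies of this class are linked into one binary (an ODR
    // violation), an object built by one copy can reach code from the other
    // copy, whose global_state_ lives at a different address.
    assert(state_ == &global_state_ && "ODR violation");
  }

 private:
  struct State {
    std::atomic<int> users{0};
  };

  static State global_state_;
  // Each instance records the global it was constructed against.
  State* const state_ = &global_state_;
};

Guarded::State Guarded::global_state_;

int main() {
  Guarded g;
  g.Use();  // Passes in a well-formed build.
}

The check is free in optimized builds (it compiles away under NDEBUG) and only fires in the exact situation it is meant to catch: two definitions of the same class, each with its own global state, ending up in one process.
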
diff --git a/absl/strings/CMakeLists.txt b/absl/strings/CMakeLists.txt
index 044e38b..8f41f06 100644
--- a/absl/strings/CMakeLists.txt
+++ b/absl/strings/CMakeLists.txt
@@ -655,6 +655,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
+ absl::raw_logging_internal
absl::synchronization
)
diff --git a/absl/strings/internal/cordz_handle.cc b/absl/strings/internal/cordz_handle.cc
index 7e82f85..22d0797 100644
--- a/absl/strings/internal/cordz_handle.cc
+++ b/absl/strings/internal/cordz_handle.cc
@@ -21,26 +21,26 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
-ABSL_CONST_INIT absl::Mutex CordzHandle::mutex_(absl::kConstInit);
-ABSL_CONST_INIT std::atomic<CordzHandle*> CordzHandle::dq_tail_{nullptr};
+ABSL_CONST_INIT CordzHandle::Queue CordzHandle::global_queue_(absl::kConstInit);
CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) {
if (is_snapshot) {
- MutexLock lock(&mutex_);
- CordzHandle* dq_tail = dq_tail_.load(std::memory_order_acquire);
+ MutexLock lock(&queue_->mutex);
+ CordzHandle* dq_tail = queue_->dq_tail.load(std::memory_order_acquire);
if (dq_tail != nullptr) {
dq_prev_ = dq_tail;
dq_tail->dq_next_ = this;
}
- dq_tail_.store(this, std::memory_order_release);
+ queue_->dq_tail.store(this, std::memory_order_release);
}
}
CordzHandle::~CordzHandle() {
+ ODRCheck();
if (is_snapshot_) {
std::vector<CordzHandle*> to_delete;
{
- absl::MutexLock lock(&mutex_);
+ absl::MutexLock lock(&queue_->mutex);
CordzHandle* next = dq_next_;
if (dq_prev_ == nullptr) {
// We were head of the queue, delete every CordzHandle until we reach
@@ -56,7 +56,7 @@ CordzHandle::~CordzHandle() {
if (next) {
next->dq_prev_ = dq_prev_;
} else {
- dq_tail_.store(dq_prev_, std::memory_order_release);
+ queue_->dq_tail.store(dq_prev_, std::memory_order_release);
}
}
for (CordzHandle* handle : to_delete) {
@@ -67,13 +67,15 @@ CordzHandle::~CordzHandle() {
void CordzHandle::Delete(CordzHandle* handle) {
if (handle) {
- if (!handle->is_snapshot_ && !UnsafeDeleteQueueEmpty()) {
- MutexLock lock(&mutex_);
- CordzHandle* dq_tail = dq_tail_.load(std::memory_order_acquire);
+ handle->ODRCheck();
+ Queue* const queue = handle->queue_;
+ if (!handle->is_snapshot_ && !queue->IsEmpty()) {
+ MutexLock lock(&queue->mutex);
+ CordzHandle* dq_tail = queue->dq_tail.load(std::memory_order_acquire);
if (dq_tail != nullptr) {
handle->dq_prev_ = dq_tail;
dq_tail->dq_next_ = handle;
- dq_tail_.store(handle, std::memory_order_release);
+ queue->dq_tail.store(handle, std::memory_order_release);
return;
}
}
@@ -83,8 +85,8 @@ void CordzHandle::Delete(CordzHandle* handle) {
std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
std::vector<const CordzHandle*> handles;
- MutexLock lock(&mutex_);
- CordzHandle* dq_tail = dq_tail_.load(std::memory_order_acquire);
+ MutexLock lock(&global_queue_.mutex);
+ CordzHandle* dq_tail = global_queue_.dq_tail.load(std::memory_order_acquire);
for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) {
handles.push_back(p);
}
@@ -93,12 +95,13 @@ std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
const CordzHandle* handle) const {
+ ODRCheck();
if (!is_snapshot_) return false;
if (handle == nullptr) return true;
if (handle->is_snapshot_) return false;
bool snapshot_found = false;
- MutexLock lock(&mutex_);
- for (const CordzHandle* p = dq_tail_; p; p = p->dq_prev_) {
+ MutexLock lock(&queue_->mutex);
+ for (const CordzHandle* p = queue_->dq_tail; p; p = p->dq_prev_) {
if (p == handle) return !snapshot_found;
if (p == this) snapshot_found = true;
}
@@ -108,12 +111,13 @@ bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
std::vector<const CordzHandle*>
CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() {
+ ODRCheck();
std::vector<const CordzHandle*> handles;
if (!is_snapshot()) {
return handles;
}
- MutexLock lock(&mutex_);
+ MutexLock lock(&queue_->mutex);
for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) {
if (!p->is_snapshot()) {
handles.push_back(p);
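
Read from the caller's side, the cordz_handle.cc changes above preserve the existing delete-queue contract: while a snapshot handle is alive, CordzHandle::Delete() parks non-snapshot handles on the global queue instead of freeing them, and the snapshot's destructor later deletes everything queued after it up to the next snapshot. A short usage sketch follows; the helper is hypothetical, and it assumes CordzSnapshot is the default-constructible snapshot variant whose declaration is cut off at the end of the header diff below.

#include <cstddef>
#include <vector>

#include "absl/strings/internal/cordz_handle.h"

// Hypothetical diagnostic helper, not part of this change.
std::size_t CountSafeDeletedHandles() {
  // While `snapshot` exists, CordzHandle::Delete() appends non-snapshot
  // handles to the global delete queue instead of deleting them.
  absl::cord_internal::CordzSnapshot snapshot;
  std::vector<const absl::cord_internal::CordzHandle*> handles =
      snapshot.DiagnosticsGetSafeToInspectDeletedHandles();
  // Every handle returned here is safe to inspect for as long as `snapshot`
  // is alive; its destructor cleans up the queued handles afterwards.
  return handles.size();
}
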
diff --git a/absl/strings/internal/cordz_handle.h b/absl/strings/internal/cordz_handle.h
index 0235d4b..71563c8 100644
--- a/absl/strings/internal/cordz_handle.h
+++ b/absl/strings/internal/cordz_handle.h
@@ -19,6 +19,7 @@
#include <vector>
#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
#include "absl/synchronization/mutex.h"
namespace absl {
@@ -66,23 +67,42 @@ class CordzHandle {
virtual ~CordzHandle();
private:
- // Returns true if the delete queue is empty. This method does not acquire the
- // lock, but does a 'load acquire' observation on the delete queue tail. It
- // is used inside Delete() to check for the presence of a delete queue without
- // holding the lock. The assumption is that the caller is in the state of
- // 'being deleted', and can not be newly discovered by a concurrent 'being
- // constructed' snapshot instance. Practically, this means that any such
- // discovery (`find`, 'first' or 'next', etc) must have proper 'happens before
- // / after' semantics and atomic fences.
- static bool UnsafeDeleteQueueEmpty() ABSL_NO_THREAD_SAFETY_ANALYSIS {
- return dq_tail_.load(std::memory_order_acquire) == nullptr;
+ // Global queue data. CordzHandle stores a pointer to the global queue
+ // instance to harden against ODR violations.
+ struct Queue {
+ constexpr explicit Queue(absl::ConstInitType) : mutex(absl::kConstInit) {}
+
+ absl::Mutex mutex;
+ std::atomic<CordzHandle*> dq_tail ABSL_GUARDED_BY(mutex){nullptr};
+
+ // Returns true if this delete queue is empty. This method does not acquire
+ // the lock, but does a 'load acquire' observation on the delete queue tail.
+ // It is used inside Delete() to check for the presence of a delete queue
+ // without holding the lock. The assumption is that the caller is in the
+ // state of 'being deleted', and can not be newly discovered by a concurrent
+ // 'being constructed' snapshot instance. Practically, this means that any
+ // such discovery (`find`, 'first' or 'next', etc) must have proper 'happens
+ // before / after' semantics and atomic fences.
+ bool IsEmpty() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ return dq_tail.load(std::memory_order_acquire) == nullptr;
+ }
+ };
+
+ void ODRCheck() const {
+#ifndef NDEBUG
+ ABSL_RAW_CHECK(queue_ == &global_queue_, "ODR violation in Cord");
+#endif
}
+ ABSL_CONST_INIT static Queue global_queue_;
+ Queue* const queue_ = &global_queue_;
const bool is_snapshot_;
- static absl::Mutex mutex_;
- static std::atomic<CordzHandle*> dq_tail_ ABSL_GUARDED_BY(mutex_);
- CordzHandle* dq_prev_ ABSL_GUARDED_BY(mutex_) = nullptr;
- CordzHandle* dq_next_ ABSL_GUARDED_BY(mutex_) = nullptr;
+
+ // dq_prev_ and dq_next_ require the global queue mutex to be held.
+ // Unfortunately we can't use thread annotations such that the thread safety
+ // analysis understands that queue_ and global_queue_ are one and the same.
+ CordzHandle* dq_prev_ = nullptr;
+ CordzHandle* dq_next_ = nullptr;
};
class CordzSnapshot : public CordzHandle {
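
The comment on Queue::IsEmpty() above relies on release/acquire publication of the tail pointer: writers append under the mutex and publish the new tail with a release store, so a reader that only needs to know whether anything is queued at all can do an acquire load without taking the lock. A minimal standalone sketch of that idiom, with hypothetical Node and queue names rather than the Abseil types:

#include <atomic>
#include <mutex>

struct Node {
  Node* prev = nullptr;
};

std::mutex queue_mutex;
std::atomic<Node*> queue_tail{nullptr};

void Append(Node* node) {
  std::lock_guard<std::mutex> lock(queue_mutex);
  node->prev = queue_tail.load(std::memory_order_acquire);
  queue_tail.store(node, std::memory_order_release);  // Publish the new tail.
}

bool IsEmpty() {
  // No lock held: a tail published with a release store is observed with an
  // acquire load, so a non-null result implies the node's fields written
  // before publication (here, prev) are visible to this thread.
  return queue_tail.load(std::memory_order_acquire) == nullptr;
}

int main() {
  Node n;
  Append(&n);
  return IsEmpty() ? 1 : 0;
}
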