Diffstat (limited to 'absl/base')
-rw-r--r--  absl/base/call_once_test.cc            12
-rw-r--r--  absl/base/internal/low_level_alloc.cc  12
-rw-r--r--  absl/base/internal/spinlock.h          14
3 files changed, 19 insertions, 19 deletions
diff --git a/absl/base/call_once_test.cc b/absl/base/call_once_test.cc
index 9c2a0c44..9a5a5c13 100644
--- a/absl/base/call_once_test.cc
+++ b/absl/base/call_once_test.cc
@@ -30,11 +30,11 @@ absl::once_flag once;
ABSL_CONST_INIT Mutex counters_mu(absl::kConstInit);
-int running_thread_count GUARDED_BY(counters_mu) = 0;
-int call_once_invoke_count GUARDED_BY(counters_mu) = 0;
-int call_once_finished_count GUARDED_BY(counters_mu) = 0;
-int call_once_return_count GUARDED_BY(counters_mu) = 0;
-bool done_blocking GUARDED_BY(counters_mu) = false;
+int running_thread_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_invoke_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_finished_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_return_count ABSL_GUARDED_BY(counters_mu) = 0;
+bool done_blocking ABSL_GUARDED_BY(counters_mu) = false;
// Function to be called from absl::call_once. Waits for a notification.
void WaitAndIncrement() {
@@ -60,7 +60,7 @@ void ThreadBody() {
}
// Returns true if all threads are set up for the test.
-bool ThreadsAreSetup(void*) EXCLUSIVE_LOCKS_REQUIRED(counters_mu) {
+bool ThreadsAreSetup(void*) ABSL_EXCLUSIVE_LOCKS_REQUIRED(counters_mu) {
// All ten threads must be running, and WaitAndIncrement should be blocked.
return running_thread_count == 10 && call_once_invoke_count == 1;
}
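
The hunk above only renames annotation macros to their ABSL_-prefixed forms. As a minimal sketch (not part of this change, names Increment/IncrementLocked/counter_mu are hypothetical), this is how the renamed ABSL_GUARDED_BY and ABSL_EXCLUSIVE_LOCKS_REQUIRED annotations are typically used with absl::Mutex so Clang's -Wthread-safety analysis can check callers:

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

absl::Mutex counter_mu;
// The analysis reports any access to `counter` made without holding counter_mu.
int counter ABSL_GUARDED_BY(counter_mu) = 0;

// Callers must already hold counter_mu.
void IncrementLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(counter_mu) {
  ++counter;
}

void Increment() {
  absl::MutexLock lock(&counter_mu);  // RAII lock satisfies the requirement.
  IncrementLocked();
}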
diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
index 36e4f1ba..64d7aa80 100644
--- a/absl/base/internal/low_level_alloc.cc
+++ b/absl/base/internal/low_level_alloc.cc
@@ -203,9 +203,9 @@ struct LowLevelAlloc::Arena {
base_internal::SpinLock mu;
// Head of free list, sorted by address
- AllocList freelist GUARDED_BY(mu);
+ AllocList freelist ABSL_GUARDED_BY(mu);
// Count of allocated blocks
- int32_t allocation_count GUARDED_BY(mu);
+ int32_t allocation_count ABSL_GUARDED_BY(mu);
// flags passed to NewArena
const uint32_t flags;
// Result of sysconf(_SC_PAGESIZE)
@@ -215,7 +215,7 @@ struct LowLevelAlloc::Arena {
// Smallest allocation block size
const size_t min_size;
// PRNG state
- uint32_t random GUARDED_BY(mu);
+ uint32_t random ABSL_GUARDED_BY(mu);
};
namespace {
@@ -275,10 +275,10 @@ static const uintptr_t kMagicAllocated = 0x4c833e95U;
static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
namespace {
-class SCOPED_LOCKABLE ArenaLock {
+class ABSL_SCOPED_LOCKABLE ArenaLock {
public:
explicit ArenaLock(LowLevelAlloc::Arena *arena)
- EXCLUSIVE_LOCK_FUNCTION(arena->mu)
+ ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
: arena_(arena) {
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
@@ -290,7 +290,7 @@ class SCOPED_LOCKABLE ArenaLock {
arena_->mu.Lock();
}
~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
- void Leave() UNLOCK_FUNCTION() {
+ void Leave() ABSL_UNLOCK_FUNCTION() {
arena_->mu.Unlock();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if (mask_valid_) {
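
For context, a minimal sketch of the ABSL_SCOPED_LOCKABLE pattern that ArenaLock follows (MutexGuard is a hypothetical name for illustration only; ArenaLock itself releases via an explicit Leave() annotated ABSL_UNLOCK_FUNCTION() rather than in its destructor):

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class ABSL_SCOPED_LOCKABLE MutexGuard {
 public:
  // Constructor is annotated as acquiring the mutex it is given.
  explicit MutexGuard(absl::Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
      : mu_(mu) {
    mu_->Lock();
  }
  // Destructor is annotated as releasing it, so the analysis knows the
  // capability is held exactly for the guard's scope.
  ~MutexGuard() ABSL_UNLOCK_FUNCTION() { mu_->Unlock(); }

 private:
  absl::Mutex* mu_;
};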
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
index 6ee60ac8..1d1bd6cf 100644
--- a/absl/base/internal/spinlock.h
+++ b/absl/base/internal/spinlock.h
@@ -48,7 +48,7 @@
namespace absl {
namespace base_internal {
-class LOCKABLE SpinLock {
+class ABSL_LOCKABLE SpinLock {
public:
SpinLock() : lockword_(kSpinLockCooperative) {
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
@@ -79,7 +79,7 @@ class LOCKABLE SpinLock {
~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
// Acquire this SpinLock.
- inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
+ inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
if (!TryLockImpl()) {
SlowLock();
@@ -91,7 +91,7 @@ class LOCKABLE SpinLock {
// acquisition was successful. If the lock was not acquired, false is
// returned. If this SpinLock is free at the time of the call, TryLock
// will return true with high probability.
- inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+ inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
bool res = TryLockImpl();
ABSL_TSAN_MUTEX_POST_LOCK(
@@ -101,7 +101,7 @@ class LOCKABLE SpinLock {
}
// Release this SpinLock, which must be held by the calling thread.
- inline void Unlock() UNLOCK_FUNCTION() {
+ inline void Unlock() ABSL_UNLOCK_FUNCTION() {
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
@@ -179,13 +179,13 @@ class LOCKABLE SpinLock {
// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
-class SCOPED_LOCKABLE SpinLockHolder {
+class ABSL_SCOPED_LOCKABLE SpinLockHolder {
public:
- inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
+ inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
: lock_(l) {
l->Lock();
}
- inline ~SpinLockHolder() UNLOCK_FUNCTION() { lock_->Unlock(); }
+ inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
SpinLockHolder(const SpinLockHolder&) = delete;
SpinLockHolder& operator=(const SpinLockHolder&) = delete;
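
A minimal usage sketch of the annotated SpinLock API shown above. SpinLock lives in absl::base_internal and is not a public API, so this is for illustration only under that assumption; stats_lock/total_bytes/AddBytes/TryAddBytes are hypothetical names:

#include <cstdint>

#include "absl/base/internal/spinlock.h"
#include "absl/base/thread_annotations.h"

absl::base_internal::SpinLock stats_lock;
int64_t total_bytes ABSL_GUARDED_BY(stats_lock) = 0;

void AddBytes(int64_t n) {
  // SpinLockHolder's constructor is ABSL_EXCLUSIVE_LOCK_FUNCTION(l) and its
  // destructor ABSL_UNLOCK_FUNCTION(), so stats_lock is held for this scope.
  absl::base_internal::SpinLockHolder holder(&stats_lock);
  total_bytes += n;
}

bool TryAddBytes(int64_t n) {
  // TryLock() is ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true): the analysis treats
  // the lock as held only on the branch where it returned true.
  if (!stats_lock.TryLock()) return false;
  total_bytes += n;
  stats_lock.Unlock();
  return true;
}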