Diffstat (limited to 'include/private')
 -rw-r--r--  include/private/SkAtomics.h     28
 -rw-r--r--  include/private/SkOnce.h         2
 -rw-r--r--  include/private/SkWeakRefCnt.h  60
 3 files changed, 39 insertions(+), 51 deletions(-)
diff --git a/include/private/SkAtomics.h b/include/private/SkAtomics.h
index 249723d785..bfe93d8bac 100644
--- a/include/private/SkAtomics.h
+++ b/include/private/SkAtomics.h
@@ -159,32 +159,4 @@ inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_a
inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); }
-inline bool sk_atomic_cas(int32_t* ptr, int32_t expected, int32_t desired) {
- return sk_atomic_compare_exchange(ptr, &expected, desired);
-}
-
-inline void* sk_atomic_cas(void** ptr, void* expected, void* desired) {
- (void)sk_atomic_compare_exchange(ptr, &expected, desired);
- return expected;
-}
-
-inline int32_t sk_atomic_conditional_inc(int32_t* ptr) {
- int32_t prev = sk_atomic_load(ptr);
- do {
- if (0 == prev) {
- break;
- }
- } while(!sk_atomic_compare_exchange(ptr, &prev, prev+1));
- return prev;
-}
-
-template <typename T>
-T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); }
-
-template <typename T>
-void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); }
-
-inline void sk_membar_acquire__after_atomic_dec() {}
-inline void sk_membar_acquire__after_atomic_conditional_inc() {}
-
#endif//SkAtomics_DEFINED
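
Note: the deleted sk_atomic_conditional_inc() helper lives on as the compare-exchange loop that SkWeakRefCnt now carries internally (see the third diff below). For reference only, a minimal standalone sketch of the same pattern on std::atomic; the free-function name is illustrative and not part of this patch:

    #include <atomic>
    #include <cstdint>

    // Increment only while the counter is non-zero; acquire on success.
    // Returns the previous value, so 0 means the increment did not happen.
    static int32_t conditional_increment(std::atomic<int32_t>& counter) {
        int32_t prev = counter.load(std::memory_order_relaxed);
        do {
            if (prev == 0) {
                break;  // already expired; do not resurrect
            }
        } while (!counter.compare_exchange_weak(prev, prev + 1,
                                                std::memory_order_acquire,
                                                std::memory_order_relaxed));
        return prev;
    }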
diff --git a/include/private/SkOnce.h b/include/private/SkOnce.h
index 5434d9d7d9..34eb79cd77 100644
--- a/include/private/SkOnce.h
+++ b/include/private/SkOnce.h
@@ -83,7 +83,7 @@ static void sk_once_slow(bool* done, Lock* lock, void (*f)(Arg), Arg arg) {
//
// We'll use this in the fast path to make sure f(arg)'s effects are
// observable whenever we observe *done == true.
- sk_release_store(done, true);
+ sk_atomic_store(done, true, sk_memory_order_release);
}
lock->release();
}
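
The SkOnce change is purely mechanical: sk_release_store() was already a release store, so the slow path still publishes *done with release semantics and the fast path's acquire load of *done continues to make f(arg)'s effects visible. A minimal sketch of that release/acquire pairing with std::atomic, using hypothetical names rather than Skia's SkOnce API:

    #include <atomic>

    std::atomic<bool> done{false};
    int payload = 0;  // stands in for the side effects of f(arg)

    void slow_path() {
        payload = 42;                                 // do the one-time work
        done.store(true, std::memory_order_release);  // then publish it
    }

    bool fast_path_ready() {
        // If this acquire load observes true, payload == 42 is guaranteed visible.
        return done.load(std::memory_order_acquire);
    }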
diff --git a/include/private/SkWeakRefCnt.h b/include/private/SkWeakRefCnt.h
index 1a78ba5092..d6631e946f 100644
--- a/include/private/SkWeakRefCnt.h
+++ b/include/private/SkWeakRefCnt.h
@@ -9,7 +9,7 @@
#define SkWeakRefCnt_DEFINED
#include "SkRefCnt.h"
-#include "../private/SkAtomics.h"
+#include <atomic>
/** \class SkWeakRefCnt
@@ -62,22 +62,39 @@ public:
*/
virtual ~SkWeakRefCnt() {
#ifdef SK_DEBUG
- SkASSERT(fWeakCnt == 1);
- fWeakCnt = 0;
+ SkASSERT(getWeakCnt() == 1);
+ fWeakCnt.store(0, std::memory_order_relaxed);
#endif
}
- /** Return the weak reference count.
- */
- int32_t getWeakCnt() const { return fWeakCnt; }
-
#ifdef SK_DEBUG
+ /** Return the weak reference count. */
+ int32_t getWeakCnt() const {
+ return fWeakCnt.load(std::memory_order_relaxed);
+ }
+
void validate() const {
this->INHERITED::validate();
- SkASSERT(fWeakCnt > 0);
+ SkASSERT(getWeakCnt() > 0);
}
#endif
+private:
+ /** If fRefCnt is 0, returns 0.
+ * Otherwise increments fRefCnt, acquires, and returns the old value.
+ */
+ int32_t atomic_conditional_acquire_strong_ref() const {
+ int32_t prev = fRefCnt.load(std::memory_order_relaxed);
+ do {
+ if (0 == prev) {
+ break;
+ }
+ } while(!fRefCnt.compare_exchange_weak(prev, prev+1, std::memory_order_acquire,
+ std::memory_order_relaxed));
+ return prev;
+ }
+
+public:
/** Creates a strong reference from a weak reference, if possible. The
caller must already be an owner. If try_ref() returns true the owner
is in possession of an additional strong reference. Both the original
@@ -86,10 +103,9 @@ public:
reference is in the same state as before the call.
*/
bool SK_WARN_UNUSED_RESULT try_ref() const {
- if (sk_atomic_conditional_inc(&fRefCnt) != 0) {
+ if (atomic_conditional_acquire_strong_ref() != 0) {
// Acquire barrier (L/SL), if not provided above.
// Prevents subsequent code from happening before the increment.
- sk_membar_acquire__after_atomic_conditional_inc();
return true;
}
return false;
@@ -99,9 +115,10 @@ public:
weak_unref().
*/
void weak_ref() const {
- SkASSERT(fRefCnt > 0);
- SkASSERT(fWeakCnt > 0);
- sk_atomic_inc(&fWeakCnt); // No barrier required.
+ SkASSERT(getRefCnt() > 0);
+ SkASSERT(getWeakCnt() > 0);
+ // No barrier required.
+ (void)fWeakCnt.fetch_add(+1, std::memory_order_relaxed);
}
/** Decrement the weak reference count. If the weak reference count is 1
@@ -110,15 +127,14 @@ public:
not on the stack.
*/
void weak_unref() const {
- SkASSERT(fWeakCnt > 0);
- // Release barrier (SL/S), if not provided below.
- if (sk_atomic_dec(&fWeakCnt) == 1) {
- // Acquire barrier (L/SL), if not provided above.
- // Prevents code in destructor from happening before the decrement.
- sk_membar_acquire__after_atomic_dec();
+ SkASSERT(getWeakCnt() > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == fWeakCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+ // Like try_ref(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
#ifdef SK_DEBUG
// so our destructor won't complain
- fWeakCnt = 1;
+ fWeakCnt.store(1, std::memory_order_relaxed);
#endif
this->INHERITED::internal_dispose();
}
@@ -128,7 +144,7 @@ public:
is the case all future calls to try_ref() will return false.
*/
bool weak_expired() const {
- return fRefCnt == 0;
+ return fRefCnt.load(std::memory_order_relaxed) == 0;
}
protected:
@@ -151,7 +167,7 @@ private:
}
/* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */
- mutable int32_t fWeakCnt;
+ mutable std::atomic<int32_t> fWeakCnt;
typedef SkRefCnt INHERITED;
};
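
Taken together, the SkWeakRefCnt conversion replaces the old barrier helpers with explicit memory orders: relaxed for the debug reads and for weak_ref(), acquire (on success) for try_ref(), and a single acq_rel fetch_add in weak_unref(). A stripped-down sketch of that unref pattern, assuming a hypothetical dispose() hook in place of internal_dispose():

    #include <atomic>
    #include <cstdint>

    struct WeakCounted {
        mutable std::atomic<int32_t> fWeakCnt{1};

        void dispose() const { /* free the memory backing the object */ }

        void weak_unref() const {
            // acq_rel: the release half orders this thread's prior writes before
            // the decrement, and the acquire half keeps dispose() from being
            // reordered above it.
            if (fWeakCnt.fetch_add(-1, std::memory_order_acq_rel) == 1) {
                this->dispose();  // we just dropped the last weak reference
            }
        }
    };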