author     mtklein <mtklein@chromium.org>  2015-02-03 13:38:58 -0800
committer  Commit bot <commit-bot@chromium.org>  2015-02-03 13:38:58 -0800
commit     7b274c78fbeefa3818af68099545f2839c854847 (patch)
tree       6b18a0bedf025c859d751498f4048882948b9d68 /include/core
parent     65fd599e43247e139bc3e7afaff9392073fec1bc (diff)
Port SkRefCnt.h to new SkAtomics.h
This adds sk_memory_barrier(), implemented using sk_atomic_fetch_add() on an
uninitialized variable. If that becomes a problem we can drop this to the
porting layer, using std::atomic_thread_fence() / __atomic_thread_fence() /
__sync_synchronize().

The big win is that ref() doesn't generate a memory barrier any more on ARM.

This is an instance of SkSafeRef() in SkPaint(const SkPaint&) after this CL:

  4d0: 684a       ldr   r2, [r1, #4]
  4d2: 6018       str   r0, [r3, #0]
  4d4: b13a       cbz   r2, 4e6 <_ZN7SkPaintC1ERKS_+0x2e>
  4d6: 1d10       adds  r0, r2, #4
  4d8: e850 4f00  ldrex r4, [r0]
  4dc: 3401       adds  r4, #1
  4de: e840 4500  strex r5, r4, [r0]
  4e2: 2d00       cmp   r5, #0
  4e4: d1f8       bne.n 4d8 <_ZN7SkPaintC1ERKS_+0x20>

Here's the before, pretty much the same but with two memory barriers surrounding the ref():

  4d8: 684a       ldr   r2, [r1, #4]
  4da: 6018       str   r0, [r3, #0]
  4dc: b15a       cbz   r2, 4f6 <_ZN7SkPaintC1ERKS_+0x3e>
  4de: 1d10       adds  r0, r2, #4
  4e0: f3bf 8f5f  dmb   sy
  4e4: e850 4f00  ldrex r4, [r0]
  4e8: 3401       adds  r4, #1
  4ea: e840 4500  strex r5, r4, [r0]
  4ee: 2d00       cmp   r5, #0
  4f0: d1f8       bne.n 4e4 <_ZN7SkPaintC1ERKS_+0x2c>
  4f2: f3bf 8f5f  dmb   sy

The miscellaneous files in here are just fixups to explicitly include SkMutex.h,
instead of leeching it off SkRefCnt.h.

No public API changes.
TBR=reed@google.com

Build trybots seem hosed.
NOTRY=true

BUG=skia:

Review URL: https://codereview.chromium.org/896803002
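[Editor's note] A minimal sketch of the barrier idea the message describes: a full barrier synthesized from an atomic read-modify-write on a throwaway variable, with the porting-layer fences named above as fallbacks. The function name sk_memory_barrier_sketch and the use of C++11 <atomic> in place of Skia's SkAtomics.h primitives are illustrative assumptions, not the CL's actual definition.

  #include <atomic>

  // Full barrier built from a sequentially consistent RMW on a dummy variable,
  // mirroring the "sk_atomic_fetch_add() on an uninitialized variable" approach.
  static inline void sk_memory_barrier_sketch() {
      static std::atomic<int> gDummy;  // value is never meaningfully read
      (void)gDummy.fetch_add(0, std::memory_order_seq_cst);
      // Porting-layer alternatives mentioned in the message:
      //   std::atomic_thread_fence(std::memory_order_seq_cst);
      //   __atomic_thread_fence(__ATOMIC_SEQ_CST);
      //   __sync_synchronize();
  }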
Diffstat (limited to 'include/core')
-rw-r--r--  include/core/SkRefCnt.h  40
1 file changed, 17 insertions, 23 deletions
diff --git a/include/core/SkRefCnt.h b/include/core/SkRefCnt.h
index cf3e385b25..9c62fe2740 100644
--- a/include/core/SkRefCnt.h
+++ b/include/core/SkRefCnt.h
@@ -10,8 +10,7 @@
#ifndef SkRefCnt_DEFINED
#define SkRefCnt_DEFINED
-#include "SkDynamicAnnotations.h"
-#include "SkThread.h"
+#include "SkAtomics.h"
#include "SkInstCnt.h"
#include "SkTemplates.h"
@@ -51,22 +50,20 @@ public:
* Ensures that all previous owner's actions are complete.
*/
bool unique() const {
- // We believe we're reading fRefCnt in a safe way here, so we stifle the TSAN warning about
- // an unproctected read. Generally, don't read fRefCnt, and don't stifle this warning.
- bool const unique = (1 == sk_acquire_load(&fRefCnt));
- if (unique) {
- // Acquire barrier (L/SL), if not provided by load of fRefCnt.
- // Prevents user's 'unique' code from happening before decrements.
- //TODO: issue the barrier only when unique is true
+ if (1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire)) {
+ // The acquire barrier is only really needed if we return true. It
+ // prevents code conditioned on the result of unique() from running
+ // until previous owners are all totally done calling unref().
+ return true;
}
- return unique;
+ return false;
}
/** Increment the reference count. Must be balanced by a call to unref().
*/
void ref() const {
SkASSERT(fRefCnt > 0);
- sk_atomic_inc(&fRefCnt); // No barrier required.
+ (void)sk_atomic_fetch_add(&fRefCnt, +1, sk_memory_order_relaxed); // No barrier required.
}
/** Decrement the reference count. If the reference count is 1 before the
@@ -75,12 +72,11 @@ public:
*/
void unref() const {
SkASSERT(fRefCnt > 0);
- // Release barrier (SL/S), if not provided below.
- if (sk_atomic_dec(&fRefCnt) == 1) {
- // Acquire barrier (L/SL), if not provided above.
- // Prevents code in dispose from happening before the decrement.
- sk_membar_acquire__after_atomic_dec();
- internal_dispose();
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
+ // Like unique(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+ this->internal_dispose();
}
}
@@ -262,17 +258,15 @@ public:
// - ref() doesn't need any barrier;
// - unref() needs a release barrier, and an acquire if it's going to call delete.
- bool unique() const { return 1 == sk_acquire_load(&fRefCnt); }
- void ref() const { sk_atomic_inc(&fRefCnt); }
+ bool unique() const { return 1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire); }
+ void ref() const { (void)sk_atomic_fetch_add(&fRefCnt, +1, sk_memory_order_relaxed); }
void unref() const {
- int32_t prevValue = sk_atomic_dec(&fRefCnt);
- SkASSERT(prevValue >= 1);
- if (1 == prevValue) {
+ if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
SkDEBUGCODE(fRefCnt = 1;) // restore the 1 for our destructor's assert
SkDELETE((const Derived*)this);
}
}
- void deref() const { this->unref(); } // Chrome prefers to call deref().
+ void deref() const { this->unref(); }
private:
mutable int32_t fRefCnt;
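[Editor's note] For readers outside Skia, here is a hedged, self-contained sketch of the same memory-ordering discipline the diff adopts, written against std::atomic rather than SkAtomics.h: a relaxed increment in ref(), an acq_rel decrement in unref(), and an acquire load in unique(). The class name RefCountSketch is illustrative and not part of the CL.

  #include <atomic>
  #include <cstdint>

  class RefCountSketch {
  public:
      RefCountSketch() : fRefCnt(1) {}

      void ref() const {
          // Taking another reference needs no ordering with other memory.
          fRefCnt.fetch_add(+1, std::memory_order_relaxed);
      }

      void unref() const {
          // Release publishes this owner's writes; the acquire half matters only
          // on the final decrement, so destruction can't be reordered above it.
          if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
              delete this;
          }
      }

      bool unique() const {
          // Acquire so code conditioned on unique() observes everything the
          // previous owners did before their unref().
          return 1 == fRefCnt.load(std::memory_order_acquire);
      }

  protected:
      virtual ~RefCountSketch() = default;

  private:
      mutable std::atomic<int32_t> fRefCnt;
  };

As with SkRefCnt, a caller would heap-allocate the object and balance every ref() with an unref(); the last unref() deletes it.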