path: root/include/private/SkOncePtr.h
author     herb <herb@google.com>    2015-09-24 07:34:49 -0700
committer  Commit bot <commit-bot@chromium.org>    2015-09-24 07:34:49 -0700
commit     7f0a3d7523377097184309152f883f1fcb12a4b8 (patch)
tree       b7f6d9ee1208ead229a40240d26d765dd446c2ae /include/private/SkOncePtr.h
parent     c6363ef7b4763b36a9a0d255bc775a973d2fc7a5 (diff)
Make mutex semaphore based.
This implementation improves the performance of an SkMutex acquire/release pair from 42 ns to 13 ns. SkSharedMutex and SkSpinlock have the same performance. It also removes the specialized Windows and Linux/Mac code.

BUG=skia:
Review URL: https://codereview.chromium.org/1359733002
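For context, the semaphore-based mutex the commit message describes can be illustrated with a small, hypothetical sketch (not Skia's actual SkMutex or SkSemaphore code): an atomic counter paired with a semaphore, sometimes called a benaphore, keeps the uncontended acquire/release pair down to a single atomic operation and only touches the semaphore under contention. The class name ExampleSemaphoreMutex and the use of C++20 <semaphore> are assumptions made for this illustration.

#include <atomic>
#include <semaphore>

// Hypothetical illustration only; Skia uses its own SkSemaphore, not <semaphore>.
class ExampleSemaphoreMutex {
public:
    void acquire() {
        // Uncontended fast path: 0 -> 1 takes the lock with one atomic add.
        if (fCount.fetch_add(1, std::memory_order_acquire) > 0) {
            fSemaphore.acquire();   // Contended: block until the owner signals.
        }
    }
    void release() {
        // Uncontended fast path: 1 -> 0 means nobody is waiting.
        if (fCount.fetch_sub(1, std::memory_order_release) > 1) {
            fSemaphore.release();   // Contended: wake exactly one waiter.
        }
    }
private:
    std::atomic<int>          fCount{0};
    std::counting_semaphore<> fSemaphore{0};   // Only signaled when there are waiters.
};

In the uncontended case each of acquire() and release() is a single atomic read-modify-write, which is the sort of fast path that makes an acquire/release pair cheap compared with a mutex that always enters the kernel.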
Diffstat (limited to 'include/private/SkOncePtr.h')
-rw-r--r--  include/private/SkOncePtr.h  17
1 file changed, 8 insertions, 9 deletions
diff --git a/include/private/SkOncePtr.h b/include/private/SkOncePtr.h
index ee30d8f8db..a1c028c4af 100644
--- a/include/private/SkOncePtr.h
+++ b/include/private/SkOncePtr.h
@@ -14,7 +14,7 @@
template <typename T> class SkBaseOncePtr;
// Use this to create a global static pointer that's initialized exactly once when you call get().
-#define SK_DECLARE_STATIC_ONCE_PTR(type, name) namespace {} static SkBaseOncePtr<type> name
+#define SK_DECLARE_STATIC_ONCE_PTR(type, name) namespace {} static SkBaseOncePtr<type> name;
// Use this for a local or member pointer that's initialized exactly once when you call get().
template <typename T, typename Delete = skstd::default_delete<T>>
@@ -61,17 +61,17 @@ class SkBaseOncePtr {
public:
template <typename F>
T* get(const F& f) const {
- uintptr_t state = fState.load(sk_memory_order_acquire);
+ uintptr_t state = sk_atomic_load(&fState, sk_memory_order_acquire);
if (state < 2) {
if (state == 0) {
// It looks like no one has tried to create our pointer yet.
// We try to claim that task by atomically swapping our state from '0' to '1'.
- if (fState.compare_exchange(&state, 1, sk_memory_order_relaxed,
- sk_memory_order_relaxed)) {
+ if (sk_atomic_compare_exchange(
+ &fState, &state, (uintptr_t)1, sk_memory_order_relaxed, sk_memory_order_relaxed)) {
// We've claimed it. Create our pointer and store it into fState.
state = (uintptr_t)f();
SkASSERT(state > 1);
- fState.store(state, sk_memory_order_release);
+ sk_atomic_store(&fState, state, sk_memory_order_release);
} else {
// Someone else claimed it.
// We fall through to the spin loop just below to wait for them to finish.
@@ -82,7 +82,7 @@ public:
// State '1' is our busy-but-not-done state.
// Some other thread has claimed the job of creating our pointer.
// We just need to wait for it to finish.
- state = fState.load(sk_memory_order_acquire);
+ state = sk_atomic_load(&fState, sk_memory_order_acquire);
}
// We shouldn't be able to get here without having created our pointer.
@@ -92,16 +92,15 @@ public:
}
operator T*() const {
- auto state = fState.load(sk_memory_order_acquire);
+ auto state = sk_atomic_load(&fState, sk_memory_order_acquire);
return state < 2 ? nullptr : (T*)state;
// TODO: If state == 1 spin until it's not?
}
-private:
// fState == 0 --> we have not created our ptr yet
// fState == 1 --> someone is in the middle of creating our ptr
// else --> (T*)fState is our ptr
- mutable SkAtomic<uintptr_t> fState;
+ mutable uintptr_t fState;
};
#endif//SkOncePtr_DEFINED
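
As a closing illustration, here is a hypothetical sketch of the same once-initialized-pointer pattern written against std::atomic rather than Skia's sk_atomic_* helpers; the name ExampleOncePtr is invented for this sketch, and it is not a drop-in replacement for SkBaseOncePtr.

#include <atomic>
#include <cassert>
#include <cstdint>

// Hypothetical illustration of the pattern in the diff, not Skia code.
// fState == 0 : nobody has started creating the pointer.
// fState == 1 : some thread is in the middle of creating it.
// otherwise   : fState is the pointer itself.
template <typename T>
class ExampleOncePtr {
public:
    template <typename F>
    T* get(const F& create) {
        uintptr_t state = fState.load(std::memory_order_acquire);
        if (state < 2) {
            if (state == 0 &&
                fState.compare_exchange_strong(state, 1,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed)) {
                // We claimed the job: build the object, then publish it with a release store.
                state = reinterpret_cast<uintptr_t>(create());
                assert(state > 1);
                fState.store(state, std::memory_order_release);
            } else {
                // Another thread claimed it; spin until its release store becomes visible.
                while ((state = fState.load(std::memory_order_acquire)) < 2) { }
            }
        }
        return reinterpret_cast<T*>(state);
    }

private:
    std::atomic<uintptr_t> fState{0};
};

A caller would use it roughly as "static ExampleOncePtr<Foo> once; Foo* foo = once.get([]{ return new Foo; });", with the lambda invoked exactly once no matter how many threads race on get().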