author     commit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>  2014-02-10 19:58:49 +0000
committer  commit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>  2014-02-10 19:58:49 +0000
commit     ba9354b9d4de62988cc2a56c0760fd5e52da3679 (patch)
tree       724723cfc3860a37f402327ea3cb58d7a1a43c06 /include/core
parent     50b393a768c0311b3210f723325fd27bf161136b (diff)
SkOnce in is_lcd_supported instead of hand-rolled double-checked locking.
BUG=skia:
R=bungeman@google.com, reed@google.com

Author: mtklein@google.com

Review URL: https://codereview.chromium.org/155963003

git-svn-id: http://skia.googlecode.com/svn/trunk@13387 2bbb7eff-a529-9590-31e7-b0007b416f81
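As context for the change, a minimal sketch of the call-site pattern the new overload enables: reusing a caller-owned flag and lock instead of a dedicated SkOnceFlag. The names gOnceDone, gOnceLock, and compute_lcd_support are illustrative assumptions, not the actual Skia call site.

    #include "SkOnce.h"

    // Hypothetical one-time initialization guarded by a caller-owned flag + lock.
    static bool       gOnceDone;  // zero-initialized; set once the work has run
    static SkSpinlock gOnceLock;  // zero-initialized POD spinlock from SkOnce.h

    static void compute_lcd_support(bool* available) {
        *available = true;  // stand-in for querying the platform font back end
    }

    bool is_lcd_supported() {
        static bool gAvailable;
        // The new overload takes the flag and lock directly.
        SkOnce(&gOnceDone, &gOnceLock, compute_lcd_support, &gAvailable);
        return gAvailable;
    }

On the fast path this is a single unsynchronized read of the flag followed by an acquire barrier, exactly as in the implementation in the diff below.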
Diffstat (limited to 'include/core')
-rw-r--r--  include/core/SkOnce.h    57
-rw-r--r--  include/core/SkThread.h  29
2 files changed, 48 insertions(+), 38 deletions(-)
diff --git a/include/core/SkOnce.h b/include/core/SkOnce.h
index 59eaf598bc..daeb819d42 100644
--- a/include/core/SkOnce.h
+++ b/include/core/SkOnce.h
@@ -40,8 +40,32 @@ struct SkOnceFlag; // If manually created, initialize with SkOnceFlag once = SK
template <typename Func, typename Arg>
inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)() = NULL);
+// If you've already got a lock and a flag to use, this variant lets you avoid an extra SkOnceFlag.
+template <typename Lock, typename Func, typename Arg>
+inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)() = NULL);
+
// ---------------------- Implementation details below here. -----------------------------
+// This is POD and must be zero-initialized.
+struct SkSpinlock {
+ void acquire() {
+ SkASSERT(shouldBeZero == 0);
+ // No memory barrier needed, but sk_atomic_cas gives us at least release anyway.
+ while (!sk_atomic_cas(&thisIsPrivate, 0, 1)) {
+ // spin
+ }
+ }
+
+ void release() {
+ SkASSERT(shouldBeZero == 0);
+ // This requires a release memory barrier before storing, which sk_atomic_cas guarantees.
+ SkAssertResult(sk_atomic_cas(&thisIsPrivate, 1, 0));
+ }
+
+ int32_t thisIsPrivate;
+ SkDEBUGCODE(int32_t shouldBeZero;)
+};
+
struct SkOnceFlag {
bool done;
SkSpinlock lock;
@@ -87,6 +111,16 @@ inline static void acquire_barrier() {
full_barrier_on_arm();
}
+// Works with SkSpinlock or SkMutex.
+template <typename Lock>
+class SkAutoLockAcquire {
+public:
+ explicit SkAutoLockAcquire(Lock* lock) : fLock(lock) { fLock->acquire(); }
+ ~SkAutoLockAcquire() { fLock->release(); }
+private:
+ Lock* fLock;
+};
+
// We've pulled a pretty standard double-checked locking implementation apart
// into its main fast path and a slow path that's called when we suspect the
// one-time code hasn't run yet.
@@ -94,10 +128,10 @@ inline static void acquire_barrier() {
// This is the guts of the code, called when we suspect the one-time code hasn't been run yet.
// This should be rarely called, so we separate it from SkOnce and don't mark it as inline.
// (We don't mind if this is an actual function call, but odds are it'll be inlined anyway.)
-template <typename Func, typename Arg>
-static void sk_once_slow(SkOnceFlag* once, Func f, Arg arg, void (*atExit)()) {
- const SkAutoSpinlock lock(&once->lock);
- if (!once->done) {
+template <typename Lock, typename Func, typename Arg>
+static void sk_once_slow(bool* done, Lock* lock, Func f, Arg arg, void (*atExit)()) {
+ const SkAutoLockAcquire<Lock> locked(lock);
+ if (!*done) {
f(arg);
if (atExit != NULL) {
atexit(atExit);
@@ -112,15 +146,15 @@ static void sk_once_slow(SkOnceFlag* once, Func f, Arg arg, void (*atExit)()) {
// We'll use this in the fast path to make sure f(arg)'s effects are
// observable whenever we observe *done == true.
release_barrier();
- once->done = true;
+ *done = true;
}
}
// This is our fast path, called all the time. We do really want it to be inlined.
-template <typename Func, typename Arg>
-inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)()) {
- if (!SK_ANNOTATE_UNPROTECTED_READ(once->done)) {
- sk_once_slow(once, f, arg, atExit);
+template <typename Lock, typename Func, typename Arg>
+inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)()) {
+ if (!SK_ANNOTATE_UNPROTECTED_READ(*done)) {
+ sk_once_slow(done, lock, f, arg, atExit);
}
// Also known as a load-load/load-store barrier, this acquire barrier makes
// sure that anything we read from memory---in particular, memory written by
@@ -135,6 +169,11 @@ inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)()) {
acquire_barrier();
}
+template <typename Func, typename Arg>
+inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)()) {
+ return SkOnce(&once->done, &once->lock, f, arg, atExit);
+}
+
#undef SK_ANNOTATE_BENIGN_RACE
#endif // SkOnce_DEFINED
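For comparison, the release_barrier()/acquire_barrier() pairing above corresponds to a release store and an acquire load in C++11 atomics. A sketch of the same double-checked pattern under that assumption; this is illustrative only, not Skia code (the 2014 code predates Skia's use of <atomic>, and `once` is a made-up name):

    #include <atomic>
    #include <mutex>

    static std::atomic<bool> gDone(false);
    static std::mutex gLock;

    template <typename Func, typename Arg>
    void once(Func f, Arg arg) {
        // Fast path: the acquire load pairs with the release store below, so a
        // thread that observes gDone == true also observes f(arg)'s effects.
        if (!gDone.load(std::memory_order_acquire)) {
            std::lock_guard<std::mutex> guard(gLock);
            if (!gDone.load(std::memory_order_relaxed)) {  // re-check under the lock
                f(arg);
                gDone.store(true, std::memory_order_release);
            }
        }
    }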
diff --git a/include/core/SkThread.h b/include/core/SkThread.h
index 160b40ce29..7e2c90ed51 100644
--- a/include/core/SkThread.h
+++ b/include/core/SkThread.h
@@ -51,35 +51,6 @@ static void sk_membar_acquire__after_atomic_conditional_inc();
#include SK_ATOMICS_PLATFORM_H
-// This is POD and must be zero-initialized.
-struct SkSpinlock {
- void acquire() {
- SkASSERT(shouldBeZero == 0);
- // No memory barrier needed, but sk_atomic_cas gives us at least release anyway.
- while (!sk_atomic_cas(&thisIsPrivate, 0, 1)) {
- // spin
- }
- }
-
- void release() {
- SkASSERT(shouldBeZero == 0);
- // This requires a release memory barrier before storing, which sk_atomic_cas guarantees.
- SkAssertResult(sk_atomic_cas(&thisIsPrivate, 1, 0));
- }
-
- int32_t thisIsPrivate;
- SkDEBUGCODE(int32_t shouldBeZero;)
-};
-
-class SkAutoSpinlock : SkNoncopyable {
-public:
- explicit SkAutoSpinlock(SkSpinlock* lock) : fLock(lock) { fLock->acquire(); }
- ~SkAutoSpinlock() { fLock->release(); }
-private:
- SkSpinlock* fLock;
-};
-#define SkAutoSpinlock(...) SK_REQUIRE_LOCAL_VAR(SkAutoSpinlock)
-
/** SK_MUTEX_PLATFORM_H must provide the following (or equivalent) declarations.
class SkBaseMutex {
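Aside on the SkSpinlock moved out of this header: the same acquire-on-lock / release-on-unlock discipline can be written portably with std::atomic_flag. A minimal sketch, assuming C++11; not part of this change:

    #include <atomic>

    class SpinLock {
    public:
        void acquire() {
            // test_and_set(acquire) loops until the previous holder clears the flag.
            while (fLocked.test_and_set(std::memory_order_acquire)) {
                // spin
            }
        }
        void release() {
            fLocked.clear(std::memory_order_release);  // publish writes made under the lock
        }
    private:
        std::atomic_flag fLocked = ATOMIC_FLAG_INIT;
    };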