// include/core/SkOnce.h
/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkOnce_DEFINED
#define SkOnce_DEFINED

// SkOnce.h defines SK_DECLARE_STATIC_ONCE and SkOnce(), which you can use
// together to create a threadsafe way to call a function just once.  This
// is particularly useful for lazy singleton initialization. E.g.
//
// static void set_up_my_singleton(Singleton** singleton) {
//     *singleton = new Singleton(...);
// }
// ...
// const Singleton& GetSingleton() {
//     static Singleton* singleton = NULL;
//     SK_DECLARE_STATIC_ONCE(once);
//     SkOnce(&once, set_up_my_singleton, &singleton);
//     SkASSERT(NULL != singleton);
//     return *singleton;
// }
//
// OnceTest.cpp should also serve as a few more simple examples.
//
// You may optionally pass SkOnce a second function to be called at exit for cleanup.

#include "SkDynamicAnnotations.h"
#include "SkThread.h"
#include "SkTypes.h"

// Aggregate initializer matching SkOnceFlag's layout: { done, { lock, SkDEBUGCODE(canary) } }.
#define SK_ONCE_INIT { false, { 0, SkDEBUGCODE(0) } }
// Declares a correctly-initialized static SkOnceFlag named `name`.
#define SK_DECLARE_STATIC_ONCE(name) static SkOnceFlag name = SK_ONCE_INIT

struct SkOnceFlag;  // If manually created, initialize with SkOnceFlag once = SK_ONCE_INIT

// Calls f(arg) at most once across all threads; optionally registers atExit with atexit().
template <typename Func, typename Arg>
inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)() = NULL);

// If you've already got a lock and a flag to use, this variant lets you avoid an extra SkOnceFlag.
template <typename Lock, typename Func, typename Arg>
inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)() = NULL);

//  ----------------------  Implementation details below here. -----------------------------

// This is POD and must be zero-initialized.
// A minimal busy-wait mutex: 0 means unlocked, 1 means locked.  The member is
// named thisIsPrivate to discourage touching it directly; use acquire()/release().
struct SkSpinlock {
    void acquire() {
        SkASSERT(shouldBeZero == 0);
        // No memory barrier needed, but sk_atomic_cas gives us at least release anyway.
        // Spin until we win the 0 -> 1 transition; any other thread holding the
        // lock keeps the CAS failing until it release()s.
        while (!sk_atomic_cas(&thisIsPrivate, 0, 1)) {
            // spin
        }
    }

    void release() {
        SkASSERT(shouldBeZero == 0);
        // This requires a release memory barrier before storing, which sk_atomic_cas guarantees.
        // The 1 -> 0 CAS must succeed here; failure means release() without acquire().
        SkAssertResult(sk_atomic_cas(&thisIsPrivate, 1, 0));
    }

    int32_t thisIsPrivate;
    // Debug-only canary: catches use of a SkSpinlock that wasn't zero-initialized.
    SkDEBUGCODE(int32_t shouldBeZero;)
};

// POD flag + lock pair.  Member order must match the SK_ONCE_INIT initializer.
struct SkOnceFlag {
    bool done;        // Set to true (after a release barrier) once f(arg) has run.
    SkSpinlock lock;  // Serializes the slow path in sk_once_slow.
};

// TODO(bungeman, mtklein): move all these *barrier* functions to SkThread when refactoring lands.

#ifdef SK_BUILD_FOR_WIN
#  include <intrin.h>
// Stops the compiler (not the CPU) from reordering memory accesses across this
// point.  _ReadWriteBarrier() is MSVC's compiler-only fence; it emits no code.
inline static void compiler_barrier() {
    _ReadWriteBarrier();
}
#else
// GCC/Clang equivalent: an empty asm with a "memory" clobber forbids the
// compiler from moving loads/stores across it, while emitting no instructions.
inline static void compiler_barrier() {
    asm volatile("" : : : "memory");
}
#endif

// Issues a full hardware memory barrier on ARM; compiles to nothing elsewhere.
inline static void full_barrier_on_arm() {
#ifdef SK_CPU_ARM
#  if SK_ARM_ARCH >= 7
    // ARMv7+ has a dedicated Data Memory Barrier instruction.
    asm volatile("dmb" : : : "memory");
#  else
    // Pre-v7: the equivalent data memory barrier via the CP15 coprocessor.
    asm volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
#  endif
#endif
}

// On every platform, we issue a compiler barrier to prevent it from reordering
// code.  That's enough for platforms like x86 where release and acquire
// barriers are no-ops.  On other platforms we may need to be more careful;
// ARM, in particular, needs real code for both acquire and release.  We use a
// full barrier, which acts as both, because that is the finest precision ARM
// provides.

// Store-store/load-store barrier: writes before this are visible before writes after it.
inline static void release_barrier() {
    compiler_barrier();
    full_barrier_on_arm();
}

// Load-load/load-store barrier: reads after this see memory at least as current
// as the read before it.  (Same implementation as release_barrier; see above.)
inline static void acquire_barrier() {
    compiler_barrier();
    full_barrier_on_arm();
}

// RAII scope guard: acquire()s the given lock on construction and release()s
// it on destruction.  Works with SkSpinlock or SkMutex.
template <typename Lock>
class SkAutoLockAcquire {
public:
    explicit SkAutoLockAcquire(Lock* lock) : fHeld(lock) {
        fHeld->acquire();
    }
    ~SkAutoLockAcquire() {
        fHeld->release();
    }
private:
    Lock* fHeld;  // Never NULL; lifetime owned by the caller.
};

// We've pulled a pretty standard double-checked locking implementation apart
// into its main fast path and a slow path that's called when we suspect the
// one-time code hasn't run yet.

// This is the guts of the code, called when we suspect the one-time code hasn't been run yet.
// This should be rarely called, so we separate it from SkOnce and don't mark it as inline.
// (We don't mind if this is an actual function call, but odds are it'll be inlined anyway.)
template <typename Lock, typename Func, typename Arg>
static void sk_once_slow(bool* done, Lock* lock, Func f, Arg arg, void (*atExit)()) {
    // Hold the lock for the whole check-run-publish sequence, so at most one
    // thread can be running f(arg) and any others block until *done is set.
    const SkAutoLockAcquire<Lock> locked(lock);
    if (!*done) {
        f(arg);
        // Register the optional cleanup only on the one run that actually calls f.
        if (atExit != NULL) {
            atexit(atExit);
        }
        // Also known as a store-store/load-store barrier, this makes sure that the writes
        // done before here---in particular, those done by calling f(arg)---are observable
        // before the writes after the line, *done = true.
        //
        // In version control terms this is like saying, "check in the work up
        // to and including f(arg), then check in *done=true as a subsequent change".
        //
        // We'll use this in the fast path to make sure f(arg)'s effects are
        // observable whenever we observe *done == true.
        release_barrier();
        *done = true;
    }
}

// This is our fast path, called all the time.  We do really want it to be inlined.
template <typename Lock, typename Func, typename Arg>
inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)()) {
    // Deliberately unsynchronized read of *done; the annotation tells race
    // detectors this race is intentional and benign.
    if (!SK_ANNOTATE_UNPROTECTED_READ(*done)) {
        sk_once_slow(done, lock, f, arg, atExit);
    }
    // Also known as a load-load/load-store barrier, this acquire barrier makes
    // sure that anything we read from memory---in particular, memory written by
    // calling f(arg)---is at least as current as the value we read from once->done.
    //
    // In version control terms, this is a lot like saying "sync up to the
    // commit where we wrote once->done = true".
    //
    // The release barrier in sk_once_slow guaranteed that once->done = true
    // happens after f(arg), so by syncing to once->done = true here we're
    // forcing ourselves to also wait until the effects of f(arg) are readable.
    acquire_barrier();
}

// Convenience overload: unpacks an SkOnceFlag and forwards to the
// flag-and-lock variant above.
template <typename Func, typename Arg>
inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)()) {
    bool* done = &once->done;
    SkSpinlock* lock = &once->lock;
    SkOnce(done, lock, f, arg, atExit);
}

#undef SK_ANNOTATE_BENIGN_RACE

#endif  // SkOnce_DEFINED