/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_DEFINED
#define SkAtomics_DEFINED

// This file is not part of the public Skia API.
#include "SkTypes.h"
#include <atomic>

// ~~~~~~~~ APIs ~~~~~~~~~

enum sk_memory_order {
    sk_memory_order_relaxed,
    sk_memory_order_consume,
    sk_memory_order_acquire,
    sk_memory_order_release,
    sk_memory_order_acq_rel,
    sk_memory_order_seq_cst,
};
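
// Note: these enumerators are declared in the same order as std::memory_order,
// so the implementations below can cast directly between the two.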

template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_sub(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst);
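
// A minimal usage sketch of the free functions above (illustrative only; the
// variable names here are hypothetical):
//
//     int32_t gCount = 0;
//     sk_atomic_fetch_add(&gCount, int32_t(1), sk_memory_order_relaxed);  // bump a counter
//     int32_t expected = 0;
//     if (sk_atomic_compare_exchange(&gCount, &expected, int32_t(10))) {
//         // gCount was 0 and is now 10.
//     } else {
//         // 'expected' now holds the value that was observed instead.
//     }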

// A little wrapper class for small T (think, builtins: int, float, void*) to
// ensure they're always used atomically.  This is our stand-in for std::atomic<T>.
// !!! Please _really_ know what you're doing if you change default_memory_order. !!!
template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst>
class SkAtomic : SkNoncopyable {
public:
    SkAtomic() {}
    explicit SkAtomic(const T& val) : fVal(val) {}

    // It is essential we return by value rather than by const&.  fVal may change at any time.
    T load(sk_memory_order mo = default_memory_order) const {
        return sk_atomic_load(&fVal, mo);
    }

    void store(const T& val, sk_memory_order mo = default_memory_order) {
        sk_atomic_store(&fVal, val, mo);
    }

    // Alias for .load(default_memory_order).
    operator T() const {
        return this->load();
    }

    // Alias for .store(v, default_memory_order).
    T operator=(const T& v) {
        this->store(v);
        return v;
    }

    T fetch_add(const T& val, sk_memory_order mo = default_memory_order) {
        return sk_atomic_fetch_add(&fVal, val, mo);
    }

    T fetch_sub(const T& val, sk_memory_order mo = default_memory_order) {
        return sk_atomic_fetch_sub(&fVal, val, mo);
    }

    bool compare_exchange(T* expected, const T& desired,
                          sk_memory_order success = default_memory_order,
                          sk_memory_order failure = default_memory_order) {
        return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure);
    }
private:
    T fVal;
};
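
// A minimal sketch of typical SkAtomic<T> use (illustrative only; the member
// name below is hypothetical):
//
//     SkAtomic<int32_t> fRefCnt{1};
//     fRefCnt.fetch_add(+1, sk_memory_order_relaxed);             // take a reference
//     if (1 == fRefCnt.fetch_add(-1, sk_memory_order_acq_rel)) {  // drop a reference
//         // The previous count was 1, so this was the last reference.
//     }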

// ~~~~~~~~ Implementations ~~~~~~~~~

template <typename T>
T sk_atomic_load(const T* ptr, sk_memory_order mo) {
    SkASSERT(mo == sk_memory_order_relaxed ||
             mo == sk_memory_order_seq_cst ||
             mo == sk_memory_order_acquire ||
             mo == sk_memory_order_consume);
    const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr);
    return std::atomic_load_explicit(ap, (std::memory_order)mo);
}

template <typename T>
void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
    SkASSERT(mo == sk_memory_order_relaxed ||
             mo == sk_memory_order_seq_cst ||
             mo == sk_memory_order_release);
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    std::atomic_store_explicit(ap, val, (std::memory_order)mo);
}

template <typename T>
T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
    // All values of mo are valid.
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo);
}

template <typename T>
T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) {
    // All values of mo are valid.
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_fetch_sub_explicit(ap, val, (std::memory_order)mo);
}

template <typename T>
bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
                                sk_memory_order success,
                                sk_memory_order failure) {
    // All values of success are valid.
    SkASSERT(failure == sk_memory_order_relaxed ||
             failure == sk_memory_order_seq_cst ||
             failure == sk_memory_order_acquire ||
             failure == sk_memory_order_consume);
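    // Coarse check that the failure ordering is no stronger than the success ordering;
    // this relies on the sk_memory_order enumerators being declared weakest-to-strongest.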
    SkASSERT(failure <= success);
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_compare_exchange_strong_explicit(ap, expected, desired,
                                                        (std::memory_order)success,
                                                        (std::memory_order)failure);
}

template <typename T>
T sk_atomic_exchange(T* ptr, T val, sk_memory_order mo) {
    // All values of mo are valid.
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_exchange_explicit(ap, val, (std::memory_order)mo);
}

// ~~~~~~~~ Legacy APIs ~~~~~~~~~

// From here down we have shims for our old atomics API; callers should gradually be weaned off of them.
// We use the default sequentially-consistent memory order to make things simple
// and to match the practical reality of our old _sync and _win implementations.

inline int32_t sk_atomic_inc(int32_t* ptr)            { return sk_atomic_fetch_add(ptr, +1); }
inline int32_t sk_atomic_dec(int32_t* ptr)            { return sk_atomic_fetch_add(ptr, -1); }
inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_add(ptr,  v); }

inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); }

inline bool sk_atomic_cas(int32_t* ptr, int32_t expected, int32_t desired) {
    return sk_atomic_compare_exchange(ptr, &expected, desired);
}

// Returns the value that was at *ptr before the call: on failure,
// compare_exchange writes the observed value back into 'expected'.
inline void* sk_atomic_cas(void** ptr, void* expected, void* desired) {
    (void)sk_atomic_compare_exchange(ptr, &expected, desired);
    return expected;
}

inline int32_t sk_atomic_conditional_inc(int32_t* ptr) {
    int32_t prev = sk_atomic_load(ptr);
    do {
        if (0 == prev) {
            break;
        }
    } while(!sk_atomic_compare_exchange(ptr, &prev, prev+1));
    return prev;
}
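
// sk_atomic_conditional_inc() increments *ptr only if it was non-zero and returns the
// previous value.  A typical (hypothetical) use is taking a strong reference only while
// an object is still alive:
//
//     if (sk_atomic_conditional_inc(&obj->fRefCnt) != 0) {
//         // We now own a reference; the object cannot be destroyed underneath us.
//     }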

template <typename T>
T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); }

template <typename T>
void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); }

// These legacy barriers are now no-ops: the shims above default to sequentially-
// consistent operations, which already provide the ordering the barriers used to supply.
inline void sk_membar_acquire__after_atomic_dec() {}
inline void sk_membar_acquire__after_atomic_conditional_inc() {}

#endif  // SkAtomics_DEFINED