/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_DEFINED
#define SkAtomics_DEFINED

// This file is not part of the public Skia API.
#include "SkTypes.h"

enum sk_memory_order {
    sk_memory_order_relaxed,
    sk_memory_order_consume,
    sk_memory_order_acquire,
    sk_memory_order_release,
    sk_memory_order_acq_rel,
    sk_memory_order_seq_cst,
};
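
// These orders are modeled on C++11 std::memory_order and carry the same
// meanings; each port included below maps them onto its platform's primitives.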

template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_sub(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);
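
// On failure, sk_atomic_compare_exchange writes the value it actually
// observed back into *expected, which makes retry loops natural. A minimal
// sketch (illustrative only, not part of this header) that atomically
// doubles a counter:
//
//     void atomic_double(int32_t* ptr) {
//         int32_t prev = sk_atomic_load(ptr);
//         while (!sk_atomic_compare_exchange(ptr, &prev, prev * 2)) {
//             // prev now holds the latest value of *ptr; try again.
//         }
//     }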

template <typename T>
T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst);
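
// A minimal sketch (illustrative only, not part of this header) of the usual
// release/acquire pairing: a writer fills in plain data, then publishes a
// flag with release; a reader that observes the flag with acquire is
// guaranteed to see the data. The names gData/gReady are hypothetical.
//
//     int32_t gData;
//     int32_t gReady;   // accessed only via sk_atomic_load/sk_atomic_store
//
//     void publish() {
//         gData = 42;
//         sk_atomic_store(&gReady, 1, sk_memory_order_release);
//     }
//
//     void consume() {
//         if (sk_atomic_load(&gReady, sk_memory_order_acquire)) {
//             SkASSERT(42 == gData);   // the write to gData is visible here
//         }
//     }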

// A little wrapper class for small T (think, builtins: int, float, void*) to
// ensure they're always used atomically.  This is our stand-in for std::atomic<T>.
template <typename T>
class SkAtomic : SkNoncopyable {
public:
    SkAtomic() {}
    explicit SkAtomic(const T& val) : fVal(val) {}

    // It is essential we return by value rather than by const&.  fVal may change at any time.
    T load(sk_memory_order mo = sk_memory_order_seq_cst) const {
        return sk_atomic_load(&fVal, mo);
    }

    void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
        sk_atomic_store(&fVal, val, mo);
    }

    // Alias for .load(sk_memory_order_seq_cst).
    operator T() const {
        return this->load();
    }

    // Alias for .store(v, sk_memory_order_seq_cst).
    T operator=(const T& v) {
        this->store(v);
        return v;
    }

    T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
        return sk_atomic_fetch_add(&fVal, val, mo);
    }

    T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
        return sk_atomic_fetch_sub(&fVal, val, mo);
    }

    bool compare_exchange(T* expected, const T& desired,
                          sk_memory_order success = sk_memory_order_seq_cst,
                          sk_memory_order failure = sk_memory_order_seq_cst) {
        return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure);
    }
private:
    T fVal;
};
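
// A minimal sketch (illustrative only, not part of this header) of SkAtomic
// as a shared event counter; relaxed order suffices when only the count
// itself matters, not ordering against other memory:
//
//     SkAtomic<int32_t> gHits(0);
//
//     void record_hit()     { gHits.fetch_add(1, sk_memory_order_relaxed); }
//     int32_t hits_so_far() { return gHits.load(); }   // seq_cst by default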

// IWYU pragma: begin_exports
#if defined(_MSC_VER)
    #include "../ports/SkAtomics_std.h"     // MSVC: implemented on std::atomic.
#elif !defined(SK_BUILD_FOR_IOS) && defined(__ATOMIC_RELAXED)
    #include "../ports/SkAtomics_atomic.h"  // GCC/Clang: __atomic builtins.
#else
    #include "../ports/SkAtomics_sync.h"    // Fallback: legacy __sync builtins.
#endif
// IWYU pragma: end_exports

// From here down we have shims for our old atomics API, to be weaned off of.
// We use the default sequentially-consistent memory order to make things simple
// and to match the practical reality of our old _sync and _win implementations.

// Each returns the previous value of *ptr (fetch-and-modify semantics).
inline int32_t sk_atomic_inc(int32_t* ptr)            { return sk_atomic_fetch_add(ptr, +1); }
inline int32_t sk_atomic_dec(int32_t* ptr)            { return sk_atomic_fetch_add(ptr, -1); }
inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_add(ptr,  v); }

inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); }

// Returns true and performs the swap only if *ptr equaled expected.
inline bool sk_atomic_cas(int32_t* ptr, int32_t expected, int32_t desired) {
    return sk_atomic_compare_exchange(ptr, &expected, desired);
}

// Returns the previous value of *ptr; the swap happened iff that value
// equals expected.
inline void* sk_atomic_cas(void** ptr, void* expected, void* desired) {
    (void)sk_atomic_compare_exchange(ptr, &expected, desired);
    return expected;
}
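
// A minimal sketch (illustrative only) of the classic use of the void*
// overload: racing threads each build a candidate and exactly one wins the
// install. create_foo()/destroy_foo() are hypothetical helpers.
//
//     void* gFoo = nullptr;
//
//     void* get_foo() {
//         void* foo = sk_atomic_load(&gFoo, sk_memory_order_acquire);
//         if (foo) { return foo; }
//         void* mine = create_foo();
//         if (void* theirs = sk_atomic_cas(&gFoo, nullptr, mine)) {
//             destroy_foo(mine);   // lost the race; use the winner's object
//             return theirs;
//         }
//         return mine;
//     }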

// Increments *ptr only if it was not zero, returning the previous value
// (so a return of 0 means *ptr was 0 and was left untouched).
inline int32_t sk_atomic_conditional_inc(int32_t* ptr) {
    int32_t prev = sk_atomic_load(ptr);
    do {
        if (0 == prev) {
            break;
        }
    } while (!sk_atomic_compare_exchange(ptr, &prev, prev+1));
    return prev;
}
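
// A minimal sketch (illustrative only) of the try-ref pattern this supports:
// take a reference only if the object is still alive (count not yet zero).
//
//     bool try_ref(int32_t* refcount) {
//         return sk_atomic_conditional_inc(refcount) != 0;
//     }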

template <typename T>
T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); }

template <typename T>
void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); }

// No-ops: the shims above default to sequentially-consistent memory order,
// so no extra barrier is needed. Kept only for callers of the old API.
inline void sk_membar_acquire__after_atomic_dec() {}
inline void sk_membar_acquire__after_atomic_conditional_inc() {}

#endif//SkAtomics_DEFINED