path: root/include/ports/SkAtomics_sync.h
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_sync_DEFINED
#define SkAtomics_sync_DEFINED

// This file is mostly a shim.  We'd like to delete it.  Please don't put much
// effort into maintaining it, and if you find bugs in it, the right fix is to
// delete this file and upgrade your compiler to something that supports
// __atomic builtins or std::atomic.
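//
// For reference, on a compiler with __atomic support the two simplest
// operations here collapse to builtins, roughly like this (a sketch, assuming
// sk_memory_order's enumerators line up with the __ATOMIC_* constants):
//
//     template <typename T>
//     T sk_atomic_load(const T* ptr, sk_memory_order mo) {
//         return __atomic_load_n(ptr, (int)mo);
//     }
//
//     template <typename T>
//     void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
//         __atomic_store_n(ptr, val, (int)mo);
//     }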

static inline void barrier(sk_memory_order mo) {
    asm volatile("" : : : "memory");  // Prevents the compiler from reordering memory accesses.
    #if SK_CPU_X86
        // x86's strong memory model needs a hardware barrier only for seq_cst.
        if (sk_memory_order_seq_cst == mo) { __sync_synchronize(); }
    #else
        // Weaker platforms (e.g. ARM) need one for anything stronger than relaxed.
        if (sk_memory_order_relaxed != mo) { __sync_synchronize(); }
    #endif
}

// These barriers only support our majority use cases: acquire and relaxed loads, release stores.
// For anything more complicated, please consider deleting this file and upgrading your compiler.

template <typename T>
T sk_atomic_load(const T* ptr, sk_memory_order mo) {
    T val = *ptr;
    barrier(mo);  // Barrier after the load keeps later accesses from moving above it (acquire).
    return val;
}

template <typename T>
void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
    barrier(mo);  // Barrier before the store keeps earlier accesses from moving below it (release).
    *ptr = val;
}
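
// A usage sketch of that supported pattern: one thread publishes a value with
// a release store, another observes it with an acquire load (`data` and
// `ready` here are hypothetical shared variables):
//
//     int  data  = 0;
//     bool ready = false;
//
//     // Producer:
//     data = 42;
//     sk_atomic_store(&ready, true, sk_memory_order_release);  // publish
//
//     // Consumer:
//     if (sk_atomic_load(&ready, sk_memory_order_acquire)) {   // observe
//         // The release/acquire pair guarantees data reads as 42 here.
//     }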

template <typename T>
T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order) {
    // __sync builtins act as full barriers, so they satisfy any requested
    // memory order (if over-strongly).  That's why the order is ignored here.
    return __sync_fetch_and_add(ptr, val);
}

template <typename T>
T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order) {
    return __sync_fetch_and_sub(ptr, val);
}
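
// A usage sketch, a hypothetical reference count: increments can be relaxed,
// while the decrement wants acq_rel so the final unref sees all prior writes.
// (This shim issues a full barrier either way.)
//
//     int32_t refs = 1;
//
//     sk_atomic_fetch_add(&refs, int32_t(1), sk_memory_order_relaxed);  // ref()
//     if (1 == sk_atomic_fetch_sub(&refs, int32_t(1),
//                                  sk_memory_order_acq_rel)) {          // unref()
//         // We were the last owner; it's now safe to destroy the object.
//     }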

template <typename T>
bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired, sk_memory_order, sk_memory_order) {
    T prev = __sync_val_compare_and_swap(ptr, *expected, desired);
    if (prev == *expected) {
        return true;   // Swapped: *ptr held *expected and now holds desired.
    }
    *expected = prev;  // Failed: report the value we actually saw, as std::atomic's CAS does.
    return false;
}
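
// A usage sketch, a hypothetical lock-free running maximum.  On failure the
// CAS writes the value it actually found back into prev, so the loop retries
// with fresh information:
//
//     void set_max(int32_t* slot, int32_t value) {
//         int32_t prev = sk_atomic_load(slot, sk_memory_order_relaxed);
//         while (prev < value &&
//                !sk_atomic_compare_exchange(slot, &prev, value,
//                                            sk_memory_order_relaxed,
//                                            sk_memory_order_relaxed)) {
//             // prev now holds *slot's current value; try again.
//         }
//     }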

template <typename T>
T sk_atomic_exchange(T* ptr, T val, sk_memory_order) {
    // There is no __sync exchange.  Emulate it with a CAS loop.
    // (The omitted memory orders below fall back to the seq_cst defaults
    // on the declarations in SkAtomics.h.)
    T prev;
    do {
        prev = sk_atomic_load(ptr);
    } while (!sk_atomic_compare_exchange(ptr, &prev, val));
    return prev;
}

#endif//SkAtomics_sync_DEFINED