1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
|
/*
* Copyright 2006 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SkThread_DEFINED
#define SkThread_DEFINED
#include "SkTypes.h"
// SK_ATOMICS_PLATFORM_H must provide inline implementations for the following declarations.
/** Atomically adds one to the int referenced by addr and returns the previous value.
* No additional memory barrier is required; this must act as a compiler barrier.
*/
static int32_t sk_atomic_inc(int32_t* addr);
/** Atomically adds inc to the int referenced by addr and returns the previous value.
* No additional memory barrier is required; this must act as a compiler barrier.
*/
static int32_t sk_atomic_add(int32_t* addr, int32_t inc);
/** Atomically subtracts one from the int referenced by addr and returns the previous value.
* This must act as a release (SL/S) memory barrier and as a compiler barrier.
*/
static int32_t sk_atomic_dec(int32_t* addr);
/** Atomic compare and set.
* If *addr == before, set *addr to after and return true, otherwise return false.
* This must act as a release (SL/S) memory barrier and as a compiler barrier.
*/
static bool sk_atomic_cas(int32_t* addr, int32_t before, int32_t after);
/** If sk_atomic_dec does not act as an acquire (L/SL) barrier,
* this must act as an acquire (L/SL) memory barrier and as a compiler barrier.
*/
static void sk_membar_acquire__after_atomic_dec();
/** If sk_atomic_conditional_inc does not act as an acquire (L/SL) barrier,
* this must act as an acquire (L/SL) memory barrier and as a compiler barrier.
*/
static void sk_membar_acquire__after_atomic_conditional_inc();
#include SK_ATOMICS_PLATFORM_H
/** Atomically increment the int referenced by addr, but only if its current
 *  value is non-zero. Returns the value observed before the increment
 *  (a return of 0 means no increment took place).
 *  No additional memory barrier is required; this must act as a compiler barrier.
 */
static inline int32_t sk_atomic_conditional_inc(int32_t* addr) {
    for (;;) {
        int32_t prev = *addr;
        if (0 == prev) {
            return 0;  // never resurrect a zero count
        }
        if (sk_atomic_cas(addr, prev, prev + 1)) {
            return prev;  // CAS succeeded; we performed the increment
        }
        // CAS lost a race with another writer; re-read and retry.
    }
}
// SK_BARRIERS_PLATFORM_H must provide implementations for the following declarations:
/** Prevent the compiler from reordering across this barrier. */
static void sk_compiler_barrier();
/** Read T*, with at least an acquire barrier.
*
* Only needs to be implemented for T which can be atomically read.
*/
template <typename T> T sk_acquire_load(T*);
/** Write T*, with at least a release barrier.
*
* Only needs to be implemented for T which can be atomically written.
*/
template <typename T> void sk_release_store(T*, T);
#include SK_BARRIERS_PLATFORM_H
/** SK_MUTEX_PLATFORM_H must provide the following (or equivalent) declarations.
class SkBaseMutex {
public:
void acquire();
void release();
};
class SkMutex : SkBaseMutex {
public:
SkMutex();
~SkMutex();
};
#define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name = ...
#define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name = ...
*/
#include SK_MUTEX_PLATFORM_H
/** Scoped lock: acquires a mutex on construction and guarantees it is
 *  released exactly once, either via an explicit release() call or at
 *  destruction.
 */
class SkAutoMutexAcquire : SkNoncopyable {
public:
    /** Acquire the given mutex for the lifetime of this object. */
    explicit SkAutoMutexAcquire(SkBaseMutex& mutex) : fMutex(&mutex) {
        SkASSERT(fMutex != NULL);
        fMutex->acquire();
    }

    /** As above, but a NULL mutex is tolerated and simply never locked. */
    explicit SkAutoMutexAcquire(SkBaseMutex* mutex) : fMutex(mutex) {
        if (fMutex != NULL) {
            fMutex->acquire();
        }
    }

    /** If the mutex has not been released, release it now. */
    ~SkAutoMutexAcquire() {
        this->release();
    }

    /** If the mutex has not been released, release it now. */
    void release() {
        if (fMutex != NULL) {
            fMutex->release();
            fMutex = NULL;  // guard against a second release
        }
    }

private:
    SkBaseMutex* fMutex;  // NULL once released
};
#define SkAutoMutexAcquire(...) SK_REQUIRE_LOCAL_VAR(SkAutoMutexAcquire)
#endif
|