// src/ports/SkThread_pthread.cpp

/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include "SkThread.h"
#include "SkTLS.h"

#include <pthread.h>
#include <errno.h>

#ifndef SK_BUILD_FOR_ANDROID

/**
 We prefer the GCC intrinsic implementation of the atomic operations over the
 SkMutex-based implementation: the SkMutex version suffers from static
 destructor ordering problems.
 Note that clang also defines the GCC version macros and implements the
 intrinsics.
 TODO: Verify that gcc-style __sync_* intrinsics work on ARM.
 According to the LLVM 2.7 release notes, the intrinsics are supported on ARM
 in LLVM 2.7+:
 http://llvm.org/releases/2.7/docs/ReleaseNotes.html
*/
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || __GNUC__ > 4
    #if (defined(__x86_64) || defined(__i386__))
        #define GCC_INTRINSIC
    #endif
#endif
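
// A sketch of what the ARM case might look like once the TODO above is
// verified (an assumption on our part, deliberately left disabled here):
//
//     #if defined(__GNUC__) && defined(__arm__)
//         #define GCC_INTRINSIC
//     #endif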

#if defined(GCC_INTRINSIC)

// Atomically adds 1 to *addr and returns the value *addr held before the add.
int32_t sk_atomic_inc(int32_t* addr)
{
    return __sync_fetch_and_add(addr, 1);
}

// Atomically subtracts 1 from *addr and returns the value *addr held before
// the subtraction.
int32_t sk_atomic_dec(int32_t* addr)
{
    return __sync_fetch_and_add(addr, -1);
}

// Empty on purpose: the __sync_* intrinsics above already act as full memory
// barriers, so no extra acquire fence is needed after the decrement.
void sk_membar_aquire__after_atomic_dec() { }

// Atomically increments *addr unless it is already 0. Returns the value seen
// before the attempted increment; a return of 0 means nothing was changed.
int32_t sk_atomic_conditional_inc(int32_t* addr)
{
    int32_t value = *addr;

    while (true) {
        if (value == 0) {
            return 0;
        }

        // Try to publish value + 1. On failure, before holds the value some
        // other thread installed, so loop and retry with that.
        int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);

        if (before == value) {
            return value;
        } else {
            value = before;
        }
    }
}

// Empty on purpose: __sync_val_compare_and_swap is itself a full barrier.
void sk_membar_aquire__after_atomic_conditional_inc() { }
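
// Usage sketch (illustrative only, not part of this file's API): a weak
// reference's "try to promote" step maps naturally onto
// sk_atomic_conditional_inc, e.g.
//
//     bool try_ref(int32_t* refCnt) {   // hypothetical helper
//         // Succeeds only if the strong count was still non-zero.
//         return sk_atomic_conditional_inc(refCnt) != 0;
//     }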

#else

// Fallback: funnel every "atomic" op through one process-wide mutex. This is
// the variant with the static destructor ordering hazard noted above.
static SkMutex gAtomicMutex;

int32_t sk_atomic_inc(int32_t* addr)
{
    SkAutoMutexAcquire ac(gAtomicMutex);

    int32_t value = *addr;
    *addr = value + 1;
    return value;
}

int32_t sk_atomic_dec(int32_t* addr)
{
    SkAutoMutexAcquire ac(gAtomicMutex);

    int32_t value = *addr;
    *addr = value - 1;
    return value;
}
// Empty on purpose: the mutex acquire/release above already provides the
// required ordering.
void sk_membar_aquire__after_atomic_dec() { }

int32_t sk_atomic_conditional_inc(int32_t* addr)
{
    SkAutoMutexAcquire ac(gAtomicMutex);

    int32_t value = *addr;
    if (value != 0) ++*addr;
    return value;
}
// Empty on purpose, for the same reason as above.
void sk_membar_aquire__after_atomic_conditional_inc() { }

#endif

#endif // SK_BUILD_FOR_ANDROID
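
// Usage sketch (illustrative only): a minimal ref count built on the
// primitives above, assuming the count starts at 1 for a new object.
//
//     void ref(int32_t* refCnt)   { sk_atomic_inc(refCnt); }
//     void unref(int32_t* refCnt) {
//         if (1 == sk_atomic_dec(refCnt)) {   // we just dropped the last ref
//             sk_membar_aquire__after_atomic_dec();
//             // ... safe to destroy the object here ...
//         }
//     }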

//////////////////////////////////////////////////////////////////////////////

static void print_pthread_error(int status) {
    switch (status) {
    case 0: // success
        break;
    case EINVAL:
        SkDebugf("pthread error [%d] EINVAL\n", status);
        break;
    case EBUSY:
        SkDebugf("pthread error [%d] EBUSY\n", status);
        break;
    default:
        SkDebugf("pthread error [%d] unknown\n", status);
        break;
    }
}

#ifdef SK_USE_POSIX_THREADS

SkMutex::SkMutex() {
    int status;

    status = pthread_mutex_init(&fMutex, NULL);
    if (status != 0) {
        print_pthread_error(status);
        SkASSERT(0 == status);
    }
}

SkMutex::~SkMutex() {
    int status = pthread_mutex_destroy(&fMutex);

    // report and assert on any failure (SkASSERT compiles away in release)
    if (status != 0) {
        print_pthread_error(status);
        SkASSERT(0 == status);
    }
}

#else // !SK_USE_POSIX_THREADS

SkMutex::SkMutex() {
    // fStorage is an opaque byte array declared in the header; make sure a
    // pthread_mutex_t actually fits in it before initializing one in place.
    if (sizeof(pthread_mutex_t) > sizeof(fStorage)) {
        SkDEBUGF(("pthread mutex size = %zu\n", sizeof(pthread_mutex_t)));
        SkDEBUGFAIL("mutex storage is too small");
    }

    int status;
    pthread_mutexattr_t attr;

    status = pthread_mutexattr_init(&attr);
    print_pthread_error(status);
    SkASSERT(0 == status);
    
    status = pthread_mutex_init((pthread_mutex_t*)fStorage, &attr);
    print_pthread_error(status);
    SkASSERT(0 == status);

    // the attr object is only needed during init; destroy it to avoid a leak
    status = pthread_mutexattr_destroy(&attr);
    print_pthread_error(status);
    SkASSERT(0 == status);
}

SkMutex::~SkMutex() {
    int status = pthread_mutex_destroy((pthread_mutex_t*)fStorage);
#if 0
    // only report errors on non-global mutexes
    if (!fIsGlobal) {
        print_pthread_error(status);
        SkASSERT(0 == status);
    }
#endif
    (void)status;  // otherwise unused while the check above is disabled
}

void SkMutex::acquire() {
    int status = pthread_mutex_lock((pthread_mutex_t*)fStorage);
    print_pthread_error(status);
    SkASSERT(0 == status);
}

void SkMutex::release() {
    int status = pthread_mutex_unlock((pthread_mutex_t*)fStorage);
    print_pthread_error(status);
    SkASSERT(0 == status);
}

#endif // !SK_USE_POSIX_THREADS
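
// Usage sketch (illustrative only): SkMutex is normally paired with the RAII
// helper SkAutoMutexAcquire (used earlier in this file), e.g.
//
//     static SkMutex gCacheMutex;               // hypothetical example mutex
//     void touch_cache() {
//         SkAutoMutexAcquire ac(gCacheMutex);   // acquire() here ...
//         // ... mutate shared state ...
//     }                                         // ... release() when ac dies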

///////////////////////////////////////////////////////////////////////////////

static pthread_key_t gSkTLSKey;
static pthread_once_t gSkTLSKey_Once = PTHREAD_ONCE_INIT;

// Runs at most once per process (guarded by pthread_once below).
// SkTLS::Destructor is invoked at thread exit for any non-NULL value still
// stored in the slot.
static void sk_tls_make_key() {
    (void)pthread_key_create(&gSkTLSKey, SkTLS::Destructor);
}

void* SkTLS::PlatformGetSpecific(bool forceCreateTheSlot) {
    // TODO: should we use forceCreateTheSlot to skip the pthread_once call and
    // just return NULL if we have never been called with
    // forceCreateTheSlot==true?

    (void)pthread_once(&gSkTLSKey_Once, sk_tls_make_key);
    return pthread_getspecific(gSkTLSKey);
}

void SkTLS::PlatformSetSpecific(void* ptr) {
    (void)pthread_setspecific(gSkTLSKey, ptr);
}
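
// Usage sketch (illustrative only) of how the two platform hooks compose; the
// factory below is hypothetical:
//
//     void* data = SkTLS::PlatformGetSpecific(true);
//     if (NULL == data) {
//         data = create_thread_record();     // hypothetical per-thread state
//         SkTLS::PlatformSetSpecific(data);  // SkTLS::Destructor reclaims it
//     }                                      // at thread exit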