path: root/include
author    bungeman@google.com <bungeman@google.com@2bbb7eff-a529-9590-31e7-b0007b416f81>  2013-12-18 15:27:39 +0000
committer bungeman@google.com <bungeman@google.com@2bbb7eff-a529-9590-31e7-b0007b416f81>  2013-12-18 15:27:39 +0000
commit d9947f605a335363b0a0541d6d8cb7a7113ed788 (patch)
tree   adfece12609a5ca87c8e2ea387c0dba31699f2b6 /include
parent e2380570de11226a93362d017e93c4790641c027 (diff)
Split atomic and mutex implementations and make inlinable.
Skia cannot use Chromium's implementation of mutex (Lock) due to static initializers. However, we would like to be able to use Chromium's implementation of atomics. This motivates the split of implementation.

Skia's atomic and mutex calls should be inlinable, especially the atomics. These calls often compile down to very few instructions, and we currently have the overhead of a function call. This motivates the header implementation.

There is still a desire for the build system to select the implementation, so the SK_XXX_PLATFORM_H pattern for header files is introduced. This allows the build system to control which platform-specific header files are chosen.

The Chromium side changes (most of which will need to go in before this change) can be found at https://codereview.chromium.org/19477005/ . The Chromium side changes after this lands can be seen at https://codereview.chromium.org/98073013 .

Review URL: https://codereview.chromium.org/19808007

git-svn-id: http://skia.googlecode.com/svn/trunk@12738 2bbb7eff-a529-9590-31e7-b0007b416f81
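For context, the header selected by SK_ATOMICS_PLATFORM_H is expected to supply inline definitions of the sk_atomic_* and sk_membar_* functions that SkThread.h now declares. The real SkAtomics_*.h files live in src/ports and are outside this include/ diff; a minimal sketch of such a header, assuming a GCC-compatible toolchain, mirrors the __sync-based implementation this change removes from SkThread_platform.h:

// Hypothetical SkAtomics_xxx.h sketch -- illustrative only, not part of this change.
#include <stdint.h>

static inline int32_t sk_atomic_inc(int32_t* addr) {
    // __sync builtins issue a full barrier and return the previous value.
    return __sync_fetch_and_add(addr, 1);
}

static inline int32_t sk_atomic_add(int32_t* addr, int32_t inc) {
    return __sync_fetch_and_add(addr, inc);
}

static inline int32_t sk_atomic_dec(int32_t* addr) {
    return __sync_fetch_and_add(addr, -1);
}

// The full barrier above already provides acquire semantics, so this is a no-op.
static inline void sk_membar_acquire__after_atomic_dec() { }

static inline int32_t sk_atomic_conditional_inc(int32_t* addr) {
    // Increment *addr unless it is zero; loop until the CAS observes a stable value.
    int32_t value = *addr;
    while (true) {
        if (value == 0) {
            return 0;
        }
        int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);
        if (before == value) {
            return value;
        }
        value = before;
    }
}

static inline void sk_membar_acquire__after_atomic_conditional_inc() { }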
Diffstat (limited to 'include')
-rw-r--r--   include/config/SkUserConfig.h            9
-rw-r--r--   include/core/SkInstCnt.h                 2
-rw-r--r--   include/core/SkPostConfig.h             20
-rw-r--r--   include/core/SkRefCnt.h                  6
-rw-r--r--   include/core/SkThread.h                 70
-rw-r--r--   include/core/SkThread_platform.h       194
-rw-r--r--   include/core/SkWeakRefCnt.h              8
-rw-r--r--   include/gpu/GrBackendEffectFactory.h     2
8 files changed, 90 insertions, 221 deletions
diff --git a/include/config/SkUserConfig.h b/include/config/SkUserConfig.h
index 72994e4060..534c79dde1 100644
--- a/include/config/SkUserConfig.h
+++ b/include/config/SkUserConfig.h
@@ -186,4 +186,13 @@
*/
//#define SK_PDF_USE_PATHOPS
+/* Skia uses these defines as the target of include preprocessor directives.
+ * The header files pointed to by these defines provide declarations and
+ * possibly inline implementations of threading primitives.
+ *
+ * See SkThread.h for documentation on what these includes must contain.
+ */
+//#define SK_ATOMICS_PLATFORM_H "SkAtomics_xxx.h"
+//#define SK_MUTEX_PLATFORM_H "SkMutex_xxx.h"
+
#endif
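Uncommented, these defines would point the includes at an embedder-provided header, for example (the header names below are hypothetical, not files added by this change):

#define SK_ATOMICS_PLATFORM_H "SkAtomics_chromium.h"
#define SK_MUTEX_PLATFORM_H   "SkMutex_chromium.h"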
diff --git a/include/core/SkInstCnt.h b/include/core/SkInstCnt.h
index e38c42d917..89bbfa1126 100644
--- a/include/core/SkInstCnt.h
+++ b/include/core/SkInstCnt.h
@@ -21,7 +21,7 @@
#if SK_ENABLE_INST_COUNT
#include "SkTArray.h"
-#include "SkThread_platform.h"
+#include "SkThread.h"
extern bool gPrintInstCount;
diff --git a/include/core/SkPostConfig.h b/include/core/SkPostConfig.h
index 0d904ea51c..323d1e8b6d 100644
--- a/include/core/SkPostConfig.h
+++ b/include/core/SkPostConfig.h
@@ -371,4 +371,24 @@
# define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 1
#endif
+//////////////////////////////////////////////////////////////////////
+
+#ifndef SK_ATOMICS_PLATFORM_H
+# if defined(SK_BUILD_FOR_WIN)
+# define SK_ATOMICS_PLATFORM_H "../../src/ports/SkAtomics_win.h"
+# elif defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+# define SK_ATOMICS_PLATFORM_H "../../src/ports/SkAtomics_android.h"
+# else
+# define SK_ATOMICS_PLATFORM_H "../../src/ports/SkAtomics_sync.h"
+# endif
+#endif
+
+#ifndef SK_MUTEX_PLATFORM_H
+# if defined(SK_BUILD_FOR_WIN)
+# define SK_MUTEX_PLATFORM_H "../../src/ports/SkMutex_win.h"
+# else
+# define SK_MUTEX_PLATFORM_H "../../src/ports/SkMutex_pthread.h"
+# endif
+#endif
+
#endif // SkPostConfig_DEFINED
diff --git a/include/core/SkRefCnt.h b/include/core/SkRefCnt.h
index b010faf760..28591920a6 100644
--- a/include/core/SkRefCnt.h
+++ b/include/core/SkRefCnt.h
@@ -50,7 +50,7 @@ public:
bool unique() const {
bool const unique = (1 == fRefCnt);
if (unique) {
- // Aquire barrier (L/SL), if not provided by load of fRefCnt.
+ // Acquire barrier (L/SL), if not provided by load of fRefCnt.
// Prevents user's 'unique' code from happening before decrements.
//TODO: issue the barrier.
}
@@ -72,9 +72,9 @@ public:
SkASSERT(fRefCnt > 0);
// Release barrier (SL/S), if not provided below.
if (sk_atomic_dec(&fRefCnt) == 1) {
- // Aquire barrier (L/SL), if not provided above.
+ // Acquire barrier (L/SL), if not provided above.
// Prevents code in dispose from happening before the decrement.
- sk_membar_aquire__after_atomic_dec();
+ sk_membar_acquire__after_atomic_dec();
internal_dispose();
}
}
diff --git a/include/core/SkThread.h b/include/core/SkThread.h
index 487c2bdf9e..412ace31eb 100644
--- a/include/core/SkThread.h
+++ b/include/core/SkThread.h
@@ -1,4 +1,3 @@
-
/*
* Copyright 2006 The Android Open Source Project
*
@@ -6,30 +5,67 @@
* found in the LICENSE file.
*/
-
#ifndef SkThread_DEFINED
#define SkThread_DEFINED
#include "SkTypes.h"
-#include "SkThread_platform.h"
-/****** SkThread_platform needs to define the following...
+// SK_ATOMICS_PLATFORM_H must provide inline implementations for the following declarations.
+
+/** Atomically adds one to the int referenced by addr and returns the previous value.
+ * No additional memory barrier is required; this must act as a compiler barrier.
+ */
+static int32_t sk_atomic_inc(int32_t* addr);
+
+/** Atomically adds inc to the int referenced by addr and returns the previous value.
+ * No additional memory barrier is required; this must act as a compiler barrier.
+ */
+static int32_t sk_atomic_add(int32_t* addr, int32_t inc);
+
+/** Atomically subtracts one from the int referenced by addr and returns the previous value.
+ * This must act as a release (SL/S) memory barrier and as a compiler barrier.
+ */
+static int32_t sk_atomic_dec(int32_t* addr);
+
+/** Atomically adds one to the int referenced by addr iff the referenced int was not 0
+ * and returns the previous value.
+ * No additional memory barrier is required; this must act as a compiler barrier.
+ */
+static int32_t sk_atomic_conditional_inc(int32_t* addr);
+
+/** If sk_atomic_dec does not act as an acquire (L/SL) barrier,
+ * this must act as an acquire (L/SL) memory barrier and as a compiler barrier.
+ */
+static void sk_membar_acquire__after_atomic_dec();
+
+/** If sk_atomic_conditional_inc does not act as an acquire (L/SL) barrier,
+ * this must act as an acquire (L/SL) memory barrier and as a compiler barrier.
+ */
+static void sk_membar_acquire__after_atomic_conditional_inc();
-int32_t sk_atomic_inc(int32_t*);
-int32_t sk_atomic_add(int32_t*, int32_t);
-int32_t sk_atomic_dec(int32_t*);
-int32_t sk_atomic_conditional_inc(int32_t*);
+#include SK_ATOMICS_PLATFORM_H
-class SkMutex {
+
+/** SK_MUTEX_PLATFORM_H must provide the following (or equivalent) declarations.
+
+class SkBaseMutex {
+public:
+ void acquire();
+ void release();
+};
+
+class SkMutex : SkBaseMutex {
public:
SkMutex();
~SkMutex();
-
- void acquire();
- void release();
};
-****************/
+#define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name = ...
+#define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name = ...
+*/
+
+#include SK_MUTEX_PLATFORM_H
+
class SkAutoMutexAcquire : SkNoncopyable {
public:
@@ -38,22 +74,20 @@ public:
mutex.acquire();
}
- SkAutoMutexAcquire(SkBaseMutex* mutex) : fMutex(mutex) {
+ explicit SkAutoMutexAcquire(SkBaseMutex* mutex) : fMutex(mutex) {
if (mutex) {
mutex->acquire();
}
}
- /** If the mutex has not been release, release it now.
- */
+ /** If the mutex has not been released, release it now. */
~SkAutoMutexAcquire() {
if (fMutex) {
fMutex->release();
}
}
- /** If the mutex has not been release, release it now.
- */
+ /** If the mutex has not been released, release it now. */
void release() {
if (fMutex) {
fMutex->release();
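The mutex headers referenced by SK_MUTEX_PLATFORM_H also live in src/ports, outside this include/ diff. A minimal pthread-based sketch satisfying the contract documented above, modeled on the POSIX SkBaseMutex being removed from SkThread_platform.h below (illustrative only, not the actual SkMutex_pthread.h):

// Hypothetical SkMutex_pthread.h sketch.
// SkNoncopyable comes from SkTypes.h, which SkThread.h includes before this header.
#include <pthread.h>

// A POD struct, so SK_DECLARE_STATIC_MUTEX needs no static initializer or finalizer.
struct SkBaseMutex {
    void acquire() { pthread_mutex_lock(&fMutex); }
    void release() { pthread_mutex_unlock(&fMutex); }
    pthread_mutex_t fMutex;
};

// A normal mutex for class members or heap allocations, constructed at runtime.
class SkMutex : public SkBaseMutex, SkNoncopyable {
public:
    SkMutex()  { pthread_mutex_init(&fMutex, NULL); }
    ~SkMutex() { pthread_mutex_destroy(&fMutex); }
};

// POD-style initialization keeps static mutexes out of the static initializer list.
#define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name = { PTHREAD_MUTEX_INITIALIZER }
#define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name = { PTHREAD_MUTEX_INITIALIZER }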
diff --git a/include/core/SkThread_platform.h b/include/core/SkThread_platform.h
deleted file mode 100644
index 7df778cb5e..0000000000
--- a/include/core/SkThread_platform.h
+++ /dev/null
@@ -1,194 +0,0 @@
-
-/*
- * Copyright 2006 The Android Open Source Project
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-
-#ifndef SkThread_platform_DEFINED
-#define SkThread_platform_DEFINED
-
-#if defined(SK_BUILD_FOR_ANDROID)
-
-#if !defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
-
-#include <stdint.h>
-
-/* Just use the GCC atomic intrinsics. They're supported by the NDK toolchain,
- * have reasonable performance, and provide full memory barriers
- */
-static inline __attribute__((always_inline)) int32_t sk_atomic_inc(int32_t *addr) {
- return __sync_fetch_and_add(addr, 1);
-}
-
-static inline __attribute__((always_inline)) int32_t sk_atomic_add(int32_t *addr, int32_t inc) {
- return __sync_fetch_and_add(addr, inc);
-}
-
-static inline __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t *addr) {
- return __sync_fetch_and_add(addr, -1);
-}
-static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_dec() { }
-
-static inline __attribute__((always_inline)) int32_t sk_atomic_conditional_inc(int32_t* addr) {
- int32_t value = *addr;
-
- while (true) {
- if (value == 0) {
- return 0;
- }
-
- int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);
-
- if (before == value) {
- return value;
- } else {
- value = before;
- }
- }
-}
-static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_conditional_inc() { }
-
-#else // SK_BUILD_FOR_ANDROID_FRAMEWORK
-
-/* The platform atomics operations are slightly more efficient than the
- * GCC built-ins, so use them.
- */
-#include <utils/Atomic.h>
-
-#define sk_atomic_inc(addr) android_atomic_inc(addr)
-#define sk_atomic_add(addr, inc) android_atomic_add(inc, addr)
-#define sk_atomic_dec(addr) android_atomic_dec(addr)
-
-static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_dec() {
- //HACK: Android is actually using full memory barriers.
- // Should this change, uncomment below.
- //int dummy;
- //android_atomic_aquire_store(0, &dummy);
-}
-static inline __attribute__((always_inline)) int32_t sk_atomic_conditional_inc(int32_t* addr) {
- while (true) {
- int32_t value = *addr;
- if (value == 0) {
- return 0;
- }
- if (0 == android_atomic_release_cas(value, value + 1, addr)) {
- return value;
- }
- }
-}
-static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_conditional_inc() {
- //HACK: Android is actually using full memory barriers.
- // Should this change, uncomment below.
- //int dummy;
- //android_atomic_aquire_store(0, &dummy);
-}
-
-#endif // SK_BUILD_FOR_ANDROID_FRAMEWORK
-
-#else // !SK_BUILD_FOR_ANDROID
-
-/** Implemented by the porting layer, this function adds one to the int
- specified by the address (in a thread-safe manner), and returns the
- previous value.
- No additional memory barrier is required.
- This must act as a compiler barrier.
-*/
-SK_API int32_t sk_atomic_inc(int32_t* addr);
-
-/** Implemented by the porting layer, this function adds inc to the int
- specified by the address (in a thread-safe manner), and returns the
- previous value.
- No additional memory barrier is required.
- This must act as a compiler barrier.
- */
-SK_API int32_t sk_atomic_add(int32_t* addr, int32_t inc);
-
-/** Implemented by the porting layer, this function subtracts one from the int
- specified by the address (in a thread-safe manner), and returns the
- previous value.
- Expected to act as a release (SL/S) memory barrier and a compiler barrier.
-*/
-SK_API int32_t sk_atomic_dec(int32_t* addr);
-/** If sk_atomic_dec does not act as an aquire (L/SL) barrier, this is expected
- to act as an aquire (L/SL) memory barrier and as a compiler barrier.
-*/
-SK_API void sk_membar_aquire__after_atomic_dec();
-
-/** Implemented by the porting layer, this function adds one to the int
- specified by the address iff the int specified by the address is not zero
- (in a thread-safe manner), and returns the previous value.
- No additional memory barrier is required.
- This must act as a compiler barrier.
-*/
-SK_API int32_t sk_atomic_conditional_inc(int32_t*);
-/** If sk_atomic_conditional_inc does not act as an aquire (L/SL) barrier, this
- is expected to act as an aquire (L/SL) memory barrier and as a compiler
- barrier.
-*/
-SK_API void sk_membar_aquire__after_atomic_conditional_inc();
-
-#endif // !SK_BUILD_FOR_ANDROID
-
-#ifdef SK_USE_POSIX_THREADS
-
-#include <pthread.h>
-
-// A SkBaseMutex is a POD structure that can be directly initialized
-// at declaration time with SK_DECLARE_STATIC/GLOBAL_MUTEX. This avoids the
-// generation of a static initializer in the final machine code (and
-// a corresponding static finalizer).
-//
-struct SkBaseMutex {
- void acquire() { pthread_mutex_lock(&fMutex); }
- void release() { pthread_mutex_unlock(&fMutex); }
- pthread_mutex_t fMutex;
-};
-
-// Using POD-style initialization prevents the generation of a static initializer
-// and keeps the acquire() implementation small and fast.
-#define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name = { PTHREAD_MUTEX_INITIALIZER }
-
-// Special case used when the static mutex must be available globally.
-#define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name = { PTHREAD_MUTEX_INITIALIZER }
-
-// A normal mutex that requires to be initialized through normal C++ construction,
-// i.e. when it's a member of another class, or allocated on the heap.
-class SK_API SkMutex : public SkBaseMutex, SkNoncopyable {
-public:
- SkMutex();
- ~SkMutex();
-};
-
-#else // !SK_USE_POSIX_THREADS
-
-// In the generic case, SkBaseMutex and SkMutex are the same thing, and we
-// can't easily get rid of static initializers.
-//
-class SK_API SkMutex : SkNoncopyable {
-public:
- SkMutex();
- ~SkMutex();
-
- void acquire();
- void release();
-
-private:
- bool fIsGlobal;
- enum {
- kStorageIntCount = 64
- };
- uint32_t fStorage[kStorageIntCount];
-};
-
-typedef SkMutex SkBaseMutex;
-
-#define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name
-#define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name
-
-#endif // !SK_USE_POSIX_THREADS
-
-
-#endif
diff --git a/include/core/SkWeakRefCnt.h b/include/core/SkWeakRefCnt.h
index e2a7308443..210dcc9f1c 100644
--- a/include/core/SkWeakRefCnt.h
+++ b/include/core/SkWeakRefCnt.h
@@ -89,9 +89,9 @@ public:
*/
bool SK_WARN_UNUSED_RESULT try_ref() const {
if (sk_atomic_conditional_inc(&fRefCnt) != 0) {
- // Aquire barrier (L/SL), if not provided above.
+ // Acquire barrier (L/SL), if not provided above.
// Prevents subsequent code from happening before the increment.
- sk_membar_aquire__after_atomic_conditional_inc();
+ sk_membar_acquire__after_atomic_conditional_inc();
return true;
}
return false;
@@ -115,9 +115,9 @@ public:
SkASSERT(fWeakCnt > 0);
// Release barrier (SL/S), if not provided below.
if (sk_atomic_dec(&fWeakCnt) == 1) {
- // Aquire barrier (L/SL), if not provided above.
+ // Acquire barrier (L/SL), if not provided above.
// Prevents code in destructor from happening before the decrement.
- sk_membar_aquire__after_atomic_dec();
+ sk_membar_acquire__after_atomic_dec();
#ifdef SK_DEBUG
// so our destructor won't complain
fWeakCnt = 1;
diff --git a/include/gpu/GrBackendEffectFactory.h b/include/gpu/GrBackendEffectFactory.h
index b3f52fbb04..d115a54647 100644
--- a/include/gpu/GrBackendEffectFactory.h
+++ b/include/gpu/GrBackendEffectFactory.h
@@ -10,7 +10,7 @@
#include "GrTypes.h"
#include "SkTemplates.h"
-#include "SkThread_platform.h"
+#include "SkThread.h"
#include "SkTypes.h"
/** Given a GrEffect of a particular type, creates the corresponding graphics-backend-specific