aboutsummaryrefslogtreecommitdiffhomepage
path: root/include/private/SkAtomics.h
diff options
context:
space:
mode:
author: mtklein <mtklein@chromium.org> 2015-11-12 11:07:53 -0800
committer: Commit bot <commit-bot@chromium.org> 2015-11-12 11:07:53 -0800
commit 23267db67824a92f045649f571ae1ceaf2cc2b28 (patch)
tree 6058957cf726013eaf2c8bdb919d9b98f6a0a194 /include/private/SkAtomics.h
parent 64593525debc63339e1bf9ddb8a0e998f7d976a3 (diff)
SkAtomic: always use std::atomic
We were doing it on Windows, now do it everywhere. This just changes the backend. We could think about another step to actually replacing all our sk_atomic_... with std atomic stuff.

CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-TSAN-Trybot
TBR=reed@google.com

Only deleting from include/...

Review URL: https://codereview.chromium.org/1441773002
Diffstat (limited to 'include/private/SkAtomics.h')
-rw-r--r--  include/private/SkAtomics.h  71
1 file changed, 62 insertions, 9 deletions
diff --git a/include/private/SkAtomics.h b/include/private/SkAtomics.h
index 56eace4447..249723d785 100644
--- a/include/private/SkAtomics.h
+++ b/include/private/SkAtomics.h
@@ -10,6 +10,9 @@
// This file is not part of the public Skia API.
#include "SkTypes.h"
+#include <atomic>
+
+// ~~~~~~~~ APIs ~~~~~~~~~
enum sk_memory_order {
sk_memory_order_relaxed,
@@ -86,15 +89,65 @@ private:
T fVal;
};
-// IWYU pragma: begin_exports
-#if defined(_MSC_VER)
- #include "../ports/SkAtomics_std.h"
-#elif !defined(SK_BUILD_FOR_IOS) && defined(__ATOMIC_RELAXED)
- #include "../ports/SkAtomics_atomic.h"
-#else
- #include "../ports/SkAtomics_sync.h"
-#endif
-// IWYU pragma: end_exports
+// ~~~~~~~~ Implementations ~~~~~~~~~
+
+template <typename T>
+T sk_atomic_load(const T* ptr, sk_memory_order mo) {
+ SkASSERT(mo == sk_memory_order_relaxed ||
+ mo == sk_memory_order_seq_cst ||
+ mo == sk_memory_order_acquire ||
+ mo == sk_memory_order_consume);
+ const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr);
+ return std::atomic_load_explicit(ap, (std::memory_order)mo);
+}
+
+template <typename T>
+void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
+ SkASSERT(mo == sk_memory_order_relaxed ||
+ mo == sk_memory_order_seq_cst ||
+ mo == sk_memory_order_release);
+ std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
+ return std::atomic_store_explicit(ap, val, (std::memory_order)mo);
+}
+
+template <typename T>
+T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
+ // All values of mo are valid.
+ std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
+ return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo);
+}
+
+template <typename T>
+T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) {
+ // All values of mo are valid.
+ std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
+ return std::atomic_fetch_sub_explicit(ap, val, (std::memory_order)mo);
+}
+
+template <typename T>
+bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
+ sk_memory_order success,
+ sk_memory_order failure) {
+ // All values of success are valid.
+ SkASSERT(failure == sk_memory_order_relaxed ||
+ failure == sk_memory_order_seq_cst ||
+ failure == sk_memory_order_acquire ||
+ failure == sk_memory_order_consume);
+ SkASSERT(failure <= success);
+ std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
+ return std::atomic_compare_exchange_strong_explicit(ap, expected, desired,
+ (std::memory_order)success,
+ (std::memory_order)failure);
+}
+
+template <typename T>
+T sk_atomic_exchange(T* ptr, T val, sk_memory_order mo) {
+ // All values of mo are valid.
+ std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
+ return std::atomic_exchange_explicit(ap, val, (std::memory_order)mo);
+}
+
+// ~~~~~~~~ Legacy APIs ~~~~~~~~~
// From here down we have shims for our old atomics API, to be weaned off of.
// We use the default sequentially-consistent memory order to make things simple