author    mtklein <mtklein@chromium.org>            2015-02-02 12:22:07 -0800
committer Commit bot <commit-bot@chromium.org>      2015-02-02 12:22:07 -0800
commit a669bc7a7ae7580c5cd92067aeb95d09e64ea720 (patch)
tree   2ab6086a84d997e7dd01b858a5d9f6bbc43f57c9 /include/ports/SkAtomics_sync.h
parent 465206af184f58e8097e7f4f414b791232627c31 (diff)
Atomics overhaul.
This merges and refactors SkAtomics.h and SkBarriers.h into SkAtomics.h and
some ports/ implementations. The major new feature is that we can express
memory orders explicitly rather than only through comments.

The porting layer is reduced to four template functions:
  - sk_atomic_load
  - sk_atomic_store
  - sk_atomic_fetch_add
  - sk_atomic_compare_exchange
From those four we can reconstruct all our previous sk_atomic_foo.

There are three ports:
  - SkAtomics_std:    uses C++11 <atomic>, used with MSVC
  - SkAtomics_atomic: uses newer GCC/Clang intrinsics, used on not-MSVC where possible
  - SkAtomics_sync:   uses older GCC/Clang intrinsics, used where SkAtomics_atomic not supported

No public API changes.

TBR=reed@google.com
BUG=skia:

Review URL: https://codereview.chromium.org/896553002
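The commit message says every previous sk_atomic_foo helper can be rebuilt from the four new
primitives. A minimal sketch of what such wrappers could look like, assuming SkAtomics.h is on
the include path; the wrapper names and the seq_cst default shown here are illustrative, not
necessarily the exact choices in the refactored header:

    #include <cstdint>
    #include "SkAtomics.h"  // assumed include; provides the four primitives and sk_memory_order

    // Old-style increment: a fetch_add of +1 with the strongest ordering.
    static inline int32_t sk_atomic_inc(int32_t* addr) {
        return sk_atomic_fetch_add(addr, (int32_t)1, sk_memory_order_seq_cst);
    }

    // Old-style decrement: a fetch_add of -1.
    static inline int32_t sk_atomic_dec(int32_t* addr) {
        return sk_atomic_fetch_add(addr, (int32_t)-1, sk_memory_order_seq_cst);
    }

    // Old-style bool CAS: compare_exchange reports success; unlike the old API it also
    // writes the observed value back into 'before' on failure, which this wrapper ignores.
    static inline bool sk_atomic_cas(int32_t* addr, int32_t before, int32_t after) {
        return sk_atomic_compare_exchange(addr, &before, after,
                                          sk_memory_order_seq_cst, sk_memory_order_seq_cst);
    }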
Diffstat (limited to 'include/ports/SkAtomics_sync.h')
-rw-r--r--  include/ports/SkAtomics_sync.h | 78
1 file changed, 37 insertions(+), 41 deletions(-)
diff --git a/include/ports/SkAtomics_sync.h b/include/ports/SkAtomics_sync.h
index 9389c00103..66da4d35ee 100644
--- a/include/ports/SkAtomics_sync.h
+++ b/include/ports/SkAtomics_sync.h
@@ -1,55 +1,51 @@
-/*
- * Copyright 2013 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
#ifndef SkAtomics_sync_DEFINED
#define SkAtomics_sync_DEFINED
-/** GCC/Clang __sync based atomics. */
-
-#include <stdint.h>
-
-static inline __attribute__((always_inline)) int32_t sk_atomic_inc(int32_t* addr) {
- return __sync_fetch_and_add(addr, 1);
+// This file is mostly a shim. We'd like to delete it. Please don't put much
+// effort into maintaining it, and if you find bugs in it, the right fix is to
+// delete this file and upgrade your compiler to something that supports
+// __atomic builtins or std::atomic.
+
+static inline void barrier(sk_memory_order mo) {
+ asm volatile("" : : : "memory"); // Prevents the compiler from reordering code.
+ #if SK_CPU_X86
+ // On x86, we generally don't need an extra memory barrier for loads or stores.
+ if (sk_memory_order_seq_cst == mo) { __sync_synchronize(); }
+ #else
+ // On other platforms (e.g. ARM) we do unless the memory order is relaxed.
+ if (sk_memory_order_relaxed != mo) { __sync_synchronize(); }
+ #endif
}
-static inline __attribute__((always_inline)) int64_t sk_atomic_inc(int64_t* addr) {
-#if defined(__mips__) && !defined(__LP64__) && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- /** Some versions of the GCC 32-bit MIPS toolchains (e.g. 4.8) for android are missing
- * support for the __sync* functions that operate on 64-bit values. The workaround
- * is to use __atomic* functions until we can move everything to <stdatomic.h>.
- */
- return __atomic_fetch_add(addr, 1, __ATOMIC_SEQ_CST);
-#else
- return __sync_fetch_and_add(addr, 1);
-#endif
-}
+// These barriers only support our majority use cases: acquire and relaxed loads, release stores.
+// For anything more complicated, please consider deleting this file and upgrading your compiler.
-static inline __attribute__((always_inline)) int32_t sk_atomic_add(int32_t* addr, int32_t inc) {
- return __sync_fetch_and_add(addr, inc);
+template <typename T>
+T sk_atomic_load(const T* ptr, sk_memory_order mo) {
+ T val = *ptr;
+ barrier(mo);
+ return val;
}
-static inline __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t* addr) {
- return __sync_fetch_and_add(addr, -1);
+template <typename T>
+void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
+ barrier(mo);
+ *ptr = val;
}
-static inline __attribute__((always_inline)) void sk_membar_acquire__after_atomic_dec() { }
-
-static inline __attribute__((always_inline)) bool sk_atomic_cas(int32_t* addr,
- int32_t before,
- int32_t after) {
- return __sync_bool_compare_and_swap(addr, before, after);
+template <typename T>
+T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order) {
+ return __sync_fetch_and_add(ptr, val);
}
-static inline __attribute__((always_inline)) void* sk_atomic_cas(void** addr,
- void* before,
- void* after) {
- return __sync_val_compare_and_swap(addr, before, after);
+template <typename T>
+bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired, sk_memory_order, sk_memory_order) {
+ T prev = __sync_val_compare_and_swap(ptr, *expected, desired);
+ if (prev == *expected) {
+ return true;
+ }
+ *expected = prev;
+ return false;
}
-static inline __attribute__((always_inline)) void sk_membar_acquire__after_atomic_conditional_inc() { }
-
-#endif
+#endif//SkAtomics_sync_DEFINED
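The new shim's comments limit it to acquire and relaxed loads plus release stores. As an
illustration of that intended pattern (not part of the commit: the globals, the functions, and
the acquire/release order names mirroring std::memory_order are assumptions for this sketch),
a release-store / acquire-load handoff built on the shim might look like this:

    #include <stdint.h>
    #include "SkAtomics.h"  // assumed include; provides sk_atomic_load/store and sk_memory_order

    // Hypothetical globals for the example.
    static int32_t gPayload = 0;
    static int32_t gReady   = 0;

    void publish() {
        sk_atomic_store(&gPayload, (int32_t)42, sk_memory_order_relaxed);
        // Release store: barrier(mo) runs before the store in the shim, so on ARM this
        // issues __sync_synchronize() and on x86 only a compiler barrier.
        sk_atomic_store(&gReady, (int32_t)1, sk_memory_order_release);
    }

    int32_t consume() {
        // Acquire load: barrier(mo) runs after the load, keeping later reads from
        // being hoisted above it.
        while (sk_atomic_load(&gReady, sk_memory_order_acquire) != 1) {
            /* spin */
        }
        return sk_atomic_load(&gPayload, sk_memory_order_relaxed);  // observes 42 once gReady is 1
    }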