author    herb <herb@google.com>  2015-09-28 11:24:13 -0700
committer Commit bot <commit-bot@chromium.org>  2015-09-28 11:24:13 -0700
commit    e6e41a8a19976a822de36379db23184ff2f28601 (patch)
tree      a38ffa0ea009999c569795283be068fdd29f6e3b /include/private/SkAtomics.h
parent    e8dc3f2f0b55693b8a087cd2c35d1c0d64e21181 (diff)
Move SkAtomics.h to private.
There are no API changes.

TBR=reed@google.com
BUG=skia:
Review URL: https://codereview.chromium.org/1369333004
Diffstat (limited to 'include/private/SkAtomics.h')
-rw-r--r--  include/private/SkAtomics.h  136
1 file changed, 136 insertions(+), 0 deletions(-)
diff --git a/include/private/SkAtomics.h b/include/private/SkAtomics.h
new file mode 100644
index 0000000000..e947d1a9a4
--- /dev/null
+++ b/include/private/SkAtomics.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkAtomics_DEFINED
+#define SkAtomics_DEFINED
+
+// This file is not part of the public Skia API.
+#include "SkTypes.h"
+
+enum sk_memory_order {
+ sk_memory_order_relaxed,
+ sk_memory_order_consume,
+ sk_memory_order_acquire,
+ sk_memory_order_release,
+ sk_memory_order_acq_rel,
+ sk_memory_order_seq_cst,
+};
+
+template <typename T>
+T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);
+
+template <typename T>
+void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);
+
+template <typename T>
+T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);
+
+template <typename T>
+T sk_atomic_fetch_sub(T*, T, sk_memory_order = sk_memory_order_seq_cst);
+
+template <typename T>
+bool sk_atomic_compare_exchange(T*, T* expected, T desired,
+ sk_memory_order success = sk_memory_order_seq_cst,
+ sk_memory_order failure = sk_memory_order_seq_cst);
+
+template <typename T>
+T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst);
+
+// A little wrapper class for small T (think, builtins: int, float, void*) to
+// ensure they're always used atomically. This is our stand-in for std::atomic<T>.
+template <typename T>
+class SkAtomic : SkNoncopyable {
+public:
+ SkAtomic() {}
+ explicit SkAtomic(const T& val) : fVal(val) {}
+
+ // It is essential we return by value rather than by const&. fVal may change at any time.
+ T load(sk_memory_order mo = sk_memory_order_seq_cst) const {
+ return sk_atomic_load(&fVal, mo);
+ }
+
+ void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
+ sk_atomic_store(&fVal, val, mo);
+ }
+
+ // Alias for .load(sk_memory_order_seq_cst).
+ operator T() const {
+ return this->load();
+ }
+
+ // Alias for .store(v, sk_memory_order_seq_cst).
+ T operator=(const T& v) {
+ this->store(v);
+ return v;
+ }
+
+ T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
+ return sk_atomic_fetch_add(&fVal, val, mo);
+ }
+
+ T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
+ return sk_atomic_fetch_sub(&fVal, val, mo);
+ }
+
+ bool compare_exchange(T* expected, const T& desired,
+ sk_memory_order success = sk_memory_order_seq_cst,
+ sk_memory_order failure = sk_memory_order_seq_cst) {
+ return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure);
+ }
+private:
+ T fVal;
+};
+
+// IWYU pragma: begin_exports
+#if defined(_MSC_VER)
+ #include "../ports/SkAtomics_std.h"
+#elif !defined(SK_BUILD_FOR_IOS) && defined(__ATOMIC_RELAXED)
+ #include "../ports/SkAtomics_atomic.h"
+#else
+ #include "../ports/SkAtomics_sync.h"
+#endif
+// IWYU pragma: end_exports
+
+// From here down we have shims for our old atomics API, to be weaned off of.
+// We use the default sequentially-consistent memory order to make things simple
+// and to match the practical reality of our old _sync and _win implementations.
+
+inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); }
+inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); }
+inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_add(ptr, v); }
+
+inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); }
+
+inline bool sk_atomic_cas(int32_t* ptr, int32_t expected, int32_t desired) {
+ return sk_atomic_compare_exchange(ptr, &expected, desired);
+}
+
+inline void* sk_atomic_cas(void** ptr, void* expected, void* desired) {
+ (void)sk_atomic_compare_exchange(ptr, &expected, desired);
+ return expected;
+}
+
+inline int32_t sk_atomic_conditional_inc(int32_t* ptr) {
+ int32_t prev = sk_atomic_load(ptr);
+ do {
+ if (0 == prev) {
+ break;
+ }
+ } while(!sk_atomic_compare_exchange(ptr, &prev, prev+1));
+ return prev;
+}
+
+template <typename T>
+T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); }
+
+template <typename T>
+void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); }
+
+inline void sk_membar_acquire__after_atomic_dec() {}
+inline void sk_membar_acquire__after_atomic_conditional_inc() {}
+
+#endif//SkAtomics_DEFINED
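
As a usage note: SkAtomic<T> is the header's stand-in for std::atomic<T>, with
every operation defaulting to sequentially consistent ordering. A minimal
sketch of a caller follows; the Example class and its refcounting scheme are
illustrative only and are not part of this patch.

    #include "SkAtomics.h"

    // Hypothetical refcounted object, for illustration only.
    class Example {
    public:
        Example() : fRefCnt(1) {}

        void ref() {
            // Taking another reference needs no ordering of its own.
            (void)this->fRefCnt.fetch_add(+1, sk_memory_order_relaxed);
        }
        void unref() {
            // acq_rel pairs other owners' prior writes with the deleting
            // thread; fetch_sub returns the old value, so 1 means we
            // dropped the count to zero.
            if (1 == this->fRefCnt.fetch_sub(1, sk_memory_order_acq_rel)) {
                delete this;
            }
        }
    private:
        SkAtomic<int32_t> fRefCnt;
    };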
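
The sk_atomic_conditional_inc shim is a standard try-acquire loop: it reads
the current value, refuses to bump zero, and otherwise retries until the
compare-exchange installs prev+1. The loop never reloads ptr by hand because,
as with std::atomic, a failed sk_atomic_compare_exchange writes the value it
observed back into *expected. A hedged sketch of a caller, assuming a
weak-reference scenario that is not part of this patch:

    // Returns true if a strong reference was taken, false if the count had
    // already hit zero (i.e. the object is being destroyed).
    bool try_ref(int32_t* refCnt) {
        return 0 != sk_atomic_conditional_inc(refCnt);
    }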