author    mtklein <mtklein@chromium.org>  2016-02-29 10:14:38 -0800
committer Commit bot <commit-bot@chromium.org>  2016-02-29 10:14:38 -0800
commit    15923c9e475894d89028b7a6a0b38aeeb9f9e645 (patch)
tree      021ee7e8ff6cb8f07301e0f9b68017c29d92b0fe /src
parent    1e83a2a239a09cf5f128c18ff559916757a7c719 (diff)
Modernize SkSpinlock.
- Use std::atomic directly.
- No more need for SkPODSpinlock or SK_DECLARE_STATIC_SPINLOCK.

Now simple code like this works as you'd hope:

    static SkSpinlock gLock;

That is, it starts unlocked and there's no static initializer.

std::atomic_flag would make this terser and standard-guaranteed, but
ATOMIC_FLAG_INIT caused not-yet-implemented errors on MSVC 2013.
The generated code for this approach is identical.

It appears the implicit constructor is constexpr when all the member
initializers are.  I'm hoping this way of producing constexpr constructors
without typing "constexpr" gives us a way to eliminate more SkFoo / SkBaseFoo
distinctions and SK_DECLARE_STATIC_FOO.  This was certainly the easiest.

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1734383002

Review URL: https://codereview.chromium.org/1734383002
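For context, the trick the message relies on is that a defaulted constructor is
implicitly constexpr when every member's initializer is a constant expression.
A minimal sketch of that idea (illustrative only, not Skia's actual SkSpinlock.h):

    #include <atomic>

    // Illustrative sketch only -- not Skia's header.  Because the sole member's
    // initializer is a constant expression, the implicitly generated default
    // constructor is constexpr, so a namespace-scope instance is constant-
    // initialized: it starts unlocked and needs no static initializer.
    class Spinlock {
    public:
        void lock()   { while (fLocked.exchange(true, std::memory_order_acquire)) { /*spin*/ } }
        void unlock() { fLocked.store(false, std::memory_order_release); }
    private:
        std::atomic<bool> fLocked{false};
    };

    static Spinlock gLock;   // starts unlocked, no static initializer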
Diffstat (limited to 'src')
-rw-r--r--  src/core/SkSpinlock.cpp      8
-rw-r--r--  src/gpu/GrProcessor.cpp      2
-rw-r--r--  src/gpu/batches/GrBatch.cpp  2
3 files changed, 7 insertions, 5 deletions
diff --git a/src/core/SkSpinlock.cpp b/src/core/SkSpinlock.cpp
index 0f764278df..eb9d6330aa 100644
--- a/src/core/SkSpinlock.cpp
+++ b/src/core/SkSpinlock.cpp
@@ -7,7 +7,9 @@
#include "SkSpinlock.h"
-void SkPODSpinlock::contendedAcquire() {
- // To act as a mutex, we need an acquire barrier when we take the lock.
- while(sk_atomic_exchange(&fLocked, true, sk_memory_order_acquire)) { /*spin*/ }
+void SkSpinlock::contendedAcquire() {
+ // To act as a mutex, we need an acquire barrier when we acquire the lock.
+ while (fLocked.exchange(true, std::memory_order_acquire)) {
+ /*spin*/
+ }
}
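The hunk above contains only the contended slow path; the uncontended fast path
and the unlock live in SkSpinlock.h, which is outside this diff. A rough sketch
of how the memory orders pair up, under the assumption that the header looks
roughly like this:

    #include <atomic>

    // Hypothetical approximation of SkSpinlock.h, for illustration only.
    class SkSpinlock {
    public:
        void acquire() {
            // Fast path: one exchange with acquire ordering; spin out of line on contention.
            if (fLocked.exchange(true, std::memory_order_acquire)) {
                this->contendedAcquire();   // the function shown in the hunk above
            }
        }
        void release() {
            // The release store pairs with the acquire exchange, so writes made
            // while holding the lock are visible to the next acquirer.
            fLocked.store(false, std::memory_order_release);
        }
    private:
        void contendedAcquire();            // defined in SkSpinlock.cpp
        std::atomic<bool> fLocked{false};
    };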
diff --git a/src/gpu/GrProcessor.cpp b/src/gpu/GrProcessor.cpp
index 15206c2c85..aef51908da 100644
--- a/src/gpu/GrProcessor.cpp
+++ b/src/gpu/GrProcessor.cpp
@@ -81,7 +81,7 @@ void GrProcessorTestFactory<GrXPFactory>::VerifyFactoryCount() {
// memory barrier between accesses of a context on different threads. Also, there may be multiple
// GrContexts and those contexts may be in use concurrently on different threads.
namespace {
-SK_DECLARE_STATIC_SPINLOCK(gProcessorSpinlock);
+static SkSpinlock gProcessorSpinlock;
class MemoryPoolAccessor {
public:
MemoryPoolAccessor() { gProcessorSpinlock.acquire(); }
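MemoryPoolAccessor is a scoped guard: the constructor shown here takes the
spinlock, and the destructor (in context this hunk does not show) presumably
releases it, so the shared GrMemoryPool is only touched while the lock is held.
A hedged sketch of that pattern:

    // Sketch of the RAII guard pattern; the destructor and pool accessor are
    // assumptions about code outside this hunk, not a quote of GrProcessor.cpp.
    class MemoryPoolAccessor {
    public:
        MemoryPoolAccessor()  { gProcessorSpinlock.acquire(); }  // lock for this object's lifetime
        ~MemoryPoolAccessor() { gProcessorSpinlock.release(); }  // unlock on scope exit
        // ... accessors that hand out the shared GrMemoryPool while locked ...
    };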
diff --git a/src/gpu/batches/GrBatch.cpp b/src/gpu/batches/GrBatch.cpp
index 19c19ffcf1..8af1c1e088 100644
--- a/src/gpu/batches/GrBatch.cpp
+++ b/src/gpu/batches/GrBatch.cpp
@@ -20,7 +20,7 @@
// memory barrier between accesses of a context on different threads. Also, there may be multiple
// GrContexts and those contexts may be in use concurrently on different threads.
namespace {
-SK_DECLARE_STATIC_SPINLOCK(gBatchSpinlock);
+static SkSpinlock gBatchSpinlock;
class MemoryPoolAccessor {
public:
MemoryPoolAccessor() { gBatchSpinlock.acquire(); }