aboutsummaryrefslogtreecommitdiffhomepage
path: root/src
diff options
context:
space:
mode:
authorGravatar herb <herb@google.com>2015-08-19 13:40:12 -0700
committerGravatar Commit bot <commit-bot@chromium.org>2015-08-19 13:40:12 -0700
commitab42ec79d5dbf38b81394c55670b495cdf78b243 (patch)
tree913616ada7bb03fba1d8a09d5f0918bb51f0b71d /src
parentadd79ef7cb2ebd7b80bf0fd5e70281ec6ad5f079 (diff)
Add asserts for shared mutex.
Diffstat (limited to 'src')
-rw-r--r--src/core/SkSharedMutex.cpp30
-rw-r--r--src/core/SkSharedMutex.h15
2 files changed, 45 insertions, 0 deletions
diff --git a/src/core/SkSharedMutex.cpp b/src/core/SkSharedMutex.cpp
index b9af10a2be..4cf6312067 100644
--- a/src/core/SkSharedMutex.cpp
+++ b/src/core/SkSharedMutex.cpp
@@ -141,6 +141,23 @@ void SkSharedMutex::release() {
}
}
+#ifdef SK_DEBUG
+void SkSharedMutex::assertHeld() const {
+ int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed);
+ // These are very loose asserts about the mutex being held exclusively.
+ SkASSERTF(0 == (queueCounts & kSharedMask),
+ "running shared: %d, exclusive: %d, waiting shared: %d",
+ (queueCounts & kSharedMask) >> kSharedOffset,
+ (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
+ (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
+ SkASSERTF((queueCounts & kWaitingExclusiveMask) > 0,
+ "running shared: %d, exclusive: %d, waiting shared: %d",
+ (queueCounts & kSharedMask) >> kSharedOffset,
+ (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
+ (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
+}
+#endif
+
void SkSharedMutex::acquireShared() {
int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
int32_t newQueueCounts;
@@ -177,3 +194,16 @@ void SkSharedMutex::releaseShared() {
fExclusiveQueue.signal();
}
}
+
+#ifdef SK_DEBUG
+void SkSharedMutex::assertHeldShared() const {
+ int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed);
+    // A very loose assert about the mutex being held in shared mode.
+ SkASSERTF((queueCounts & kSharedMask) > 0,
+ "running shared: %d, exclusive: %d, waiting shared: %d",
+ (queueCounts & kSharedMask) >> kSharedOffset,
+ (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
+ (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
+}
+
+#endif
diff --git a/src/core/SkSharedMutex.h b/src/core/SkSharedMutex.h
index a3535dca43..f3430040e3 100644
--- a/src/core/SkSharedMutex.h
+++ b/src/core/SkSharedMutex.h
@@ -28,16 +28,31 @@ public:
// Release lock for exclusive use.
void release();
+    // Fail if the exclusive lock is not held.
+#ifdef SK_DEBUG
+ void assertHeld() const;
+#else
+ void assertHeld() const {}
+#endif
+
// Acquire lock for shared use.
void acquireShared();
// Release lock for shared use.
void releaseShared();
+    // Fail if the shared lock is not held.
+#ifdef SK_DEBUG
+ void assertHeldShared() const;
+#else
+ void assertHeldShared() const {}
+#endif
+
private:
SkAtomic<int32_t> fQueueCounts;
SkSemaphore fSharedQueue;
SkSemaphore fExclusiveQueue;
};
+
#endif // SkSharedLock_DEFINED