author    Ben Wagner <bungeman@google.com>  2017-10-05 13:10:51 -0400
committer Skia Commit-Bot <skia-commit-bot@chromium.org>  2017-10-05 19:03:34 +0000
commit    04eb02f4055069104ceb330a0722fdaf906e43e4 (patch)
tree      ae7736ecbf3b0714da6303d56df160da0a2b0cbb /src/core
parent    38ace8a1339e7dbc7126887037af11a6d34f6887 (diff)
Fewer atomic ops in debug with SkBufferHead.
In debug builds an assert would do an atomic load to check that the reference count was greater than zero, and then a fetch_add would access the value again to do the reference counting. Instead, assert in debug on the value produced by the reference counting itself. This both improves debug performance and (more importantly) makes the debug asserts correct instead of merely opportunistic.

Change-Id: Ic4ce788930d2564b5f86ab0e09fcd66006c8b73d
Reviewed-on: https://skia-review.googlesource.com/55880
Reviewed-by: Mike Klein <mtklein@chromium.org>
Commit-Queue: Ben Wagner <bungeman@google.com>
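[Editor's note] A minimal standalone sketch of the pattern this change applies. The struct and names below are hypothetical illustrations, not the Skia sources, and plain assert stands in for Skia's SkASSERT/SkAssertResult (SkAssertResult additionally keeps evaluating its argument in release builds):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Hypothetical illustration: fold the liveness check into the value
    // the reference-counting fetch_add already returns, instead of issuing
    // a separate atomic load just to assert.
    struct RefCountedExample {
        mutable std::atomic<int32_t> fRefCnt{1};

        void ref() const {
            // Before: assert(fRefCnt.load(relaxed) > 0) followed by a
            // fetch_add, i.e. two atomic ops in debug. After: one op;
            // fetch_add returns the previous value, which must be positive
            // for a live object.
            int32_t oldRefCnt = fRefCnt.fetch_add(+1, std::memory_order_relaxed);
            assert(oldRefCnt > 0);
            (void)oldRefCnt;  // Unused in release builds.
        }

        void unref() const {
            int32_t oldRefCnt = fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
            assert(oldRefCnt > 0);
            if (1 == oldRefCnt) {
                delete this;  // Last reference just dropped (heap-allocated).
            }
        }
    };

Note that the new asserts are also stronger: the checked value and the value used for counting now come from the same atomic read, so a racing ref() or unref() can no longer slip in between a separate assert load and the update, which is what the commit message means by "merely opportunistic".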
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/SkRWBuffer.cpp | 8
1 file changed, 4 insertions, 4 deletions
diff --git a/src/core/SkRWBuffer.cpp b/src/core/SkRWBuffer.cpp
index 4167da5819..8c395a1fb5 100644
--- a/src/core/SkRWBuffer.cpp
+++ b/src/core/SkRWBuffer.cpp
@@ -82,14 +82,14 @@ struct SkBufferHead {
     }
 
     void ref() const {
-        SkASSERT(fRefCnt.load(std::memory_order_relaxed) > 0);
-        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
+        SkAssertResult(fRefCnt.fetch_add(+1, std::memory_order_relaxed));
     }
 
     void unref() const {
-        SkASSERT(fRefCnt.load(std::memory_order_relaxed) > 0);
         // A release here acts in place of all releases we "should" have been doing in ref().
-        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+        int32_t oldRefCnt = fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
+        SkASSERT(oldRefCnt);
+        if (1 == oldRefCnt) {
             // Like unique(), the acquire is only needed on success.
             SkBufferBlock* block = fBlock.fNext;
             sk_free((void*)this);
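[Editor's note] For context on the in-diff comment that "the acquire is only needed on success": the acq_rel ordering on the decrement is a single-operation way to get a release on every unref() plus an acquire before the object is torn down. A common alternative, sketched below as a hypothetical illustration (this is not what the Skia code does), is a release-only decrement with the acquire issued as a fence only on the path that actually frees the object:

    #include <atomic>
    #include <cstdint>

    // Hypothetical alternative: release-only decrement, deferring the
    // acquire to a fence on the destruction path, the only place it is
    // needed.
    struct FenceExample {
        mutable std::atomic<int32_t> fRefCnt{1};

        void unref() const {
            if (1 == fRefCnt.fetch_sub(1, std::memory_order_release)) {
                // Synchronize with every earlier release-decrement so all
                // prior writes to the object are visible before it is freed.
                std::atomic_thread_fence(std::memory_order_acquire);
                delete this;  // Assumes heap allocation.
            }
        }
    };

The acq_rel form used in the diff trades slightly stronger ordering on every decrement for simpler code with no separate fence.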