-rw-r--r--  bench/RefCntBench.cpp              123
-rw-r--r--  gyp/core.gyp                         1
-rw-r--r--  gyp/ports.gyp                        8
-rw-r--r--  include/core/SkRefCnt.h             39
-rw-r--r--  include/core/SkThread.h              1
-rw-r--r--  include/core/SkThread_platform.h    75
-rw-r--r--  include/core/SkTypeface.h            6
-rw-r--r--  include/core/SkWeakRefCnt.h        155
-rw-r--r--  src/core/SkTypefaceCache.cpp        47
-rw-r--r--  src/core/SkTypefaceCache.h          11
-rw-r--r--  src/ports/SkThread_none.cpp          8
-rw-r--r--  src/ports/SkThread_pthread.cpp      32
-rw-r--r--  src/ports/SkThread_win.cpp          17
-rw-r--r--  tests/RefCntTest.cpp                53
14 files changed, 532 insertions, 44 deletions
diff --git a/bench/RefCntBench.cpp b/bench/RefCntBench.cpp
index 44fb648f4e..f21317a21a 100644
--- a/bench/RefCntBench.cpp
+++ b/bench/RefCntBench.cpp
@@ -5,7 +5,9 @@
* found in the LICENSE file.
*/
#include "SkBenchmark.h"
+#include "SkRefCnt.h"
#include "SkThread.h"
+#include "SkWeakRefCnt.h"
#include <memory>
enum {
@@ -67,11 +69,124 @@ private:
typedef SkBenchmark INHERITED;
};
+class RefCntBench_New : public SkBenchmark {
+public:
+ RefCntBench_New(void* param) : INHERITED(param) {
+ }
+protected:
+ virtual const char* onGetName() {
+ return "ref_cnt_new";
+ }
+
+ virtual void onDraw(SkCanvas* canvas) {
+ for (int i = 0; i < N; ++i) {
+ SkRefCnt* ref = new SkRefCnt();
+ for (int j = 0; j < M; ++j) {
+ ref->ref();
+ ref->unref();
+ }
+ ref->unref();
+ }
+ }
+
+private:
+ typedef SkBenchmark INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class WeakRefCntBench_Stack : public SkBenchmark {
+public:
+ WeakRefCntBench_Stack(void* param) : INHERITED(param) {
+ }
+protected:
+ virtual const char* onGetName() {
+ return "ref_cnt_stack_weak";
+ }
+
+ virtual void onDraw(SkCanvas* canvas) {
+ for (int i = 0; i < N; ++i) {
+ SkWeakRefCnt ref;
+ for (int j = 0; j < M; ++j) {
+ ref.ref();
+ ref.unref();
+ }
+ }
+ }
+
+private:
+ typedef SkBenchmark INHERITED;
+};
+
+class PlacedWeakRefCnt : public SkWeakRefCnt {
+public:
+ PlacedWeakRefCnt() : SkWeakRefCnt() { }
+ void operator delete(void *p) { }
+};
+
+class WeakRefCntBench_Heap : public SkBenchmark {
+public:
+ WeakRefCntBench_Heap(void* param) : INHERITED(param) {
+ }
+protected:
+ virtual const char* onGetName() {
+ return "ref_cnt_heap_weak";
+ }
+
+ virtual void onDraw(SkCanvas* canvas) {
+ char memory[sizeof(PlacedWeakRefCnt)];
+ for (int i = 0; i < N; ++i) {
+ PlacedWeakRefCnt* ref = new (memory) PlacedWeakRefCnt();
+ for (int j = 0; j < M; ++j) {
+ ref->ref();
+ ref->unref();
+ }
+ ref->unref();
+ }
+ }
+
+private:
+ typedef SkBenchmark INHERITED;
+};
+
+class WeakRefCntBench_New : public SkBenchmark {
+public:
+ WeakRefCntBench_New(void* param) : INHERITED(param) {
+ }
+protected:
+ virtual const char* onGetName() {
+ return "ref_cnt_new_weak";
+ }
+
+ virtual void onDraw(SkCanvas* canvas) {
+ for (int i = 0; i < N; ++i) {
+ SkWeakRefCnt* ref = new SkWeakRefCnt();
+ for (int j = 0; j < M; ++j) {
+ ref->ref();
+ ref->unref();
+ }
+ ref->unref();
+ }
+ }
+
+private:
+ typedef SkBenchmark INHERITED;
+};
+
///////////////////////////////////////////////////////////////////////////////
-static SkBenchmark* Fact0(void* p) { return new RefCntBench_Stack(p); }
-static SkBenchmark* Fact1(void* p) { return new RefCntBench_Heap(p); }
+static SkBenchmark* Fact00(void* p) { return new RefCntBench_Stack(p); }
+static SkBenchmark* Fact01(void* p) { return new RefCntBench_Heap(p); }
+static SkBenchmark* Fact02(void* p) { return new RefCntBench_New(p); }
+
+static SkBenchmark* Fact10(void* p) { return new WeakRefCntBench_Stack(p); }
+static SkBenchmark* Fact11(void* p) { return new WeakRefCntBench_Heap(p); }
+static SkBenchmark* Fact12(void* p) { return new WeakRefCntBench_New(p); }
-static BenchRegistry gReg01(Fact0);
-static BenchRegistry gReg02(Fact1);
+static BenchRegistry gReg00(Fact00);
+static BenchRegistry gReg01(Fact01);
+static BenchRegistry gReg02(Fact02);
+static BenchRegistry gReg10(Fact10);
+static BenchRegistry gReg11(Fact11);
+static BenchRegistry gReg12(Fact12);
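
The *_Heap benchmarks above construct the object with placement new into a stack buffer and give it a no-op operator delete, so the final unref() runs the destructor without ever touching the allocator and only the reference-count traffic is timed. A minimal standalone sketch of that pattern, with PlainRefCnt invented for illustration (it is not part of Skia):

    #include <new>   // placement new

    struct PlainRefCnt {
        int fRefCnt = 1;
        void ref()   { ++fRefCnt; }
        void unref() { if (--fRefCnt == 0) { delete this; } }  // dtor runs; delete is a no-op
        void operator delete(void*) { }  // storage is not heap-owned, so do nothing
    };

    void heap_bench_pattern() {
        alignas(PlainRefCnt) char storage[sizeof(PlainRefCnt)];
        PlainRefCnt* ref = new (storage) PlainRefCnt();  // construct in place
        ref->ref();
        ref->unref();
        ref->unref();  // count reaches zero; no heap free happens
    }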
diff --git a/gyp/core.gyp b/gyp/core.gyp
index c2d98f089a..c9ff391aba 100644
--- a/gyp/core.gyp
+++ b/gyp/core.gyp
@@ -233,6 +233,7 @@
'../include/core/SkUnPreMultiply.h',
'../include/core/SkUnitMapper.h',
'../include/core/SkUtils.h',
+ '../include/core/SkWeakRefCnt.h',
'../include/core/SkWriter32.h',
'../include/core/SkXfermode.h',
],
diff --git a/gyp/ports.gyp b/gyp/ports.gyp
index 73dd59497d..8b29acb852 100644
--- a/gyp/ports.gyp
+++ b/gyp/ports.gyp
@@ -4,14 +4,18 @@
{
'target_name': 'ports',
'type': 'static_library',
+ 'dependencies': [
+ 'core.gyp:core',
+ 'sfnt.gyp:sfnt',
+ 'utils.gyp:utils',
+ ],
'include_dirs': [
- '../include/config',
- '../include/core',
'../include/images',
'../include/effects',
'../include/ports',
'../include/xml',
'../src/core',
+ '../src/utils',
],
'sources': [
'../src/ports/SkDebug_stdio.cpp',
diff --git a/include/core/SkRefCnt.h b/include/core/SkRefCnt.h
index 71a2443584..384b3be2eb 100644
--- a/include/core/SkRefCnt.h
+++ b/include/core/SkRefCnt.h
@@ -15,12 +15,12 @@
/** \class SkRefCnt
SkRefCnt is the base class for objects that may be shared by multiple
- objects. When a new owner wants a reference, it calls ref(). When an owner
- wants to release its reference, it calls unref(). When the shared object's
- reference count goes to zero as the result of an unref() call, its (virtual)
- destructor is called. It is an error for the destructor to be called
- explicitly (or via the object going out of scope on the stack or calling
- delete) if getRefCnt() > 1.
+ objects. When an existing owner wants to share a reference, it calls ref().
+ When an owner wants to release its reference, it calls unref(). When the
+ shared object's reference count goes to zero as the result of an unref()
+ call, its (virtual) destructor is called. It is an error for the
+ destructor to be called explicitly (or via the object going out of scope on
+ the stack or calling delete) if getRefCnt() > 1.
*/
class SK_API SkRefCnt : SkNoncopyable {
public:
@@ -28,7 +28,7 @@ public:
*/
SkRefCnt() : fRefCnt(1) {}
- /** Destruct, asserting that the reference count is 1.
+ /** Destruct, asserting that the reference count is 1.
*/
virtual ~SkRefCnt() {
#ifdef SK_DEBUG
@@ -45,19 +45,21 @@ public:
*/
void ref() const {
SkASSERT(fRefCnt > 0);
- sk_atomic_inc(&fRefCnt);
+ sk_atomic_inc(&fRefCnt); // No barrier required.
}
/** Decrement the reference count. If the reference count is 1 before the
- decrement, then call delete on the object. Note that if this is the
- case, then the object needs to have been allocated via new, and not on
- the stack.
+ decrement, then delete the object. Note that if this is the case, then
+ the object needs to have been allocated via new, and not on the stack.
*/
void unref() const {
SkASSERT(fRefCnt > 0);
+ // Release barrier (SL/S), if not provided below.
if (sk_atomic_dec(&fRefCnt) == 1) {
- fRefCnt = 1; // so our destructor won't complain
- SkDELETE(this);
+ // Aquire barrier (L/SL), if not provided above.
+ // Prevents code in dispose from happening before the decrement.
+ sk_membar_aquire__after_atomic_dec();
+ internal_dispose();
}
}
@@ -66,6 +68,17 @@ public:
}
private:
+ /** Called when the ref count goes to 0.
+ */
+ virtual void internal_dispose() const {
+#ifdef SK_DEBUG
+ // so our destructor won't complain
+ fRefCnt = 1;
+#endif
+ SkDELETE(this);
+ }
+ friend class SkWeakRefCnt;
+
mutable int32_t fRefCnt;
};
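
As the class comment above states, each owner balances one ref() with one unref(), and the final unref() now routes through internal_dispose(), which SkDELETEs the object; it must therefore have been allocated with new. A minimal ownership sketch, with MyResource invented for illustration:

    #include "SkRefCnt.h"

    class MyResource : public SkRefCnt { };  // hypothetical subclass

    static void borrow(MyResource* res) {
        res->ref();    // this scope becomes an additional owner
        // ... use res ...
        res->unref();  // release; the caller's reference is untouched
    }

    static void example() {
        MyResource* res = new MyResource;  // refcnt starts at 1
        borrow(res);
        res->unref();  // last owner: internal_dispose() -> SkDELETE(this)
    }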
diff --git a/include/core/SkThread.h b/include/core/SkThread.h
index 262ce8ba9a..2fd5052b06 100644
--- a/include/core/SkThread.h
+++ b/include/core/SkThread.h
@@ -17,6 +17,7 @@
int32_t sk_atomic_inc(int32_t*);
int32_t sk_atomic_dec(int32_t*);
+int32_t sk_atomic_conditional_inc(int32_t*);
class SkMutex {
public:
diff --git a/include/core/SkThread_platform.h b/include/core/SkThread_platform.h
index 863f6e3770..df91747738 100644
--- a/include/core/SkThread_platform.h
+++ b/include/core/SkThread_platform.h
@@ -26,6 +26,26 @@ static __attribute__((always_inline)) int32_t sk_atomic_inc(int32_t *addr) {
static __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t *addr) {
return __sync_fetch_and_add(addr, -1);
}
+void sk_membar_aquire__after_atomic_dec() { }
+
+static __attribute__((always_inline)) int32_t sk_atomic_conditional_inc(int32_t* addr) {
+ int32_t value = *addr;
+
+ while (true) {
+ if (value == 0) {
+ return 0;
+ }
+
+ int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);
+
+ if (before == value) {
+ return value;
+ } else {
+ value = before;
+ }
+ }
+}
+void sk_membar_aquire__after_atomic_conditional_inc() { }
#else // !SK_BUILD_FOR_ANDROID_NDK
@@ -36,20 +56,65 @@ static __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t *addr) {
#define sk_atomic_inc(addr) android_atomic_inc(addr)
#define sk_atomic_dec(addr) android_atomic_dec(addr)
+void sk_membar_aquire__after_atomic_dec() {
+ //HACK: Android is actually using full memory barriers.
+ // Should this change, uncomment below.
+ //int dummy;
+ //android_atomic_aquire_store(0, &dummy);
+}
+int32_t sk_atomic_conditional_inc(int32_t* addr) {
+ while (true) {
+ int32_t value = *addr;
+ if (value == 0) {
+ return 0;
+ }
+ if (0 == android_atomic_release_cas(value, value + 1, addr)) {
+ return value;
+ }
+ }
+}
+void sk_membar_aquire__after_atomic_conditional_inc() {
+ //HACK: Android is actually using full memory barriers.
+ // Should this change, uncomment below.
+ //int dummy;
+ //android_atomic_aquire_store(0, &dummy);
+}
#endif // !SK_BUILD_FOR_ANDROID_NDK
#else // !SK_BUILD_FOR_ANDROID
-/** Implemented by the porting layer, this function adds 1 to the int specified
- by the address (in a thread-safe manner), and returns the previous value.
+/** Implemented by the porting layer, this function adds one to the int
+ specified by the address (in a thread-safe manner), and returns the
+ previous value.
+ No additional memory barrier is required.
+ This must act as a compiler barrier.
*/
SK_API int32_t sk_atomic_inc(int32_t* addr);
-/** Implemented by the porting layer, this function subtracts 1 to the int
- specified by the address (in a thread-safe manner), and returns the previous
- value.
+
+/** Implemented by the porting layer, this function subtracts one from the int
+ specified by the address (in a thread-safe manner), and returns the
+ previous value.
+ Expected to act as a release (SL/S) memory barrier and a compiler barrier.
*/
SK_API int32_t sk_atomic_dec(int32_t* addr);
+/** If sk_atomic_dec does not act as an aquire (L/SL) barrier, this is expected
+ to act as an aquire (L/SL) memory barrier and as a compiler barrier.
+*/
+SK_API void sk_membar_aquire__after_atomic_dec();
+
+/** Implemented by the porting layer, this function adds one to the int
+ specified by the address iff the int specified by the address is not zero
+ (in a thread-safe manner), and returns the previous value.
+ No additional memory barrier is required.
+ This must act as a compiler barrier.
+*/
+SK_API int32_t sk_atomic_conditional_inc(int32_t*);
+/** If sk_atomic_conditional_inc does not act as an aquire (L/SL) barrier, this
+ is expected to act as an aquire (L/SL) memory barrier and as a compiler
+ barrier.
+*/
+SK_API void sk_membar_aquire__after_atomic_conditional_inc();
#endif // !SK_BUILD_FOR_ANDROID
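
The contract documented above for sk_atomic_conditional_inc, add one only if the current value is non-zero and return the previous value, is an increment-if-not-zero compare-and-swap loop. A portable sketch of the same semantics in C++11 atomics, shown purely to illustrate the contract rather than as one of the ports:

    #include <atomic>
    #include <stdint.h>

    // Increment *addr iff it is non-zero; return the previous value.
    static int32_t conditional_inc(std::atomic<int32_t>* addr) {
        int32_t value = addr->load(std::memory_order_relaxed);
        while (true) {
            if (0 == value) {
                return 0;  // never resurrect a count that already hit zero
            }
            // Acquire ordering on success covers what
            // sk_membar_aquire__after_atomic_conditional_inc() would otherwise add.
            if (addr->compare_exchange_weak(value, value + 1,
                                            std::memory_order_acq_rel,
                                            std::memory_order_relaxed)) {
                return value;  // previous, non-zero value
            }
            // On failure compare_exchange_weak reloaded 'value'; retry.
        }
    }

This is the primitive that lets try_ref() below refuse to create a strong reference once the strong count has already reached zero.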
diff --git a/include/core/SkTypeface.h b/include/core/SkTypeface.h
index bfa7d839ea..0d3ae811ec 100644
--- a/include/core/SkTypeface.h
+++ b/include/core/SkTypeface.h
@@ -11,7 +11,7 @@
#define SkTypeface_DEFINED
#include "SkAdvancedTypefaceMetrics.h"
-#include "SkRefCnt.h"
+#include "SkWeakRefCnt.h"
class SkStream;
class SkAdvancedTypefaceMetrics;
@@ -29,7 +29,7 @@ typedef uint32_t SkFontTableTag;
Typeface objects are immutable, and so they can be shared between threads.
*/
-class SK_API SkTypeface : public SkRefCnt {
+class SK_API SkTypeface : public SkWeakRefCnt {
public:
/** Style specifies the intrinsic style attributes of a given typeface
*/
@@ -186,7 +186,7 @@ private:
Style fStyle;
bool fIsFixedWidth;
- typedef SkRefCnt INHERITED;
+ typedef SkWeakRefCnt INHERITED;
};
#endif
diff --git a/include/core/SkWeakRefCnt.h b/include/core/SkWeakRefCnt.h
new file mode 100644
index 0000000000..50760e2e2b
--- /dev/null
+++ b/include/core/SkWeakRefCnt.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWeakRefCnt_DEFINED
+#define SkWeakRefCnt_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkThread.h"
+
+/** \class SkWeakRefCnt
+
+ SkWeakRefCnt is the base class for objects that may be shared by multiple
+ objects. When an existing strong owner wants to share a reference, it calls
+ ref(). When a strong owner wants to release its reference, it calls
+ unref(). When the shared object's strong reference count goes to zero as
+ the result of an unref() call, its (virtual) weak_dispose method is called.
+ It is an error for the destructor to be called explicitly (or via the
+ object going out of scope on the stack or calling delete) if
+ getRefCnt() > 1.
+
+ In addition to strong ownership, an owner may instead obtain a weak
+ reference by calling weak_ref(). A call to weak_ref() must be balanced by a
+ call to weak_unref(). To obtain a strong reference from a weak reference,
+ call try_ref(). If try_ref() returns true, the owner's pointer is now also
+ a strong reference on which unref() must be called. Note that this does not
+ affect the original weak reference; weak_unref() must still be called. When
+ the weak reference count goes to zero, the object is deleted. While the
+ weak reference count is positive and the strong reference count is zero the
+ object still exists, but will be in the disposed state. It is up to the
+ object to define what this means.
+
+ Note that a strong reference implicitly implies a weak reference. As a
+ result, it is allowable for the owner of a strong ref to call try_ref().
+ This will have the same effect as calling ref(), but may be more expensive.
+
+ Example:
+
+ SkWeakRefCnt myRef = strongRef.weak_ref();
+ ... // strongRef.unref() may or may not be called
+ if (myRef.try_ref()) {
+ ... // use myRef
+ myRef.unref();
+ } else {
+ // myRef is in the disposed state
+ }
+ myRef.weak_unref();
+*/
+class SK_API SkWeakRefCnt : public SkRefCnt {
+public:
+ /** Default construct, initializing the reference counts to 1.
+ The strong references collectively hold one weak reference. When the
+ strong reference count goes to zero, the collectively held weak
+ reference is released.
+ */
+ SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {}
+
+ /** Destruct, asserting that the weak reference count is 1.
+ */
+ virtual ~SkWeakRefCnt() {
+#ifdef SK_DEBUG
+ SkASSERT(fWeakCnt == 1);
+ fWeakCnt = 0;
+#endif
+ }
+
+ /** Return the weak reference count.
+ */
+ int32_t getWeakCnt() const { return fWeakCnt; }
+
+ void validate() const {
+ SkRefCnt::validate();
+ SkASSERT(fWeakCnt > 0);
+ }
+
+ /** Creates a strong reference from a weak reference, if possible. The
+ caller must already be an owner. If try_ref() returns true the owner
+ is in possession of an additional strong reference. Both the original
+ reference and new reference must be properly unreferenced. If try_ref()
+ returns false, no strong reference could be created and the owner's
+ reference is in the same state as before the call.
+ */
+ bool SK_WARN_UNUSED_RESULT try_ref() const {
+ if (sk_atomic_conditional_inc(&fRefCnt) != 0) {
+ // Aquire barrier (L/SL), if not provided above.
+ // Prevents subsequent code from happening before the increment.
+ sk_membar_aquire__after_atomic_conditional_inc();
+ return true;
+ }
+ return false;
+ }
+
+ /** Increment the weak reference count. Must be balanced by a call to
+ weak_unref().
+ */
+ void weak_ref() const {
+ SkASSERT(fRefCnt > 0);
+ SkASSERT(fWeakCnt > 0);
+ sk_atomic_inc(&fWeakCnt); // No barrier required.
+ }
+
+ /** Decrement the weak reference count. If the weak reference count is 1
+ before the decrement, then call delete on the object. Note that if this
+ is the case, then the object needs to have been allocated via new, and
+ not on the stack.
+ */
+ void weak_unref() const {
+ SkASSERT(fWeakCnt > 0);
+ // Release barrier (SL/S), if not provided below.
+ if (sk_atomic_dec(&fWeakCnt) == 1) {
+ // Aquire barrier (L/SL), if not provided above.
+ // Prevents code in destructor from happening before the decrement.
+ sk_membar_aquire__after_atomic_dec();
+#ifdef SK_DEBUG
+ // so our destructor won't complain
+ fWeakCnt = 1;
+#endif
+ SkRefCnt::internal_dispose();
+ }
+ }
+
+ /** Returns true if there are no strong references to the object. When this
+ is the case all future calls to try_ref() will return false.
+ */
+ bool weak_expired() const {
+ return fRefCnt == 0;
+ }
+
+protected:
+ /** Called when the strong reference count goes to zero. This allows the
+ object to free any resources it may be holding. Weak references may
+ still exist and their level of allowed access to the object is defined
+ by the object's class.
+ */
+ virtual void weak_dispose() const {
+ }
+
+private:
+ /** Called when the strong reference count goes to zero. Calls weak_dispose
+ on the object and releases the implicit weak reference held
+ collectively by the strong references.
+ */
+ virtual void internal_dispose() const SK_OVERRIDE {
+ weak_dispose();
+ weak_unref();
+ }
+
+ /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */
+ mutable int32_t fWeakCnt;
+};
+
+#endif
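
A sketch of how a subclass might use the hook above, overriding weak_dispose() to release heavy resources when the last strong reference goes away while weak owners still hold the (now disposed) object; CachedThing is invented for illustration:

    #include "SkWeakRefCnt.h"

    class CachedThing : public SkWeakRefCnt {
    public:
        void use() { /* only valid while a strong reference is held */ }
    protected:
        virtual void weak_dispose() const SK_OVERRIDE {
            // Strong count hit zero: free expensive resources here.
            // The object itself stays allocated until the last weak_unref().
        }
    };

    static void example() {
        CachedThing* thing = new CachedThing;  // strong = 1, weak = 1
        thing->weak_ref();                     // a cache keeps a weak reference
        thing->unref();                        // last strong ref: weak_dispose() runs

        if (thing->try_ref()) {                // fails here: already disposed
            thing->use();
            thing->unref();
        }
        thing->weak_unref();                   // last weak ref: object is deleted
    }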
diff --git a/src/core/SkTypefaceCache.cpp b/src/core/SkTypefaceCache.cpp
index f4397a6cf5..ee5ced908c 100644
--- a/src/core/SkTypefaceCache.cpp
+++ b/src/core/SkTypefaceCache.cpp
@@ -13,7 +13,9 @@
#define TYPEFACE_CACHE_LIMIT 128
-void SkTypefaceCache::add(SkTypeface* face, SkTypeface::Style requestedStyle) {
+void SkTypefaceCache::add(SkTypeface* face,
+ SkTypeface::Style requestedStyle,
+ bool strong) {
if (fArray.count() >= TYPEFACE_CACHE_LIMIT) {
this->purge(TYPEFACE_CACHE_LIMIT >> 2);
}
@@ -21,7 +23,12 @@ void SkTypefaceCache::add(SkTypeface* face, SkTypeface::Style requestedStyle) {
Rec* rec = fArray.append();
rec->fFace = face;
rec->fRequestedStyle = requestedStyle;
- face->ref();
+ rec->fStrong = strong;
+ if (strong) {
+ face->ref();
+ } else {
+ face->weak_ref();
+ }
}
SkTypeface* SkTypefaceCache::findByID(SkFontID fontID) const {
@@ -36,12 +43,20 @@ SkTypeface* SkTypefaceCache::findByID(SkFontID fontID) const {
return NULL;
}
-SkTypeface* SkTypefaceCache::findByProc(FindProc proc, void* ctx) const {
+SkTypeface* SkTypefaceCache::findByProcAndRef(FindProc proc, void* ctx) const {
const Rec* curr = fArray.begin();
const Rec* stop = fArray.end();
while (curr < stop) {
- if (proc(curr->fFace, curr->fRequestedStyle, ctx)) {
- return curr->fFace;
+ SkTypeface* currFace = curr->fFace;
+ if (proc(currFace, curr->fRequestedStyle, ctx)) {
+ if (curr->fStrong) {
+ currFace->ref();
+ return currFace;
+ } else if (currFace->try_ref()) {
+ return currFace;
+ } else {
+ //remove currFace from fArray?
+ }
}
curr += 1;
}
@@ -53,8 +68,15 @@ void SkTypefaceCache::purge(int numToPurge) {
int i = 0;
while (i < count) {
SkTypeface* face = fArray[i].fFace;
- if (1 == face->getRefCnt()) {
- face->unref();
+ bool strong = fArray[i].fStrong;
+ if ((strong && face->getRefCnt() == 1) ||
+ (!strong && face->weak_expired()))
+ {
+ if (strong) {
+ face->unref();
+ } else {
+ face->weak_unref();
+ }
fArray.remove(i);
--count;
if (--numToPurge == 0) {
@@ -84,9 +106,11 @@ SkFontID SkTypefaceCache::NewFontID() {
SK_DECLARE_STATIC_MUTEX(gMutex);
-void SkTypefaceCache::Add(SkTypeface* face, SkTypeface::Style requestedStyle) {
+void SkTypefaceCache::Add(SkTypeface* face,
+ SkTypeface::Style requestedStyle,
+ bool strong) {
SkAutoMutexAcquire ama(gMutex);
- Get().add(face, requestedStyle);
+ Get().add(face, requestedStyle, strong);
}
SkTypeface* SkTypefaceCache::FindByID(SkFontID fontID) {
@@ -96,8 +120,7 @@ SkTypeface* SkTypefaceCache::FindByID(SkFontID fontID) {
SkTypeface* SkTypefaceCache::FindByProcAndRef(FindProc proc, void* ctx) {
SkAutoMutexAcquire ama(gMutex);
- SkTypeface* typeface = Get().findByProc(proc, ctx);
- SkSafeRef(typeface);
+ SkTypeface* typeface = Get().findByProcAndRef(proc, ctx);
return typeface;
}
@@ -119,7 +142,7 @@ static bool DumpProc(SkTypeface* face, SkTypeface::Style style, void* ctx) {
void SkTypefaceCache::Dump() {
#ifdef SK_DEBUG
SkAutoMutexAcquire ama(gMutex);
- (void)Get().findByProc(DumpProc, NULL);
+ (void)Get().findByProcAndRef(DumpProc, NULL);
#endif
}
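
With this change FindByProcAndRef hands back the matching typeface already referenced, via ref() for strong cache entries and try_ref() for weak ones, so the caller always owns exactly one strong reference (or gets NULL). A caller-side sketch; the predicate and factory are invented for illustration:

    #include "SkTypefaceCache.h"

    // Hypothetical predicate: match on the requested style alone.
    static bool match_style(SkTypeface* face, SkTypeface::Style style, void* ctx) {
        return style == *static_cast<SkTypeface::Style*>(ctx);
    }

    SkTypeface* create_bold_typeface();  // hypothetical platform factory

    static SkTypeface* lookup_bold() {
        SkTypeface::Style wanted = SkTypeface::kBold;
        // Returns NULL or a typeface the caller must eventually unref().
        SkTypeface* face = SkTypefaceCache::FindByProcAndRef(match_style, &wanted);
        if (NULL == face) {
            face = create_bold_typeface();
            SkTypefaceCache::Add(face, wanted, false);  // cache keeps only a weak ref
        }
        return face;  // one strong reference owned by the caller
    }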
diff --git a/src/core/SkTypefaceCache.h b/src/core/SkTypefaceCache.h
index e65ec90d7d..b6cab8163f 100644
--- a/src/core/SkTypefaceCache.h
+++ b/src/core/SkTypefaceCache.h
@@ -26,7 +26,7 @@ public:
/**
* Callback for FindByProc. Returns true if the given typeface is a match
* for the given context. The passed typeface is owned by the cache and is
- * not additionally ref()ed.
+ * not additionally ref()ed. The typeface may be in the disposed state.
*/
typedef bool (*FindProc)(SkTypeface*, SkTypeface::Style, void* context);
@@ -42,7 +42,9 @@ public:
* whose refcnt is 1 (meaning only the cache is an owner) will be
* unref()ed.
*/
- static void Add(SkTypeface*, SkTypeface::Style requested);
+ static void Add(SkTypeface*,
+ SkTypeface::Style requested,
+ bool strong = true);
/**
* Search the cache for a typeface with the specified fontID (uniqueID).
@@ -75,14 +77,15 @@ public:
private:
static SkTypefaceCache& Get();
- void add(SkTypeface*, SkTypeface::Style requested);
+ void add(SkTypeface*, SkTypeface::Style requested, bool strong = true);
SkTypeface* findByID(SkFontID findID) const;
- SkTypeface* findByProc(FindProc proc, void* ctx) const;
+ SkTypeface* findByProcAndRef(FindProc proc, void* ctx) const;
void purge(int count);
void purgeAll();
struct Rec {
SkTypeface* fFace;
+ bool fStrong;
SkTypeface::Style fRequestedStyle;
};
SkTDArray<Rec> fArray;
diff --git a/src/ports/SkThread_none.cpp b/src/ports/SkThread_none.cpp
index a948a5410c..56bbbae3b3 100644
--- a/src/ports/SkThread_none.cpp
+++ b/src/ports/SkThread_none.cpp
@@ -21,6 +21,14 @@ int32_t sk_atomic_dec(int32_t* addr) {
*addr = value - 1;
return value;
}
+void sk_membar_aquire__after_atomic_dec() { }
+
+int32_t sk_atomic_conditional_inc(int32_t* addr) {
+ int32_t value = *addr;
+ if (value != 0) ++*addr;
+ return value;
+}
+void sk_membar_aquire__after_atomic_conditional_inc() { }
SkMutex::SkMutex() {}
diff --git a/src/ports/SkThread_pthread.cpp b/src/ports/SkThread_pthread.cpp
index 6a4fade1aa..d0bb3acbad 100644
--- a/src/ports/SkThread_pthread.cpp
+++ b/src/ports/SkThread_pthread.cpp
@@ -39,6 +39,27 @@ int32_t sk_atomic_dec(int32_t* addr)
{
return __sync_fetch_and_add(addr, -1);
}
+void sk_membar_aquire__after_atomic_dec() { }
+
+int32_t sk_atomic_conditional_inc(int32_t* addr)
+{
+ int32_t value = *addr;
+
+ while (true) {
+ if (value == 0) {
+ return 0;
+ }
+
+ int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);
+
+ if (before == value) {
+ return value;
+ } else {
+ value = before;
+ }
+ }
+}
+void sk_membar_aquire__after_atomic_conditional_inc() { }
#else
@@ -61,6 +82,17 @@ int32_t sk_atomic_dec(int32_t* addr)
*addr = value - 1;
return value;
}
+void sk_membar_aquire__after_atomic_dec() { }
+
+int32_t sk_atomic_conditional_inc(int32_t* addr)
+{
+ SkAutoMutexAcquire ac(gAtomicMutex);
+
+ int32_t value = *addr;
+ if (value != 0) ++*addr;
+ return value;
+}
+void sk_membar_aquire__after_atomic_conditional_inc() { }
#endif
diff --git a/src/ports/SkThread_win.cpp b/src/ports/SkThread_win.cpp
index 6c960e25a9..e833314843 100644
--- a/src/ports/SkThread_win.cpp
+++ b/src/ports/SkThread_win.cpp
@@ -17,6 +17,7 @@
//directive.
//The pragma appears to be unnecessary, but doesn't hurt.
#pragma intrinsic(_InterlockedIncrement, _InterlockedDecrement)
+#pragma intrinsic(_InterlockedCompareExchange)
int32_t sk_atomic_inc(int32_t* addr) {
// InterlockedIncrement returns the new value, we want to return the old.
@@ -26,6 +27,22 @@ int32_t sk_atomic_inc(int32_t* addr) {
int32_t sk_atomic_dec(int32_t* addr) {
return _InterlockedDecrement(reinterpret_cast<LONG*>(addr)) + 1;
}
+void sk_membar_aquire__after_atomic_dec() { }
+
+int32_t sk_atomic_conditional_inc(int32_t* addr) {
+ while (true) {
+ LONG value = static_cast<LONG const volatile&>(*addr);
+ if (value == 0) {
+ return 0;
+ }
+ if (_InterlockedCompareExchange(reinterpret_cast<LONG*>(addr),
+ value + 1,
+ value) == value) {
+ return value;
+ }
+ }
+}
+void sk_membar_aquire__after_atomic_conditional_inc() { }
SkMutex::SkMutex() {
SK_COMPILE_ASSERT(sizeof(fStorage) > sizeof(CRITICAL_SECTION),
diff --git a/tests/RefCntTest.cpp b/tests/RefCntTest.cpp
index e48fd8a292..4d4ae3f54a 100644
--- a/tests/RefCntTest.cpp
+++ b/tests/RefCntTest.cpp
@@ -10,6 +10,7 @@
#include "SkRefCnt.h"
#include "SkThreadUtils.h"
+#include "SkWeakRefCnt.h"
///////////////////////////////////////////////////////////////////////////////
@@ -40,5 +41,55 @@ static void test_refCnt(skiatest::Reporter* reporter) {
ref->unref();
}
+static void bounce_weak_ref(void* data) {
+ SkWeakRefCnt* ref = static_cast<SkWeakRefCnt*>(data);
+ for (int i = 0; i < 100000; ++i) {
+ if (ref->try_ref()) {
+ ref->unref();
+ }
+ }
+}
+
+static void bounce_weak_weak_ref(void* data) {
+ SkWeakRefCnt* ref = static_cast<SkWeakRefCnt*>(data);
+ for (int i = 0; i < 100000; ++i) {
+ ref->weak_ref();
+ ref->weak_unref();
+ }
+}
+
+static void test_weakRefCnt(skiatest::Reporter* reporter) {
+ SkWeakRefCnt* ref = new SkWeakRefCnt();
+
+ SkThread thing1(bounce_ref, ref);
+ SkThread thing2(bounce_ref, ref);
+ SkThread thing3(bounce_weak_ref, ref);
+ SkThread thing4(bounce_weak_weak_ref, ref);
+
+ thing1.setProcessorAffinity(0);
+ thing2.setProcessorAffinity(23);
+ thing3.setProcessorAffinity(2);
+ thing4.setProcessorAffinity(17);
+
+ SkASSERT(thing1.start());
+ SkASSERT(thing2.start());
+ SkASSERT(thing3.start());
+ SkASSERT(thing4.start());
+
+ thing1.join();
+ thing2.join();
+ thing3.join();
+ thing4.join();
+
+ REPORTER_ASSERT(reporter, ref->getRefCnt() == 1);
+ REPORTER_ASSERT(reporter, ref->getWeakCnt() == 1);
+ ref->unref();
+}
+
+static void test_refCntTests(skiatest::Reporter* reporter) {
+ test_refCnt(reporter);
+ test_weakRefCnt(reporter);
+}
+
#include "TestClassDef.h"
-DEFINE_TESTCLASS("ref_cnt", RefCntTestClass, test_refCnt)
+DEFINE_TESTCLASS("RefCnt", RefCntTestClass, test_refCntTests)