aboutsummaryrefslogtreecommitdiffhomepage
path: root/src
diff options
context:
space:
mode:
authorGravatar reed <reed@google.com>2014-08-26 09:08:04 -0700
committerGravatar Commit bot <commit-bot@chromium.org>2014-08-26 09:08:04 -0700
commit680fb9e8f10d24b5fe35c90338de37c57392f1aa (patch)
treef973f7d37ccd4deb5a23eb57c8b0a0b7e514ad3f /src
parent97b49478cfbcdb9cac6de2ab0d44f975311ef631 (diff)
retool image cache to be generic cache, allowing the client to subclass "Rec", so they can provide a custom Key and arbitrary Value.
Follow-on CLs - rename ScaledimageCache to something like GeneralCache - explore if we can use call-backs or some mechanism to completely hide "lock/unlock", by forcing all clients to support "copying" their value out of the cache as the result of a Find. R=mtklein@google.com, senorblanco@google.com, bsalomon@google.com, qiankun.miao@intel.com, senorblanco@chromium.org Author: reed@google.com Review URL: https://codereview.chromium.org/507483002
Diffstat (limited to 'src')
-rw-r--r--src/core/SkBitmapCache.cpp108
-rw-r--r--src/core/SkBitmapCache.h45
-rw-r--r--src/core/SkBitmapProcState.cpp116
-rw-r--r--src/core/SkBitmapProcState.h6
-rw-r--r--src/core/SkScaledImageCache.cpp167
-rw-r--r--src/core/SkScaledImageCache.h68
-rw-r--r--src/lazy/SkCachingPixelRef.cpp38
-rw-r--r--src/lazy/SkCachingPixelRef.h4
8 files changed, 223 insertions, 329 deletions
diff --git a/src/core/SkBitmapCache.cpp b/src/core/SkBitmapCache.cpp
index 1713fe4c50..c0ce0e6369 100644
--- a/src/core/SkBitmapCache.cpp
+++ b/src/core/SkBitmapCache.cpp
@@ -6,6 +6,8 @@
*/
#include "SkBitmapCache.h"
+#include "SkScaledImageCache.h"
+#include "SkMipMap.h"
#include "SkRect.h"
/**
@@ -41,51 +43,103 @@ public:
//////////////////////////////////////////////////////////////////////////////////////////
-SkScaledImageCache::ID* SkBitmapCache::FindAndLock(const SkBitmap& src,
- SkScalar invScaleX, SkScalar invScaleY,
- SkBitmap* result) {
+struct BitmapRec : public SkScaledImageCache::Rec {
+ BitmapRec(uint32_t genID, SkScalar scaleX, SkScalar scaleY, const SkIRect& bounds,
+ const SkBitmap& result)
+ : fKey(genID, scaleX, scaleY, bounds)
+ , fBitmap(result)
+ {}
+
+ BitmapKey fKey;
+ SkBitmap fBitmap;
+
+ virtual const Key& getKey() const SK_OVERRIDE { return fKey; }
+ virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + fBitmap.getSize(); }
+};
+
+static bool find_and_return(const BitmapKey& key, SkBitmap* result) {
+ const BitmapRec* rec = (BitmapRec*)SkScaledImageCache::FindAndLock(key);
+ if (rec) {
+ *result = rec->fBitmap;
+ SkScaledImageCache::Unlock(rec);
+
+ result->lockPixels();
+ if (result->getPixels()) {
+ return true;
+ }
+ // todo: we should explicitly purge rec from the cache at this point, since
+ // it is effectively purged already (has no memory behind it)
+ result->reset();
+ // fall-through to false
+ }
+ return false;
+}
+
+bool SkBitmapCache::Find(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY,
+ SkBitmap* result) {
if (0 == invScaleX || 0 == invScaleY) {
// degenerate, and the key we use for mipmaps
- return NULL;
+ return false;
}
BitmapKey key(src.getGenerationID(), invScaleX, invScaleY, get_bounds_from_bitmap(src));
- return SkScaledImageCache::FindAndLock(key, result);
+ return find_and_return(key, result);
}
-SkScaledImageCache::ID* SkBitmapCache::AddAndLock(const SkBitmap& src,
- SkScalar invScaleX, SkScalar invScaleY,
- const SkBitmap& result) {
+void SkBitmapCache::Add(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY,
+ const SkBitmap& result) {
if (0 == invScaleX || 0 == invScaleY) {
// degenerate, and the key we use for mipmaps
- return NULL;
+ return;
}
- BitmapKey key(src.getGenerationID(), invScaleX, invScaleY, get_bounds_from_bitmap(src));
- return SkScaledImageCache::AddAndLock(key, result);
+ SkScaledImageCache::Add(SkNEW_ARGS(BitmapRec,
+ (src.getGenerationID(), invScaleX, invScaleY,
+ get_bounds_from_bitmap(src), result)));
}
-////
-
-SkScaledImageCache::ID* SkBitmapCache::FindAndLock(uint32_t genID, int width, int height,
- SkBitmap* result) {
+bool SkBitmapCache::Find(uint32_t genID, int width, int height, SkBitmap* result) {
BitmapKey key(genID, SK_Scalar1, SK_Scalar1, SkIRect::MakeWH(width, height));
- return SkScaledImageCache::FindAndLock(key, result);
+ return find_and_return(key, result);
}
-SkScaledImageCache::ID* SkBitmapCache::AddAndLock(uint32_t genID, int width, int height,
- const SkBitmap& result) {
- BitmapKey key(genID, SK_Scalar1, SK_Scalar1, SkIRect::MakeWH(width, height));
- return SkScaledImageCache::AddAndLock(key, result);
+void SkBitmapCache::Add(uint32_t genID, int width, int height, const SkBitmap& result) {
+ SkScaledImageCache::Add(SkNEW_ARGS(BitmapRec, (genID, SK_Scalar1, SK_Scalar1,
+ SkIRect::MakeWH(width, height), result)));
}
-////
+//////////////////////////////////////////////////////////////////////////////////////////
+
+struct MipMapRec : public SkScaledImageCache::Rec {
+ MipMapRec(const SkBitmap& src, const SkMipMap* result)
+ : fKey(src.getGenerationID(), 0, 0, get_bounds_from_bitmap(src))
+ , fMipMap(SkRef(result))
+ {}
-SkScaledImageCache::ID* SkMipMapCache::FindAndLock(const SkBitmap& src, const SkMipMap** result) {
- BitmapKey key(src.getGenerationID(), SK_Scalar1, SK_Scalar1, get_bounds_from_bitmap(src));
- return SkScaledImageCache::FindAndLock(key, result);
+ virtual ~MipMapRec() {
+ fMipMap->unref();
+ }
+
+ BitmapKey fKey;
+ const SkMipMap* fMipMap;
+
+ virtual const Key& getKey() const SK_OVERRIDE { return fKey; }
+ virtual size_t bytesUsed() const SK_OVERRIDE { return sizeof(fKey) + fMipMap->getSize(); }
+};
+
+
+const SkMipMap* SkMipMapCache::FindAndRef(const SkBitmap& src) {
+ BitmapKey key(src.getGenerationID(), 0, 0, get_bounds_from_bitmap(src));
+ const MipMapRec* rec = (MipMapRec*)SkScaledImageCache::FindAndLock(key);
+ const SkMipMap* result = NULL;
+ if (rec) {
+ result = SkRef(rec->fMipMap);
+ SkScaledImageCache::Unlock(rec);
+ }
+ return result;
}
-SkScaledImageCache::ID* SkMipMapCache::AddAndLock(const SkBitmap& src, const SkMipMap* result) {
- BitmapKey key(src.getGenerationID(), SK_Scalar1, SK_Scalar1, get_bounds_from_bitmap(src));
- return SkScaledImageCache::AddAndLock(key, result);
+void SkMipMapCache::Add(const SkBitmap& src, const SkMipMap* result) {
+ if (result) {
+ SkScaledImageCache::Add(SkNEW_ARGS(MipMapRec, (src, result)));
+ }
}
diff --git a/src/core/SkBitmapCache.h b/src/core/SkBitmapCache.h
index ebade0ebd9..2b2dfbbea4 100644
--- a/src/core/SkBitmapCache.h
+++ b/src/core/SkBitmapCache.h
@@ -8,38 +8,33 @@
#ifndef SkBitmapCache_DEFINED
#define SkBitmapCache_DEFINED
-#include "SkScaledImageCache.h"
+#include "SkScalar.h"
+
+class SkBitmap;
+class SkMipMap;
class SkBitmapCache {
public:
- typedef SkScaledImageCache::ID ID;
-
- static void Unlock(ID* id) {
- SkScaledImageCache::Unlock(id);
- }
-
- /* Input: bitmap+inverse_scale */
- static ID* FindAndLock(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY,
- SkBitmap* result);
- static ID* AddAndLock(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY,
- const SkBitmap& result);
-
- /* Input: bitmap_genID+width+height */
- static ID* FindAndLock(uint32_t genID, int width, int height, SkBitmap* result);
-
- static ID* AddAndLock(uint32_t genID, int width, int height, const SkBitmap& result);
+ /**
+ * Search based on the src bitmap and inverse scales in X and Y. If found, returns true and
+ * result will be set to the matching bitmap with its pixels already locked.
+ */
+ static bool Find(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY, SkBitmap* result);
+ static void Add(const SkBitmap& src, SkScalar invScaleX, SkScalar invScaleY,
+ const SkBitmap& result);
+
+ /**
+ * Search based on the bitmap's genID, width, height. If found, returns true and
+ * result will be set to the matching bitmap with its pixels already locked.
+ */
+ static bool Find(uint32_t genID, int width, int height, SkBitmap* result);
+ static void Add(uint32_t genID, int width, int height, const SkBitmap& result);
};
class SkMipMapCache {
public:
- typedef SkScaledImageCache::ID ID;
-
- static void Unlock(ID* id) {
- SkScaledImageCache::Unlock(id);
- }
-
- static ID* FindAndLock(const SkBitmap& src, const SkMipMap** result);
- static ID* AddAndLock(const SkBitmap& src, const SkMipMap* result);
+ static const SkMipMap* FindAndRef(const SkBitmap& src);
+ static void Add(const SkBitmap& src, const SkMipMap* result);
};
#endif
diff --git a/src/core/SkBitmapProcState.cpp b/src/core/SkBitmapProcState.cpp
index 0cf4d6c4ea..7640573596 100644
--- a/src/core/SkBitmapProcState.cpp
+++ b/src/core/SkBitmapProcState.cpp
@@ -109,7 +109,7 @@ static SkScalar effective_matrix_scale_sqrd(const SkMatrix& mat) {
class AutoScaledCacheUnlocker {
public:
- AutoScaledCacheUnlocker(SkScaledImageCache::ID** idPtr) : fIDPtr(idPtr) {}
+ AutoScaledCacheUnlocker(SkScaledImageCache::ID* idPtr) : fIDPtr(idPtr) {}
~AutoScaledCacheUnlocker() {
if (fIDPtr && *fIDPtr) {
SkScaledImageCache::Unlock(*fIDPtr);
@@ -123,7 +123,7 @@ public:
}
private:
- SkScaledImageCache::ID** fIDPtr;
+ SkScaledImageCache::ID* fIDPtr;
};
#define AutoScaledCacheUnlocker(...) SK_REQUIRE_LOCAL_VAR(AutoScaledCacheUnlocker)
@@ -147,10 +147,7 @@ static inline bool cache_size_okay(const SkBitmap& bm, const SkMatrix& invMat) {
// the interface to the cache, but might be well worth it.
bool SkBitmapProcState::possiblyScaleImage() {
- AutoScaledCacheUnlocker unlocker(&fScaledCacheID);
-
SkASSERT(NULL == fBitmap);
- SkASSERT(NULL == fScaledCacheID);
if (fFilterLevel <= SkPaint::kLow_FilterLevel) {
return false;
@@ -183,20 +180,7 @@ bool SkBitmapProcState::possiblyScaleImage() {
return false;
}
- fScaledCacheID = SkBitmapCache::FindAndLock(fOrigBitmap, invScaleX, invScaleY,
- &fScaledBitmap);
- if (fScaledCacheID) {
- fScaledBitmap.lockPixels();
- if (!fScaledBitmap.getPixels()) {
- fScaledBitmap.unlockPixels();
- // found a purged entry (discardablememory?), release it
- SkScaledImageCache::Unlock(fScaledCacheID);
- fScaledCacheID = NULL;
- // fall through to rebuild
- }
- }
-
- if (NULL == fScaledCacheID) {
+ if (!SkBitmapCache::Find(fOrigBitmap, invScaleX, invScaleY, &fScaledBitmap)) {
float dest_width = fOrigBitmap.width() / invScaleX;
float dest_height = fOrigBitmap.height() / invScaleY;
@@ -215,13 +199,7 @@ bool SkBitmapProcState::possiblyScaleImage() {
}
SkASSERT(NULL != fScaledBitmap.getPixels());
- fScaledCacheID = SkBitmapCache::AddAndLock(fOrigBitmap, invScaleX, invScaleY,
- fScaledBitmap);
- if (!fScaledCacheID) {
- fScaledBitmap.reset();
- return false;
- }
- SkASSERT(NULL != fScaledBitmap.getPixels());
+ SkBitmapCache::Add(fOrigBitmap, invScaleX, invScaleY, fScaledBitmap);
}
SkASSERT(NULL != fScaledBitmap.getPixels());
@@ -235,7 +213,6 @@ bool SkBitmapProcState::possiblyScaleImage() {
// image might require is some interpolation if the translation
// is fractional.
fFilterLevel = SkPaint::kLow_FilterLevel;
- unlocker.release();
return true;
}
@@ -280,39 +257,30 @@ bool SkBitmapProcState::possiblyScaleImage() {
* a scale > 1 to indicate down scaling by the CTM.
*/
if (scaleSqd > SK_Scalar1) {
- const SkMipMap* mip = NULL;
-
- SkASSERT(NULL == fScaledCacheID);
- fScaledCacheID = SkMipMapCache::FindAndLock(fOrigBitmap, &mip);
- if (!fScaledCacheID) {
- SkASSERT(NULL == mip);
- mip = SkMipMap::Build(fOrigBitmap);
- if (mip) {
- fScaledCacheID = SkMipMapCache::AddAndLock(fOrigBitmap, mip);
- SkASSERT(mip->getRefCnt() > 1);
- mip->unref(); // the cache took a ref
- SkASSERT(fScaledCacheID);
- }
- } else {
- SkASSERT(mip);
- }
-
- if (mip) {
- SkScalar levelScale = SkScalarInvert(SkScalarSqrt(scaleSqd));
- SkMipMap::Level level;
- if (mip->extractLevel(levelScale, &level)) {
- SkScalar invScaleFixup = level.fScale;
- fInvMatrix.postScale(invScaleFixup, invScaleFixup);
-
- SkImageInfo info = fOrigBitmap.info();
- info.fWidth = level.fWidth;
- info.fHeight = level.fHeight;
- fScaledBitmap.installPixels(info, level.fPixels, level.fRowBytes);
- fBitmap = &fScaledBitmap;
- fFilterLevel = SkPaint::kLow_FilterLevel;
- unlocker.release();
- return true;
+ fCurrMip.reset(SkMipMapCache::FindAndRef(fOrigBitmap));
+ if (NULL == fCurrMip.get()) {
+ fCurrMip.reset(SkMipMap::Build(fOrigBitmap));
+ if (NULL == fCurrMip.get()) {
+ return false;
}
+ SkMipMapCache::Add(fOrigBitmap, fCurrMip);
+ }
+
+ SkScalar levelScale = SkScalarInvert(SkScalarSqrt(scaleSqd));
+ SkMipMap::Level level;
+ if (fCurrMip->extractLevel(levelScale, &level)) {
+ SkScalar invScaleFixup = level.fScale;
+ fInvMatrix.postScale(invScaleFixup, invScaleFixup);
+
+ SkImageInfo info = fOrigBitmap.info();
+ info.fWidth = level.fWidth;
+ info.fHeight = level.fHeight;
+ // todo: if we could wrap the fCurrMip in a pixelref, then we could just install
+ // that here, and not need to explicitly track it ourselves.
+ fScaledBitmap.installPixels(info, level.fPixels, level.fRowBytes);
+ fBitmap = &fScaledBitmap;
+ fFilterLevel = SkPaint::kLow_FilterLevel;
+ return true;
}
}
@@ -336,12 +304,8 @@ static bool get_locked_pixels(const SkBitmap& src, int pow2, SkBitmap* dst) {
}
bool SkBitmapProcState::lockBaseBitmap() {
- AutoScaledCacheUnlocker unlocker(&fScaledCacheID);
-
SkPixelRef* pr = fOrigBitmap.pixelRef();
- SkASSERT(NULL == fScaledCacheID);
-
if (pr->isLocked() || !pr->implementsDecodeInto()) {
// fast-case, no need to look in our cache
fScaledBitmap = fOrigBitmap;
@@ -350,19 +314,7 @@ bool SkBitmapProcState::lockBaseBitmap() {
return false;
}
} else {
- fScaledCacheID = SkBitmapCache::FindAndLock(fOrigBitmap, 1, 1, &fScaledBitmap);
- if (fScaledCacheID) {
- fScaledBitmap.lockPixels();
- if (!fScaledBitmap.getPixels()) {
- fScaledBitmap.unlockPixels();
- // found a purged entry (discardablememory?), release it
- SkScaledImageCache::Unlock(fScaledCacheID);
- fScaledCacheID = NULL;
- // fall through to rebuild
- }
- }
-
- if (NULL == fScaledCacheID) {
+ if (!SkBitmapCache::Find(fOrigBitmap, 1, 1, &fScaledBitmap)) {
if (!get_locked_pixels(fOrigBitmap, 0, &fScaledBitmap)) {
return false;
}
@@ -370,22 +322,14 @@ bool SkBitmapProcState::lockBaseBitmap() {
// TODO: if fScaled comes back at a different width/height than fOrig,
// we need to update the matrix we are using to sample from this guy.
- fScaledCacheID = SkBitmapCache::AddAndLock(fOrigBitmap, 1, 1, fScaledBitmap);
- if (!fScaledCacheID) {
- fScaledBitmap.reset();
- return false;
- }
+ SkBitmapCache::Add(fOrigBitmap, 1, 1, fScaledBitmap);
}
}
fBitmap = &fScaledBitmap;
- unlocker.release();
return true;
}
SkBitmapProcState::~SkBitmapProcState() {
- if (fScaledCacheID) {
- SkScaledImageCache::Unlock(fScaledCacheID);
- }
SkDELETE(fBitmapFilter);
}
@@ -396,8 +340,6 @@ bool SkBitmapProcState::chooseProcs(const SkMatrix& inv, const SkPaint& paint) {
fInvMatrix = inv;
fFilterLevel = paint.getFilterLevel();
- SkASSERT(NULL == fScaledCacheID);
-
// possiblyScaleImage will look to see if it can rescale the image as a
// preprocess; either by scaling up to the target size, or by selecting
// a nearby mipmap level. If it does, it will adjust the working
diff --git a/src/core/SkBitmapProcState.h b/src/core/SkBitmapProcState.h
index 73d7e904b3..a249ed6ce1 100644
--- a/src/core/SkBitmapProcState.h
+++ b/src/core/SkBitmapProcState.h
@@ -13,6 +13,7 @@
#include "SkBitmap.h"
#include "SkBitmapFilter.h"
#include "SkMatrix.h"
+#include "SkMipMap.h"
#include "SkPaint.h"
#include "SkScaledImageCache.h"
@@ -36,7 +37,7 @@ class SkPaint;
struct SkBitmapProcState {
- SkBitmapProcState(): fScaledCacheID(NULL), fBitmapFilter(NULL) {}
+ SkBitmapProcState() : fBitmapFilter(NULL) {}
~SkBitmapProcState();
typedef void (*ShaderProc32)(const SkBitmapProcState&, int x, int y,
@@ -142,7 +143,8 @@ private:
SkBitmap fOrigBitmap; // CONSTRUCTOR
SkBitmap fScaledBitmap; // chooseProcs
- SkScaledImageCache::ID* fScaledCacheID;
+ SkAutoTUnref<const SkMipMap> fCurrMip;
+// SkScaledImageCache::ID fScaledCacheID;
MatrixProc chooseMatrixProc(bool trivial_matrix);
bool chooseProcs(const SkMatrix& inv, const SkPaint&);
diff --git a/src/core/SkScaledImageCache.cpp b/src/core/SkScaledImageCache.cpp
index 6a24b5d5fa..9d15422300 100644
--- a/src/core/SkScaledImageCache.cpp
+++ b/src/core/SkScaledImageCache.cpp
@@ -21,14 +21,6 @@
#define SK_DEFAULT_IMAGE_CACHE_LIMIT (2 * 1024 * 1024)
#endif
-static inline SkScaledImageCache::ID* rec_to_id(SkScaledImageCache::Rec* rec) {
- return reinterpret_cast<SkScaledImageCache::ID*>(rec);
-}
-
-static inline SkScaledImageCache::Rec* id_to_rec(SkScaledImageCache::ID* id) {
- return reinterpret_cast<SkScaledImageCache::Rec*>(id);
-}
-
void SkScaledImageCache::Key::init(size_t length) {
SkASSERT(SkAlign4(length) == length);
// 2 is fCount32 and fHash
@@ -37,50 +29,6 @@ void SkScaledImageCache::Key::init(size_t length) {
fHash = SkChecksum::Murmur3(this->as32() + 2, (fCount32 - 2) << 2);
}
-SkScaledImageCache::Key* SkScaledImageCache::Key::clone() const {
- size_t size = fCount32 << 2;
- void* copy = sk_malloc_throw(size);
- memcpy(copy, this, size);
- return (Key*)copy;
-}
-
-struct SkScaledImageCache::Rec {
- Rec(const Key& key, const SkBitmap& bm) : fKey(key.clone()), fBitmap(bm) {
- fLockCount = 1;
- fMip = NULL;
- }
-
- Rec(const Key& key, const SkMipMap* mip) : fKey(key.clone()) {
- fLockCount = 1;
- fMip = mip;
- mip->ref();
- }
-
- ~Rec() {
- SkSafeUnref(fMip);
- sk_free(fKey);
- }
-
- static const Key& GetKey(const Rec& rec) { return *rec.fKey; }
- static uint32_t Hash(const Key& key) { return key.hash(); }
-
- size_t bytesUsed() const {
- return fMip ? fMip->getSize() : fBitmap.getSize();
- }
-
- Rec* fNext;
- Rec* fPrev;
-
- // this guy wants to be 64bit aligned
- Key* fKey;
-
- int32_t fLockCount;
-
- // we use either fBitmap or fMip, but not both
- SkBitmap fBitmap;
- const SkMipMap* fMip;
-};
-
#include "SkTDynamicHash.h"
class SkScaledImageCache::Hash :
@@ -259,11 +207,7 @@ SkScaledImageCache::~SkScaledImageCache() {
////////////////////////////////////////////////////////////////////////////////
-/**
- This private method is the fully general record finder. All other
- record finders should call this function or the one above.
- */
-SkScaledImageCache::Rec* SkScaledImageCache::findAndLock(const SkScaledImageCache::Key& key) {
+const SkScaledImageCache::Rec* SkScaledImageCache::findAndLock(const Key& key) {
#ifdef USE_HASH
Rec* rec = fHash->find(key);
#else
@@ -276,41 +220,13 @@ SkScaledImageCache::Rec* SkScaledImageCache::findAndLock(const SkScaledImageCach
return rec;
}
-SkScaledImageCache::ID* SkScaledImageCache::findAndLock(const Key& key, SkBitmap* result) {
- Rec* rec = this->findAndLock(key);
- if (rec) {
- SkASSERT(NULL == rec->fMip);
- SkASSERT(rec->fBitmap.pixelRef());
- *result = rec->fBitmap;
- }
- return rec_to_id(rec);
-}
-
-SkScaledImageCache::ID* SkScaledImageCache::findAndLock(const Key& key, const SkMipMap** mip) {
- Rec* rec = this->findAndLock(key);
- if (rec) {
- SkASSERT(rec->fMip);
- SkASSERT(NULL == rec->fBitmap.pixelRef());
- *mip = rec->fMip;
- }
- return rec_to_id(rec);
-}
-
-
-////////////////////////////////////////////////////////////////////////////////
-/**
- This private method is the fully general record adder. All other
- record adders should call this function. */
-SkScaledImageCache::ID* SkScaledImageCache::addAndLock(SkScaledImageCache::Rec* rec) {
+const SkScaledImageCache::Rec* SkScaledImageCache::addAndLock(Rec* rec) {
SkASSERT(rec);
// See if we already have this key (racy inserts, etc.)
- Rec* existing = this->findAndLock(*rec->fKey);
+ const Rec* existing = this->findAndLock(rec->getKey());
if (NULL != existing) {
- // Since we already have a matching entry, just delete the new one and return.
- // Call sites cannot assume the passed in object will live past this call.
- existing->fBitmap = rec->fBitmap;
SkDELETE(rec);
- return rec_to_id(existing);
+ return existing;
}
this->addToHead(rec);
@@ -321,20 +237,29 @@ SkScaledImageCache::ID* SkScaledImageCache::addAndLock(SkScaledImageCache::Rec*
#endif
// We may (now) be overbudget, so see if we need to purge something.
this->purgeAsNeeded();
- return rec_to_id(rec);
-}
-
-SkScaledImageCache::ID* SkScaledImageCache::addAndLock(const Key& key, const SkBitmap& scaled) {
- Rec* rec = SkNEW_ARGS(Rec, (key, scaled));
- return this->addAndLock(rec);
+ return rec;
}
-SkScaledImageCache::ID* SkScaledImageCache::addAndLock(const Key& key, const SkMipMap* mip) {
- Rec* rec = SkNEW_ARGS(Rec, (key, mip));
- return this->addAndLock(rec);
+void SkScaledImageCache::add(Rec* rec) {
+ SkASSERT(rec);
+ // See if we already have this key (racy inserts, etc.)
+ const Rec* existing = this->findAndLock(rec->getKey());
+ if (NULL != existing) {
+ SkDELETE(rec);
+ this->unlock(existing);
+ return;
+ }
+
+ this->addToHead(rec);
+ SkASSERT(1 == rec->fLockCount);
+#ifdef USE_HASH
+ SkASSERT(fHash);
+ fHash->add(rec);
+#endif
+ this->unlock(rec);
}
-void SkScaledImageCache::unlock(SkScaledImageCache::ID* id) {
+void SkScaledImageCache::unlock(SkScaledImageCache::ID id) {
SkASSERT(id);
#ifdef SK_DEBUG
@@ -342,7 +267,7 @@ void SkScaledImageCache::unlock(SkScaledImageCache::ID* id) {
bool found = false;
Rec* rec = fHead;
while (rec != NULL) {
- if (rec == id_to_rec(id)) {
+ if (rec == id) {
found = true;
break;
}
@@ -351,9 +276,10 @@ void SkScaledImageCache::unlock(SkScaledImageCache::ID* id) {
SkASSERT(found);
}
#endif
- Rec* rec = id_to_rec(id);
+ const Rec* rec = id;
SkASSERT(rec->fLockCount > 0);
- rec->fLockCount -= 1;
+ // We're under our lock, and we're the only possible mutator, so unconsting is fine.
+ const_cast<Rec*>(rec)->fLockCount -= 1;
// we may have been over-budget, but now have released something, so check
// if we should purge.
@@ -389,7 +315,7 @@ void SkScaledImageCache::purgeAsNeeded() {
SkASSERT(used <= bytesUsed);
this->detach(rec);
#ifdef USE_HASH
- fHash->remove(*rec->fKey);
+ fHash->remove(rec->getKey());
#endif
SkDELETE(rec);
@@ -575,27 +501,7 @@ static SkScaledImageCache* get_cache() {
return gScaledImageCache;
}
-SkScaledImageCache::ID* SkScaledImageCache::FindAndLock(const Key& key, SkBitmap* result) {
- SkAutoMutexAcquire am(gMutex);
- return get_cache()->findAndLock(key, result);
-}
-
-SkScaledImageCache::ID* SkScaledImageCache::FindAndLock(const Key& key, SkMipMap const ** mip) {
- SkAutoMutexAcquire am(gMutex);
- return get_cache()->findAndLock(key, mip);
-}
-
-SkScaledImageCache::ID* SkScaledImageCache::AddAndLock(const Key& key, const SkBitmap& scaled) {
- SkAutoMutexAcquire am(gMutex);
- return get_cache()->addAndLock(key, scaled);
-}
-
-SkScaledImageCache::ID* SkScaledImageCache::AddAndLock(const Key& key, const SkMipMap* mip) {
- SkAutoMutexAcquire am(gMutex);
- return get_cache()->addAndLock(key, mip);
-}
-
-void SkScaledImageCache::Unlock(SkScaledImageCache::ID* id) {
+void SkScaledImageCache::Unlock(SkScaledImageCache::ID id) {
SkAutoMutexAcquire am(gMutex);
get_cache()->unlock(id);
@@ -637,6 +543,21 @@ size_t SkScaledImageCache::GetSingleAllocationByteLimit() {
return get_cache()->getSingleAllocationByteLimit();
}
+const SkScaledImageCache::Rec* SkScaledImageCache::FindAndLock(const Key& key) {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->findAndLock(key);
+}
+
+const SkScaledImageCache::Rec* SkScaledImageCache::AddAndLock(Rec* rec) {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->addAndLock(rec);
+}
+
+void SkScaledImageCache::Add(Rec* rec) {
+ SkAutoMutexAcquire am(gMutex);
+ get_cache()->add(rec);
+}
+
///////////////////////////////////////////////////////////////////////////////
#include "SkGraphics.h"
diff --git a/src/core/SkScaledImageCache.h b/src/core/SkScaledImageCache.h
index 9eea55079b..c31f8ffed9 100644
--- a/src/core/SkScaledImageCache.h
+++ b/src/core/SkScaledImageCache.h
@@ -25,8 +25,6 @@ class SkMipMap;
*/
class SkScaledImageCache {
public:
- struct ID;
-
struct Key {
// Call this to access your private contents. Must not use the address after calling init()
void* writableContents() { return this + 1; }
@@ -49,9 +47,6 @@ public:
return true;
}
- // delete using sk_free
- Key* clone() const;
-
private:
// store fCount32 first, so we don't consider it in operator<
int32_t fCount32; // 2 + user contents count32
@@ -62,6 +57,32 @@ public:
const uint32_t* as32SkipCount() const { return this->as32() + 1; }
};
+ struct Rec {
+ typedef SkScaledImageCache::Key Key;
+
+ Rec() : fLockCount(1) {}
+ virtual ~Rec() {}
+
+ uint32_t getHash() const { return this->getKey().hash(); }
+
+ virtual const Key& getKey() const = 0;
+ virtual size_t bytesUsed() const = 0;
+
+ // for SkTDynamicHash::Traits
+ static uint32_t Hash(const Key& key) { return key.hash(); }
+ static const Key& GetKey(const Rec& rec) { return rec.getKey(); }
+
+ private:
+ Rec* fNext;
+ Rec* fPrev;
+ int32_t fLockCount;
+ int32_t fPad;
+
+ friend class SkScaledImageCache;
+ };
+
+ typedef const Rec* ID;
+
/**
* Returns a locked/pinned SkDiscardableMemory instance for the specified
* number of bytes, or NULL on failure.
@@ -73,13 +94,10 @@ public:
* instance of this cache.
*/
- static ID* FindAndLock(const Key&, SkBitmap* result);
- static ID* AddAndLock(const Key&, const SkBitmap& result);
-
- static ID* FindAndLock(const Key&, const SkMipMap** result);
- static ID* AddAndLock(const Key&, const SkMipMap* result);
-
- static void Unlock(ID*);
+ static const Rec* FindAndLock(const Key& key);
+ static const Rec* AddAndLock(Rec*);
+ static void Add(Rec*);
+ static void Unlock(ID);
static size_t GetTotalBytesUsed();
static size_t GetTotalByteLimit();
@@ -115,22 +133,9 @@ public:
explicit SkScaledImageCache(size_t byteLimit);
~SkScaledImageCache();
- /**
- * Search the cache for a matching key. If found, return its bitmap and return its ID pointer.
- * Use the returned ID to unlock the cache when you are done using outBitmap.
- *
- * If a match is not found, outBitmap will be unmodified, and NULL will be returned.
- */
- ID* findAndLock(const Key& key, SkBitmap* outBitmap);
- ID* findAndLock(const Key& key, const SkMipMap** returnedMipMap);
-
- /**
- * To add a new bitmap (or mipMap) to the cache, call
- * AddAndLock. Use the returned ptr to unlock the cache when you
- * are done using scaled.
- */
- ID* addAndLock(const Key&, const SkBitmap& bitmap);
- ID* addAndLock(const Key&, const SkMipMap* mipMap);
+ const Rec* findAndLock(const Key& key);
+ const Rec* addAndLock(Rec*);
+ void add(Rec*);
/**
* Given a non-null ID ptr returned by either findAndLock or addAndLock,
@@ -138,7 +143,7 @@ public:
* if needed. After this, the cached bitmap should no longer be
* referenced by the caller.
*/
- void unlock(ID*);
+ void unlock(ID);
size_t getTotalBytesUsed() const { return fTotalBytesUsed; }
size_t getTotalByteLimit() const { return fTotalByteLimit; }
@@ -164,8 +169,6 @@ public:
*/
void dump() const;
-public:
- struct Rec;
private:
Rec* fHead;
Rec* fTail;
@@ -182,9 +185,6 @@ private:
size_t fSingleAllocationByteLimit;
int fCount;
- Rec* findAndLock(const Key& key);
- ID* addAndLock(Rec* rec);
-
void purgeRec(Rec*);
void purgeAsNeeded();
diff --git a/src/lazy/SkCachingPixelRef.cpp b/src/lazy/SkCachingPixelRef.cpp
index c59682854b..6d97aaeee9 100644
--- a/src/lazy/SkCachingPixelRef.cpp
+++ b/src/lazy/SkCachingPixelRef.cpp
@@ -30,13 +30,11 @@ SkCachingPixelRef::SkCachingPixelRef(const SkImageInfo& info,
: INHERITED(info)
, fImageGenerator(generator)
, fErrorInDecoding(false)
- , fScaledCacheId(NULL)
, fRowBytes(rowBytes) {
SkASSERT(fImageGenerator != NULL);
}
SkCachingPixelRef::~SkCachingPixelRef() {
SkDELETE(fImageGenerator);
- SkASSERT(NULL == fScaledCacheId);
// Assert always unlock before unref.
}
@@ -46,48 +44,28 @@ bool SkCachingPixelRef::onNewLockPixels(LockRec* rec) {
}
const SkImageInfo& info = this->info();
- SkBitmap bitmap;
- SkASSERT(NULL == fScaledCacheId);
- fScaledCacheId = SkBitmapCache::FindAndLock(this->getGenerationID(), info.fWidth, info.fHeight,
- &bitmap);
- if (NULL == fScaledCacheId) {
+ if (!SkBitmapCache::Find(this->getGenerationID(), info.fWidth, info.fHeight, &fLockedBitmap)) {
// Cache has been purged, must re-decode.
- if (!bitmap.allocPixels(info, fRowBytes)) {
+ if (!fLockedBitmap.allocPixels(info, fRowBytes)) {
fErrorInDecoding = true;
return false;
}
- SkAutoLockPixels autoLockPixels(bitmap);
- if (!fImageGenerator->getPixels(info, bitmap.getPixels(), fRowBytes)) {
+ if (!fImageGenerator->getPixels(info, fLockedBitmap.getPixels(), fRowBytes)) {
fErrorInDecoding = true;
return false;
}
- fScaledCacheId = SkBitmapCache::AddAndLock(this->getGenerationID(),
- info.fWidth, info.fHeight, bitmap);
- SkASSERT(fScaledCacheId != NULL);
+ SkBitmapCache::Add(this->getGenerationID(), info.fWidth, info.fHeight, fLockedBitmap);
}
- // Now bitmap should contain a concrete PixelRef of the decoded
- // image.
- SkAutoLockPixels autoLockPixels(bitmap);
- void* pixels = bitmap.getPixels();
+ // Now bitmap should contain a concrete PixelRef of the decoded image.
+ void* pixels = fLockedBitmap.getPixels();
SkASSERT(pixels != NULL);
-
- // At this point, the autoLockPixels will unlockPixels()
- // to remove bitmap's lock on the pixels. We will then
- // destroy bitmap. The *only* guarantee that this pointer
- // remains valid is the guarantee made by
- // SkScaledImageCache that it will not destroy the *other*
- // bitmap (SkScaledImageCache::Rec.fBitmap) that holds a
- // reference to the concrete PixelRef while this record is
- // locked.
rec->fPixels = pixels;
rec->fColorTable = NULL;
- rec->fRowBytes = bitmap.rowBytes();
+ rec->fRowBytes = fLockedBitmap.rowBytes();
return true;
}
void SkCachingPixelRef::onUnlockPixels() {
- SkASSERT(fScaledCacheId != NULL);
- SkScaledImageCache::Unlock( static_cast<SkScaledImageCache::ID*>(fScaledCacheId));
- fScaledCacheId = NULL;
+ fLockedBitmap.reset();
}
diff --git a/src/lazy/SkCachingPixelRef.h b/src/lazy/SkCachingPixelRef.h
index 1e87874f74..306f714169 100644
--- a/src/lazy/SkCachingPixelRef.h
+++ b/src/lazy/SkCachingPixelRef.h
@@ -8,6 +8,7 @@
#ifndef SkCachingPixelRef_DEFINED
#define SkCachingPixelRef_DEFINED
+#include "SkBitmapCache.h"
#include "SkImageInfo.h"
#include "SkImageGenerator.h"
#include "SkPixelRef.h"
@@ -52,9 +53,10 @@ protected:
private:
SkImageGenerator* const fImageGenerator;
bool fErrorInDecoding;
- void* fScaledCacheId;
const size_t fRowBytes;
+ SkBitmap fLockedBitmap;
+
SkCachingPixelRef(const SkImageInfo&, SkImageGenerator*, size_t rowBytes);
typedef SkPixelRef INHERITED;