aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
-rw-r--r--gyp/tests.gyp2
-rw-r--r--src/core/SkScaledImageCache.cpp208
-rw-r--r--src/core/SkScaledImageCache.h71
-rw-r--r--src/lazy/SkLazyPixelRef.cpp118
-rw-r--r--src/lazy/SkLazyPixelRef.h15
-rw-r--r--tests/CachedDecodingPixelRefTest.cpp204
6 files changed, 524 insertions, 94 deletions
diff --git a/gyp/tests.gyp b/gyp/tests.gyp
index 2e04de1b0e..6b286bb30a 100644
--- a/gyp/tests.gyp
+++ b/gyp/tests.gyp
@@ -10,6 +10,7 @@
'include_dirs' : [
'../src/core',
'../src/effects',
+ '../src/image',
'../src/lazy',
'../src/pathops',
'../src/pdf',
@@ -34,6 +35,7 @@
'../tests/BitSetTest.cpp',
'../tests/BlitRowTest.cpp',
'../tests/BlurTest.cpp',
+ '../tests/CachedDecodingPixelRefTest.cpp',
'../tests/CanvasTest.cpp',
'../tests/CanvasStateTest.cpp',
'../tests/ChecksumTest.cpp',
diff --git a/src/core/SkScaledImageCache.cpp b/src/core/SkScaledImageCache.cpp
index 11a0ee448f..644ce7f05f 100644
--- a/src/core/SkScaledImageCache.cpp
+++ b/src/core/SkScaledImageCache.cpp
@@ -7,6 +7,7 @@
#include "SkScaledImageCache.h"
#include "SkMipMap.h"
+#include "SkOnce.h"
#include "SkPixelRef.h"
#include "SkRect.h"
@@ -14,6 +15,13 @@
#define SK_DEFAULT_IMAGE_CACHE_LIMIT (2 * 1024 * 1024)
#endif
+static inline SkScaledImageCache::ID* rec_to_id(SkScaledImageCache::Rec* rec) {
+ return reinterpret_cast<SkScaledImageCache::ID*>(rec);
+}
+
+static inline SkScaledImageCache::Rec* id_to_rec(SkScaledImageCache::ID* id) {
+ return reinterpret_cast<SkScaledImageCache::Rec*>(id);
+}
// Implemented from en.wikipedia.org/wiki/MurmurHash.
static uint32_t compute_hash(const uint32_t data[], int count) {
@@ -42,23 +50,15 @@ static uint32_t compute_hash(const uint32_t data[], int count) {
}
struct Key {
- bool init(const SkBitmap& bm, SkScalar scaleX, SkScalar scaleY) {
- SkPixelRef* pr = bm.pixelRef();
- if (!pr) {
- return false;
- }
-
- size_t x, y;
- SkTDivMod(bm.pixelRefOffset(), bm.rowBytes(), &y, &x);
- x >>= 2;
-
- fGenID = pr->getGenerationID();
- fBounds.set(x, y, x + bm.width(), y + bm.height());
- fScaleX = scaleX;
- fScaleY = scaleY;
-
+ Key(uint32_t genID,
+ SkScalar scaleX,
+ SkScalar scaleY,
+ SkIRect bounds)
+ : fGenID(genID)
+ , fScaleX(scaleX)
+ , fScaleY(scaleY)
+ , fBounds(bounds) {
fHash = compute_hash(&fGenID, 7);
- return true;
}
bool operator<(const Key& other) const {
@@ -151,6 +151,17 @@ class SkScaledImageCache::Hash : public SkTDynamicHash<SkScaledImageCache::Rec,
// experimental hash to speed things up
#define USE_HASH
+#if !defined(USE_HASH)
+static inline SkScaledImageCache::Rec* find_rec_in_list(
+ SkScaledImageCache::Rec* head, const Key & key) {
+ SkScaledImageCache::Rec* rec = head;
+ while ((rec != NULL) && (rec->fKey != key)) {
+ rec = rec->fNext;
+ }
+ return rec;
+}
+#endif
+
SkScaledImageCache::SkScaledImageCache(size_t byteLimit) {
fHead = NULL;
fTail = NULL;
@@ -174,26 +185,24 @@ SkScaledImageCache::~SkScaledImageCache() {
delete fHash;
}
-SkScaledImageCache::Rec* SkScaledImageCache::findAndLock(const SkBitmap& orig,
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ This private method is the fully general record finder. All other
record finders should call this function. */
+SkScaledImageCache::Rec* SkScaledImageCache::findAndLock(uint32_t genID,
SkScalar scaleX,
- SkScalar scaleY) {
- Key key;
- if (!key.init(orig, scaleX, scaleY)) {
+ SkScalar scaleY,
+ const SkIRect& bounds) {
+ if (bounds.isEmpty()) {
return NULL;
}
-
+ Key key(genID, scaleX, scaleY, bounds);
#ifdef USE_HASH
Rec* rec = fHash->find(key);
#else
- Rec* rec = fHead;
- while (rec != NULL) {
- if (rec->fKey == key) {
- break;
- }
- rec = rec->fNext;
- }
+ Rec* rec = find_rec_in_list(fHead, key);
#endif
-
if (rec) {
this->moveToHead(rec); // for our LRU
rec->fLockCount += 1;
@@ -201,6 +210,36 @@ SkScaledImageCache::Rec* SkScaledImageCache::findAndLock(const SkBitmap& orig,
return rec;
}
+/**
+ This function finds the bounds of the bitmap *within its pixelRef*.
+ If the bitmap lacks a pixelRef, it will return an empty rect, since
+ that doesn't make sense. This may be a useful enough function that
+ it should be somewhere else (in SkBitmap?). */
+static SkIRect get_bounds_from_bitmap(const SkBitmap& bm) {
+ if (!(bm.pixelRef())) {
+ return SkIRect::MakeEmpty();
+ }
+ size_t x, y;
+ SkTDivMod(bm.pixelRefOffset(), bm.rowBytes(), &y, &x);
+ x >>= bm.shiftPerPixel();
+ return SkIRect::MakeXYWH(x, y, bm.width(), bm.height());
+}
+
+
+SkScaledImageCache::ID* SkScaledImageCache::findAndLock(uint32_t genID,
+ int32_t width,
+ int32_t height,
+ SkBitmap* bitmap) {
+ Rec* rec = this->findAndLock(genID, SK_Scalar1, SK_Scalar1,
+ SkIRect::MakeWH(width, height));
+ if (rec) {
+ SkASSERT(NULL == rec->fMip);
+ SkASSERT(rec->fBitmap.pixelRef());
+ *bitmap = rec->fBitmap;
+ }
+ return rec_to_id(rec);
+}
+
SkScaledImageCache::ID* SkScaledImageCache::findAndLock(const SkBitmap& orig,
SkScalar scaleX,
SkScalar scaleY,
@@ -209,25 +248,53 @@ SkScaledImageCache::ID* SkScaledImageCache::findAndLock(const SkBitmap& orig,
// degenerate, and the key we use for mipmaps
return NULL;
}
-
- Rec* rec = this->findAndLock(orig, scaleX, scaleY);
+ Rec* rec = this->findAndLock(orig.getGenerationID(), scaleX,
+ scaleY, get_bounds_from_bitmap(orig));
if (rec) {
SkASSERT(NULL == rec->fMip);
SkASSERT(rec->fBitmap.pixelRef());
*scaled = rec->fBitmap;
}
- return (ID*)rec;
+ return rec_to_id(rec);
}
SkScaledImageCache::ID* SkScaledImageCache::findAndLockMip(const SkBitmap& orig,
SkMipMap const ** mip) {
- Rec* rec = this->findAndLock(orig, 0, 0);
+ Rec* rec = this->findAndLock(orig.getGenerationID(), 0, 0,
+ get_bounds_from_bitmap(orig));
if (rec) {
SkASSERT(rec->fMip);
SkASSERT(NULL == rec->fBitmap.pixelRef());
*mip = rec->fMip;
}
- return (ID*)rec;
+ return rec_to_id(rec);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+/**
+ This private method is the fully general record adder. All other
+ record adders should call this function. */
+void SkScaledImageCache::addAndLock(SkScaledImageCache::Rec* rec) {
+ SkASSERT(rec);
+ this->addToHead(rec);
+ SkASSERT(1 == rec->fLockCount);
+#ifdef USE_HASH
+ SkASSERT(fHash);
+ fHash->add(rec);
+#endif
+ // We may (now) be overbudget, so see if we need to purge something.
+ this->purgeAsNeeded();
+}
+
+SkScaledImageCache::ID* SkScaledImageCache::addAndLock(uint32_t genID,
+ int32_t width,
+ int32_t height,
+ const SkBitmap& bitmap) {
+ Key key(genID, SK_Scalar1, SK_Scalar1, SkIRect::MakeWH(width, height));
+ Rec* rec = SkNEW_ARGS(Rec, (key, bitmap));
+ this->addAndLock(rec);
+ return rec_to_id(rec);
}
SkScaledImageCache::ID* SkScaledImageCache::addAndLock(const SkBitmap& orig,
@@ -238,43 +305,26 @@ SkScaledImageCache::ID* SkScaledImageCache::addAndLock(const SkBitmap& orig,
// degenerate, and the key we use for mipmaps
return NULL;
}
-
- Key key;
- if (!key.init(orig, scaleX, scaleY)) {
+ SkIRect bounds = get_bounds_from_bitmap(orig);
+ if (bounds.isEmpty()) {
return NULL;
}
-
+ Key key(orig.getGenerationID(), scaleX, scaleY, bounds);
Rec* rec = SkNEW_ARGS(Rec, (key, scaled));
- this->addToHead(rec);
- SkASSERT(1 == rec->fLockCount);
-
-#ifdef USE_HASH
- fHash->add(rec);
-#endif
-
- // We may (now) be overbudget, so see if we need to purge something.
- this->purgeAsNeeded();
- return (ID*)rec;
+ this->addAndLock(rec);
+ return rec_to_id(rec);
}
SkScaledImageCache::ID* SkScaledImageCache::addAndLockMip(const SkBitmap& orig,
const SkMipMap* mip) {
- Key key;
- if (!key.init(orig, 0, 0)) {
+ SkIRect bounds = get_bounds_from_bitmap(orig);
+ if (bounds.isEmpty()) {
return NULL;
}
-
+ Key key(orig.getGenerationID(), 0, 0, bounds);
Rec* rec = SkNEW_ARGS(Rec, (key, mip));
- this->addToHead(rec);
- SkASSERT(1 == rec->fLockCount);
-
-#ifdef USE_HASH
- fHash->add(rec);
-#endif
-
- // We may (now) be overbudget, so see if we need to purge something.
- this->purgeAsNeeded();
- return (ID*)rec;
+ this->addAndLock(rec);
+ return rec_to_id(rec);
}
void SkScaledImageCache::unlock(SkScaledImageCache::ID* id) {
@@ -285,7 +335,7 @@ void SkScaledImageCache::unlock(SkScaledImageCache::ID* id) {
bool found = false;
Rec* rec = fHead;
while (rec != NULL) {
- if ((ID*)rec == id) {
+ if (rec == id_to_rec(id)) {
found = true;
break;
}
@@ -294,7 +344,7 @@ void SkScaledImageCache::unlock(SkScaledImageCache::ID* id) {
SkASSERT(found);
}
#endif
- Rec* rec = (Rec*)id;
+ Rec* rec = id_to_rec(id);
SkASSERT(rec->fLockCount > 0);
rec->fLockCount -= 1;
@@ -451,14 +501,38 @@ void SkScaledImageCache::validate() const {
SK_DECLARE_STATIC_MUTEX(gMutex);
+static void create_cache(SkScaledImageCache** cache) {
+ *cache = SkNEW_ARGS(SkScaledImageCache, (SK_DEFAULT_IMAGE_CACHE_LIMIT));
+}
+
static SkScaledImageCache* get_cache() {
- static SkScaledImageCache* gCache;
- if (!gCache) {
- gCache = SkNEW_ARGS(SkScaledImageCache, (SK_DEFAULT_IMAGE_CACHE_LIMIT));
- }
+ static SkScaledImageCache* gCache(NULL);
+ SK_DECLARE_STATIC_ONCE(create_cache_once);
+ SkOnce<SkScaledImageCache**>(&create_cache_once, create_cache, &gCache);
+ SkASSERT(NULL != gCache);
return gCache;
}
+
+SkScaledImageCache::ID* SkScaledImageCache::FindAndLock(
+ uint32_t pixelGenerationID,
+ int32_t width,
+ int32_t height,
+ SkBitmap* scaled) {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->findAndLock(pixelGenerationID, width, height, scaled);
+}
+
+SkScaledImageCache::ID* SkScaledImageCache::AddAndLock(
+ uint32_t pixelGenerationID,
+ int32_t width,
+ int32_t height,
+ const SkBitmap& scaled) {
+ SkAutoMutexAcquire am(gMutex);
+ return get_cache()->addAndLock(pixelGenerationID, width, height, scaled);
+}
+
+
SkScaledImageCache::ID* SkScaledImageCache::FindAndLock(const SkBitmap& orig,
SkScalar scaleX,
SkScalar scaleY,
diff --git a/src/core/SkScaledImageCache.h b/src/core/SkScaledImageCache.h
index 32474b7f61..fee69d2d58 100644
--- a/src/core/SkScaledImageCache.h
+++ b/src/core/SkScaledImageCache.h
@@ -31,13 +31,25 @@ public:
* instance of this cache.
*/
+ static ID* FindAndLock(uint32_t pixelGenerationID,
+ int32_t width,
+ int32_t height,
+ SkBitmap* returnedBitmap);
+
static ID* FindAndLock(const SkBitmap& original, SkScalar scaleX,
- SkScalar scaleY, SkBitmap* scaled);
- static ID* FindAndLockMip(const SkBitmap& original, SkMipMap const**);
+ SkScalar scaleY, SkBitmap* returnedBitmap);
+ static ID* FindAndLockMip(const SkBitmap& original,
+ SkMipMap const** returnedMipMap);
+
+
+ static ID* AddAndLock(uint32_t pixelGenerationID,
+ int32_t width,
+ int32_t height,
+ const SkBitmap& bitmap);
static ID* AddAndLock(const SkBitmap& original, SkScalar scaleX,
- SkScalar scaleY, const SkBitmap& scaled);
- static ID* AddAndLockMip(const SkBitmap& original, const SkMipMap*);
+ SkScalar scaleY, const SkBitmap& bitmap);
+ static ID* AddAndLockMip(const SkBitmap& original, const SkMipMap* mipMap);
static void Unlock(ID*);
@@ -51,24 +63,48 @@ public:
~SkScaledImageCache();
/**
- * Search the cache for a scaled version of original. If found, return it
- * in scaled, and return its ID pointer. Use the returned ptr to unlock
- * the cache when you are done using scaled.
+ * Search the cache for a matching bitmap (using generationID,
+ * width, and height as a search key). If found, return it in
+ * returnedBitmap, and return its ID pointer. Use the returned
+ * ptr to unlock the cache when you are done using
+ * returnedBitmap.
+ *
+ * If a match is not found, returnedBitmap will be unmodified, and
+ * NULL will be returned.
+ *
+ * This is used if there is no scaling or subsetting, for example
+ * by SkLazyPixelRef.
+ */
+ ID* findAndLock(uint32_t pixelGenerationID, int32_t width, int32_t height,
+ SkBitmap* returnedBitmap);
+
+ /**
+ * Search the cache for a scaled version of original. If found,
+ * return it in returnedBitmap, and return its ID pointer. Use
+ * the returned ptr to unlock the cache when you are done using
+ * returnedBitmap.
*
- * If a match is not found, scaled will be unmodifed, and NULL will be
- * returned.
+ * If a match is not found, returnedBitmap will be unmodified, and
+ * NULL will be returned.
*/
ID* findAndLock(const SkBitmap& original, SkScalar scaleX,
- SkScalar scaleY, SkBitmap* scaled);
- ID* findAndLockMip(const SkBitmap& original, SkMipMap const**);
+ SkScalar scaleY, SkBitmap* returnedBitmap);
+ ID* findAndLockMip(const SkBitmap& original,
+ SkMipMap const** returnedMipMap);
/**
- * To add a new (scaled) bitmap to the cache, call AddAndLock. Use the
- * returned ptr to unlock the cache when you are done using scaled.
+ * To add a new bitmap (or mipMap) to the cache, call
+ * AddAndLock. Use the returned ptr to unlock the cache when you
+ * are done using scaled.
+ *
+ * Use (generationID, width, and height) or (original, scaleX,
+ * scaleY) or (original) as a search key
*/
+ ID* addAndLock(uint32_t pixelGenerationID, int32_t width, int32_t height,
+ const SkBitmap& bitmap);
ID* addAndLock(const SkBitmap& original, SkScalar scaleX,
- SkScalar scaleY, const SkBitmap& scaled);
- ID* addAndLockMip(const SkBitmap& original, const SkMipMap*);
+ SkScalar scaleY, const SkBitmap& bitmap);
+ ID* addAndLockMip(const SkBitmap& original, const SkMipMap* mipMap);
/**
* Given a non-null ID ptr returned by either findAndLock or addAndLock,
@@ -101,7 +137,9 @@ private:
size_t fByteLimit;
int fCount;
- Rec* findAndLock(const SkBitmap& original, SkScalar sx, SkScalar sy);
+ Rec* findAndLock(uint32_t generationID, SkScalar sx, SkScalar sy,
+ const SkIRect& bounds);
+ void addAndLock(Rec* rec);
void purgeAsNeeded();
@@ -115,5 +153,4 @@ private:
void validate() const {}
#endif
};
-
#endif
diff --git a/src/lazy/SkLazyPixelRef.cpp b/src/lazy/SkLazyPixelRef.cpp
index 0454362b94..17bdff4b47 100644
--- a/src/lazy/SkLazyPixelRef.cpp
+++ b/src/lazy/SkLazyPixelRef.cpp
@@ -11,6 +11,7 @@
#include "SkData.h"
#include "SkImageCache.h"
#include "SkImagePriv.h"
+#include "SkScaledImageCache.h"
#if LAZY_CACHE_STATS
#include "SkThread.h"
@@ -22,9 +23,9 @@ int32_t SkLazyPixelRef::gCacheMisses;
SkLazyPixelRef::SkLazyPixelRef(SkData* data, SkBitmapFactory::DecodeProc proc, SkImageCache* cache)
// Pass NULL for the Mutex so that the default (ring buffer) will be used.
: INHERITED(NULL)
+ , fErrorInDecoding(false)
, fDecodeProc(proc)
, fImageCache(cache)
- , fCacheId(SkImageCache::UNINITIALIZED_ID)
, fRowBytes(0) {
SkASSERT(fDecodeProc != NULL);
if (NULL == data) {
@@ -35,8 +36,12 @@ SkLazyPixelRef::SkLazyPixelRef(SkData* data, SkBitmapFactory::DecodeProc proc, S
fData->ref();
fErrorInDecoding = data->size() == 0;
}
- SkASSERT(cache != NULL);
- cache->ref();
+ if (fImageCache != NULL) {
+ fImageCache->ref();
+ fCacheId = SkImageCache::UNINITIALIZED_ID;
+ } else {
+ fScaledCacheId = NULL;
+ }
// mark as uninitialized -- all fields are -1
memset(&fLazilyCachedInfo, 0xFF, sizeof(fLazilyCachedInfo));
@@ -48,6 +53,14 @@ SkLazyPixelRef::SkLazyPixelRef(SkData* data, SkBitmapFactory::DecodeProc proc, S
SkLazyPixelRef::~SkLazyPixelRef() {
SkASSERT(fData != NULL);
fData->unref();
+ if (NULL == fImageCache) {
+ if (fScaledCacheId != NULL) {
+ SkScaledImageCache::Unlock(fScaledCacheId);
+ // TODO(halcanary): SkScaledImageCache needs a
+ // throwAwayCache(id) method.
+ }
+ return;
+ }
SkASSERT(fImageCache);
if (fCacheId != SkImageCache::UNINITIALIZED_ID) {
fImageCache->throwAwayCache(fCacheId);
@@ -79,10 +92,91 @@ const SkImage::Info* SkLazyPixelRef::getCachedInfo() {
return &fLazilyCachedInfo;
}
+/**
+ Returns bitmap->getPixels() on success; NULL on failure */
+static void* decode_into_bitmap(SkImage::Info* info,
+ SkBitmapFactory::DecodeProc decodeProc,
+ size_t* rowBytes,
+ SkData* data,
+ SkBitmap* bm) {
+ SkASSERT(info && decodeProc && rowBytes && data && bm);
+ if (!(bm->setConfig(SkImageInfoToBitmapConfig(*info), info->fWidth,
+ info->fHeight, *rowBytes, info->fAlphaType)
+ && bm->allocPixels(NULL, NULL))) {
+ // Use the default allocator. It may be necessary for the
+ // SkLazyPixelRef to have a allocator field which is passed
+ // into allocPixels().
+ return NULL;
+ }
+ SkBitmapFactory::Target target;
+ target.fAddr = bm->getPixels();
+ target.fRowBytes = bm->rowBytes();
+ *rowBytes = target.fRowBytes;
+ if (!decodeProc(data->data(), data->size(), info, &target)) {
+ return NULL;
+ }
+ return target.fAddr;
+}
+
+void* SkLazyPixelRef::lockScaledImageCachePixels() {
+ SkASSERT(!fErrorInDecoding);
+ SkASSERT(NULL == fImageCache);
+ SkBitmap bitmap;
+ const SkImage::Info* info = this->getCachedInfo();
+ if (info == NULL) {
+ return NULL;
+ }
+ // If this is the first time through, this is guaranteed to fail.
+ // Maybe we should have a flag that says "don't even bother looking"
+ fScaledCacheId = SkScaledImageCache::FindAndLock(this->getGenerationID(),
+ info->fWidth,
+ info->fHeight,
+ &bitmap);
+ if (fScaledCacheId != NULL) {
+ SkAutoLockPixels autoLockPixels(bitmap);
+ void* pixels = bitmap.getPixels();
+ SkASSERT(NULL != pixels);
+ // At this point, the autoLockPixels will unlockPixels()
+ // to remove bitmap's lock on the pixels. We will then
+ // destroy bitmap. The *only* guarantee that this pointer
+ // remains valid is the guarantee made by
+ // SkScaledImageCache that it will not destroy the *other*
+ // bitmap (SkScaledImageCache::Rec.fBitmap) that holds a
+ // reference to the concrete PixelRef while this record is
+ // locked.
+ return pixels;
+ } else {
+ // Cache has been purged, must re-decode.
+ void* pixels = decode_into_bitmap(const_cast<SkImage::Info*>(info),
+ fDecodeProc, &fRowBytes, fData,
+ &bitmap);
+ if (NULL == pixels) {
+ fErrorInDecoding = true;
+ return NULL;
+ }
+ fScaledCacheId = SkScaledImageCache::AddAndLock(this->getGenerationID(),
+ info->fWidth,
+ info->fHeight,
+ bitmap);
+ SkASSERT(fScaledCacheId != NULL);
+ return pixels;
+ }
+}
+
void* SkLazyPixelRef::onLockPixels(SkColorTable**) {
if (fErrorInDecoding) {
return NULL;
}
+ if (NULL == fImageCache) {
+ return this->lockScaledImageCachePixels();
+ } else {
+ return this->lockImageCachePixels();
+ }
+}
+
+void* SkLazyPixelRef::lockImageCachePixels() {
+ SkASSERT(fImageCache != NULL);
+ SkASSERT(!fErrorInDecoding);
SkBitmapFactory::Target target;
// Check to see if the pixels still exist in the cache.
if (SkImageCache::UNINITIALIZED_ID == fCacheId) {
@@ -147,8 +241,19 @@ void SkLazyPixelRef::onUnlockPixels() {
if (fErrorInDecoding) {
return;
}
- if (fCacheId != SkImageCache::UNINITIALIZED_ID) {
- fImageCache->releaseCache(fCacheId);
+ if (NULL == fImageCache) {
+ // onUnlockPixels() should never be called a second time from
+ // PixelRef::Unlock() without calling onLockPixels() first.
+ SkASSERT(NULL != fScaledCacheId);
+ if (NULL != fScaledCacheId) {
+ SkScaledImageCache::Unlock(fScaledCacheId);
+ fScaledCacheId = NULL;
+ }
+ } else { // use fImageCache
+ SkASSERT(SkImageCache::UNINITIALIZED_ID != fCacheId);
+ if (SkImageCache::UNINITIALIZED_ID != fCacheId) {
+ fImageCache->releaseCache(fCacheId);
+ }
}
}
@@ -157,8 +262,6 @@ SkData* SkLazyPixelRef::onRefEncodedData() {
return fData;
}
-#include "SkImagePriv.h"
-
static bool init_from_info(SkBitmap* bm, const SkImage::Info& info,
size_t rowBytes) {
SkBitmap::Config config = SkImageInfoToBitmapConfig(info);
@@ -206,3 +309,4 @@ bool SkLazyPixelRef::onDecodeInto(int pow2, SkBitmap* bitmap) {
*bitmap = tmp;
return true;
}
+
diff --git a/src/lazy/SkLazyPixelRef.h b/src/lazy/SkLazyPixelRef.h
index 8f7a751e95..c7fbd7be65 100644
--- a/src/lazy/SkLazyPixelRef.h
+++ b/src/lazy/SkLazyPixelRef.h
@@ -10,8 +10,10 @@
#include "SkBitmapFactory.h"
#include "SkImage.h"
+#include "SkImageCache.h"
#include "SkPixelRef.h"
#include "SkFlattenable.h"
+#include "SkScaledImageCache.h"
class SkColorTable;
class SkData;
@@ -33,8 +35,9 @@ public:
* Create a new SkLazyPixelRef.
* @param SkData Encoded data representing the pixels.
* @param DecodeProc Called to decode the pixels when needed. Must be non-NULL.
- * @param SkImageCache Object that handles allocating and freeing the pixel memory, as needed.
- * Must not be NULL.
+ * @param SkImageCache Object that handles allocating and freeing
+ * the pixel memory, as needed. If NULL, use the global
+ * SkScaledImageCache.
*/
SkLazyPixelRef(SkData*, SkBitmapFactory::DecodeProc, SkImageCache*);
@@ -69,7 +72,10 @@ private:
SkData* fData;
SkBitmapFactory::DecodeProc fDecodeProc;
SkImageCache* fImageCache;
- intptr_t fCacheId;
+ union {
+ SkImageCache::ID fCacheId;
+ SkScaledImageCache::ID* fScaledCacheId;
+ };
size_t fRowBytes;
SkImage::Info fLazilyCachedInfo;
@@ -80,6 +86,9 @@ private:
// lazily initialized our cached info. Returns NULL on failure.
const SkImage::Info* getCachedInfo();
+ void* lockScaledImageCachePixels();
+ void* lockImageCachePixels();
+
typedef SkPixelRef INHERITED;
};
diff --git a/tests/CachedDecodingPixelRefTest.cpp b/tests/CachedDecodingPixelRefTest.cpp
new file mode 100644
index 0000000000..ded4ca27bb
--- /dev/null
+++ b/tests/CachedDecodingPixelRefTest.cpp
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkBitmap.h"
+#include "SkCanvas.h"
+#include "SkData.h"
+#include "SkForceLinking.h"
+#include "SkImageDecoder.h"
+#include "SkImagePriv.h"
+#include "SkLazyPixelRef.h"
+#include "SkScaledImageCache.h"
+#include "SkStream.h"
+#include "Test.h"
+
+__SK_FORCE_IMAGE_DECODER_LINKING;
+
+/**
+ * Fill this bitmap with some color.
+ */
+static void make_test_image(SkBitmap* bm) {
+ static const int W = 50, H = 50;
+ static const SkBitmap::Config config = SkBitmap::kARGB_8888_Config;
+ bm->setConfig(config, W, H);
+ bm->allocPixels();
+ bm->eraseColor(SK_ColorBLACK);
+ SkCanvas canvas(*bm);
+ SkPaint paint;
+ paint.setColor(SK_ColorBLUE);
+ canvas.drawRectCoords(0, 0, SkIntToScalar(W/2),
+ SkIntToScalar(H/2), paint);
+ paint.setColor(SK_ColorWHITE);
+ canvas.drawRectCoords(SkIntToScalar(W/2), SkIntToScalar(H/2),
+ SkIntToScalar(W), SkIntToScalar(H), paint);
+}
+
+/**
+ * encode this bitmap into some data via SkImageEncoder
+ */
+static SkData* create_data_from_bitmap(const SkBitmap& bm,
+ SkImageEncoder::Type type) {
+ SkDynamicMemoryWStream stream;
+ if (SkImageEncoder::EncodeStream(&stream, bm, type, 100)) {
+ return stream.copyToData();
+ }
+ return NULL;
+}
+
+/**
+ * A simplified version of SkBitmapFactory
+ */
+static bool simple_bitmap_factory(SkBitmapFactory::DecodeProc proc,
+ SkData* data,
+ SkBitmap* dst) {
+ SkImage::Info info;
+ if (!proc(data->data(), data->size(), &info, NULL)) {
+ return false;
+ }
+ dst->setConfig(SkImageInfoToBitmapConfig(info), info.fWidth,
+ info.fHeight, 0, info.fAlphaType);
+ SkAutoTUnref<SkLazyPixelRef> ref(SkNEW_ARGS(SkLazyPixelRef,
+ (data, proc, NULL)));
+ dst->setPixelRef(ref);
+ return true;
+}
+
+static void compare_bitmaps(skiatest::Reporter* reporter,
+ const SkBitmap& b1, const SkBitmap& b2,
+ bool pixelPerfect = true) {
+ REPORTER_ASSERT(reporter, b1.empty() == b2.empty());
+ REPORTER_ASSERT(reporter, b1.width() == b2.width());
+ REPORTER_ASSERT(reporter, b1.height() == b2.height());
+ REPORTER_ASSERT(reporter, b1.isNull() == b2.isNull());
+ SkAutoLockPixels autoLockPixels1(b1);
+ SkAutoLockPixels autoLockPixels2(b2);
+ REPORTER_ASSERT(reporter, b1.isNull() == b2.isNull());
+ if (b1.isNull() || b1.empty()) {
+ return;
+ }
+ REPORTER_ASSERT(reporter, NULL != b1.getPixels());
+ REPORTER_ASSERT(reporter, NULL != b2.getPixels());
+ if ((!(b1.getPixels())) || (!(b2.getPixels()))) {
+ return;
+ }
+ if ((b1.width() != b2.width()) ||
+ (b1.height() != b2.height())) {
+ return;
+ }
+ if (!pixelPerfect) {
+ return;
+ }
+ int pixelErrors = 0;
+ for (int y = 0; y < b2.height(); ++y) {
+ for (int x = 0; x < b2.width(); ++x) {
+ if (b1.getColor(x, y) != b2.getColor(x, y)) {
+ ++pixelErrors;
+ }
+ }
+ }
+ REPORTER_ASSERT(reporter, 0 == pixelErrors);
+}
+
+/**
+ * This checks to see that a SkLazyPixelRef works as advertised.
+ */
+#include "TestClassDef.h"
+DEF_TEST(CachedDecodingPixelRefTest, reporter) {
+ SkBitmap original;
+ make_test_image(&original);
+ const size_t bitmapSize = original.getSize();
+ const size_t oldByteLimit = SkScaledImageCache::GetByteLimit();
+ REPORTER_ASSERT(reporter, (!(original.empty())) && (!(original.isNull())));
+
+ static const SkImageEncoder::Type types[] = {
+ SkImageEncoder::kPNG_Type,
+ SkImageEncoder::kJPEG_Type,
+ SkImageEncoder::kWEBP_Type
+ };
+
+ for (size_t i = 0; i < SK_ARRAY_COUNT(types); i++) {
+ SkImageEncoder::Type type = types[i];
+ SkAutoDataUnref encoded(create_data_from_bitmap(original, type));
+ REPORTER_ASSERT(reporter, encoded.get() != NULL);
+ if (NULL == encoded.get()) {
+ continue;
+ }
+ SkBitmap lazy;
+ static const SkBitmapFactory::DecodeProc decoder =
+ &(SkImageDecoder::DecodeMemoryToTarget);
+ bool success = simple_bitmap_factory(decoder, encoded.get(), &lazy);
+
+ REPORTER_ASSERT(reporter, success);
+
+ size_t bytesUsed = SkScaledImageCache::GetBytesUsed();
+
+ if (oldByteLimit < bitmapSize) {
+ SkScaledImageCache::SetByteLimit(bitmapSize + oldByteLimit);
+ }
+ void* lazyPixels = NULL;
+
+ // Since this is lazy, it shouldn't have fPixels yet!
+ REPORTER_ASSERT(reporter, NULL == lazy.getPixels());
+ {
+ SkAutoLockPixels autoLockPixels(lazy); // now pixels are good.
+ lazyPixels = lazy.getPixels();
+ REPORTER_ASSERT(reporter, NULL != lazy.getPixels());
+ // first time we lock pixels, we should get bump in the size
+ // of the cache by exactly bitmapSize.
+ REPORTER_ASSERT(reporter, bytesUsed + bitmapSize
+ == SkScaledImageCache::GetBytesUsed());
+ bytesUsed = SkScaledImageCache::GetBytesUsed();
+ }
+ // pixels should be gone!
+ REPORTER_ASSERT(reporter, NULL == lazy.getPixels());
+ {
+ SkAutoLockPixels autoLockPixels(lazy); // now pixels are good.
+ REPORTER_ASSERT(reporter, NULL != lazy.getPixels());
+
+ // verify that the same pixels are used this time.
+ REPORTER_ASSERT(reporter, lazy.getPixels() == lazyPixels);
+ }
+
+ bool comparePixels = (SkImageEncoder::kPNG_Type == type);
+ // Only PNG is pixel-perfect.
+ compare_bitmaps(reporter, original, lazy, comparePixels);
+
+ // force the cache to clear by making it too small.
+ SkScaledImageCache::SetByteLimit(bitmapSize / 2);
+ compare_bitmaps(reporter, original, lazy, comparePixels);
+
+ // I'm pretty sure that the logic of the cache should mean
+ // that it will clear to zero, regardless of where it started.
+ REPORTER_ASSERT(reporter, SkScaledImageCache::GetBytesUsed() == 0);
+ // TODO(someone) - write a custom allocator that can verify
+ // that the memory where those pixels were cached really did
+ // get freed.
+
+ ////////////////////////////////////////////////////////////////////////
+ // The following commented-out code happens to work on my
+ // machine, and indicates to me that the SkLazyPixelRef is
+ // behaving as designed. But I don't know an easy way to
+ // guarantee that a second allocation of the same size will
+ // give a different address.
+ ////////////////////////////////////////////////////////////////////////
+ // {
+ // // confuse the heap allocation system
+ // SkAutoMalloc autoMalloc(bitmapSize);
+ // REPORTER_ASSERT(reporter, autoMalloc.get() == lazyPixels);
+ // {
+ // SkAutoLockPixels autoLockPixels(lazy);
+ // // verify that *different* pixels are used this time.
+ // REPORTER_ASSERT(reporter, lazy.getPixels() != lazyPixels);
+ // compare_bitmaps(reporter, original, lazy, comparePixels);
+ // }
+ // }
+
+ // restore cache size
+ SkScaledImageCache::SetByteLimit(oldByteLimit);
+ }
+}
+