aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
authorGravatar robertphillips <robertphillips@google.com>2014-10-10 11:38:29 -0700
committerGravatar Commit bot <commit-bot@chromium.org>2014-10-10 11:38:29 -0700
commit7bb9ed756e8663afe68e1a5fc680d57f83a31fea (patch)
tree7c9eb9b8f282521f42aecb35b5623dbdff24dca2
parent294c32612d712eb56361ac5439271a91ae96862e (diff)
Fix bug in GrCachedLayer reuse
In the new MultiPictureDraw tests a single hoisted layer is reused multiple times. The previous plot locking scheme allowed GrCachedLayer objects to be aggressively deleted prematurely leaving the reusing GrHoistedLayer objects with dangling pointers. This CL adds a new pseudo-ref to GrCachedLayer. (It can't be a real ref since the cached layers aren't deleted when it goes to 0). NOTRY=true Committed: https://skia.googlesource.com/skia/+/5c481666c9678f43e039ad605457be3854cf8de3 Review URL: https://codereview.chromium.org/640323002
-rw-r--r--src/gpu/GrLayerCache.cpp22
-rw-r--r--src/gpu/GrLayerCache.h33
-rw-r--r--src/gpu/GrLayerHoister.cpp22
-rw-r--r--tests/GpuLayerCacheTest.cpp15
4 files changed, 65 insertions, 27 deletions
diff --git a/src/gpu/GrLayerCache.cpp b/src/gpu/GrLayerCache.cpp
index 0481d144e5..570b2d6eaf 100644
--- a/src/gpu/GrLayerCache.cpp
+++ b/src/gpu/GrLayerCache.cpp
@@ -16,7 +16,6 @@ void GrCachedLayer::validate(const GrTexture* backingTexture) const {
SkASSERT(SK_InvalidGenID != fKey.pictureID());
SkASSERT(fKey.start() >= 0);
-
if (fTexture) {
// If the layer is in some texture then it must occupy some rectangle
SkASSERT(!fRect.isEmpty());
@@ -43,6 +42,15 @@ void GrCachedLayer::validate(const GrTexture* backingTexture) const {
SkASSERT(fTexture);
SkASSERT(!fRect.isEmpty());
}
+
+ // Unfortunately there is a brief time where a layer can be locked
+ // but not used, so we can only check the "used implies locked"
+ // invariant.
+ if (fUses > 0) {
+ SkASSERT(fLocked);
+ } else {
+ SkASSERT(0 == fUses);
+ }
}
class GrAutoValidateLayer : ::SkNoncopyable {
@@ -81,6 +89,7 @@ GrLayerCache::~GrLayerCache() {
SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key>::Iter iter(&fLayerHash);
for (; !iter.done(); ++iter) {
GrCachedLayer* layer = &(*iter);
+ SkASSERT(0 == layer->uses());
this->unlock(layer);
SkDELETE(layer);
}
@@ -168,7 +177,7 @@ bool GrLayerCache::lock(GrCachedLayer* layer, const GrTextureDesc& desc, bool do
// Hooray it is still in the atlas - make sure it stays there
SkASSERT(!dontAtlas);
layer->setLocked(true);
- fPlotLocks[layer->plot()->id()]++;
+ this->incPlotLock(layer->plot()->id());
return false;
} else if (!dontAtlas && PlausiblyAtlasable(desc.fWidth, desc.fHeight)) {
// Not in the atlas - will it fit?
@@ -193,7 +202,7 @@ bool GrLayerCache::lock(GrCachedLayer* layer, const GrTextureDesc& desc, bool do
layer->setTexture(fAtlas->getTexture(), bounds);
layer->setPlot(plot);
layer->setLocked(true);
- fPlotLocks[layer->plot()->id()]++;
+ this->incPlotLock(layer->plot()->id());
return true;
}
@@ -227,8 +236,7 @@ void GrLayerCache::unlock(GrCachedLayer* layer) {
if (layer->isAtlased()) {
const int plotID = layer->plot()->id();
- SkASSERT(fPlotLocks[plotID] > 0);
- fPlotLocks[plotID]--;
+ this->decPlotLock(plotID);
// At this point we could aggressively clear out un-locked plots but
// by delaying we may be able to reuse some of the atlased layers later.
#if DISABLE_CACHING
@@ -270,7 +278,7 @@ void GrLayerCache::validate() const {
SkASSERT(!pictInfo->fPlotUsage.isEmpty());
#endif
} else {
- // If there is no picture info for this layer then all of its
+ // If there is no picture info for this picture then all of its
// layers should be non-atlased.
SkASSERT(!layer->isAtlased());
}
@@ -321,6 +329,7 @@ void GrLayerCache::purge(uint32_t pictureID) {
}
for (int i = 0; i < toBeRemoved.count(); ++i) {
+ SkASSERT(0 == toBeRemoved[i]->uses());
this->unlock(toBeRemoved[i]);
fLayerHash.remove(GrCachedLayer::GetKey(*toBeRemoved[i]));
SkDELETE(toBeRemoved[i]);
@@ -366,6 +375,7 @@ void GrLayerCache::purgePlot(GrPlot* plot) {
}
for (int i = 0; i < toBeRemoved.count(); ++i) {
+ SkASSERT(0 == toBeRemoved[i]->uses());
SkASSERT(!toBeRemoved[i]->locked());
uint32_t pictureIDToRemove = toBeRemoved[i]->pictureID();
diff --git a/src/gpu/GrLayerCache.h b/src/gpu/GrLayerCache.h
index 790b82a8f3..3bccb437f8 100644
--- a/src/gpu/GrLayerCache.h
+++ b/src/gpu/GrLayerCache.h
@@ -97,6 +97,7 @@ public:
, fTexture(NULL)
, fRect(GrIRect16::MakeEmpty())
, fPlot(NULL)
+ , fUses(0)
, fLocked(false) {
SkASSERT(SK_InvalidGenID != pictureID && start >= 0 && stop >= 0);
}
@@ -155,6 +156,11 @@ private:
// It is always NULL for non-atlased layers.
GrPlot* fPlot;
+ // The number of actively hoisted layers using this cached image (e.g.,
+ // extant GrHoistedLayers pointing at this object). This object will
+ // be unlocked when the use count reaches 0.
+ int fUses;
+
// For non-atlased layers 'fLocked' should always match "fTexture".
// (i.e., if there is a texture it is locked).
// For atlased layers, 'fLocked' is true if the layer is in a plot and
@@ -162,6 +168,13 @@ private:
// actively required for rendering, then 'fLocked' is false. If the
// layer isn't in a plot then it can never be locked.
bool fLocked;
+
+ void addUse() { ++fUses; }
+ void removeUse() { SkASSERT(fUses > 0); --fUses; }
+ int uses() const { return fUses; }
+
+ friend class GrLayerCache; // for access to usage methods
+ friend class TestingAccess; // for testing
};
// The GrLayerCache caches pre-computed saveLayers for later rendering.
@@ -191,8 +204,15 @@ public:
// layer was found in the cache and can be reused.
bool lock(GrCachedLayer* layer, const GrTextureDesc& desc, bool dontAtlas);
- // Inform the cache that layer's cached image is not currently required
- void unlock(GrCachedLayer* layer);
+ // addUse is just here to keep the API symmetric
+ void addUse(GrCachedLayer* layer) { layer->addUse(); }
+ void removeUse(GrCachedLayer* layer) {
+ layer->removeUse();
+ if (layer->uses() == 0) {
+ // If no one cares about the layer allow it to be recycled.
+ this->unlock(layer);
+ }
+ }
// Setup to be notified when 'picture' is deleted
void trackPicture(const SkPicture* picture);
@@ -236,6 +256,9 @@ private:
// Plots with a 0 lock count are open for recycling/purging.
int fPlotLocks[kNumPlotsX * kNumPlotsY];
+ // Inform the cache that layer's cached image is not currently required
+ void unlock(GrCachedLayer* layer);
+
void initAtlas();
GrCachedLayer* createLayer(uint32_t pictureID, int start, int stop,
const SkMatrix& ctm, const SkPaint* paint);
@@ -255,6 +278,12 @@ private:
// was purged; false otherwise.
bool purgePlot();
+ void incPlotLock(int plotIdx) { ++fPlotLocks[plotIdx]; }
+ void decPlotLock(int plotIdx) {
+ SkASSERT(fPlotLocks[plotIdx] > 0);
+ --fPlotLocks[plotIdx];
+ }
+
// for testing
friend class TestingAccess;
int numLayers() const { return fLayerHash.count(); }
diff --git a/src/gpu/GrLayerHoister.cpp b/src/gpu/GrLayerHoister.cpp
index 00e8b99f45..e9e3a3f710 100644
--- a/src/gpu/GrLayerHoister.cpp
+++ b/src/gpu/GrLayerHoister.cpp
@@ -54,6 +54,7 @@ static void prepare_for_hoisting(GrLayerCache* layerCache,
hl = recycled->append();
}
+ layerCache->addUse(layer);
hl->fLayer = layer;
hl->fPicture = pict;
hl->fOffset = info.fOffset;
@@ -263,19 +264,6 @@ void GrLayerHoister::DrawLayers(const SkTDArray<GrHoistedLayer>& atlased,
convert_layers_to_replacements(recycled, replacements);
}
-static void unlock_layer_in_cache(GrLayerCache* layerCache,
- const SkPicture* picture,
- GrCachedLayer* layer) {
- layerCache->unlock(layer);
-
-#if DISABLE_CACHING
- // This code completely clears out the atlas. It is required when
- // caching is disabled so the atlas doesn't fill up and force more
- // free floating layers
- layerCache->purge(picture->uniqueID());
-#endif
-}
-
void GrLayerHoister::UnlockLayers(GrContext* context,
const SkTDArray<GrHoistedLayer>& atlased,
const SkTDArray<GrHoistedLayer>& nonAtlased,
@@ -283,15 +271,15 @@ void GrLayerHoister::UnlockLayers(GrContext* context,
GrLayerCache* layerCache = context->getLayerCache();
for (int i = 0; i < atlased.count(); ++i) {
- unlock_layer_in_cache(layerCache, atlased[i].fPicture, atlased[i].fLayer);
+ layerCache->removeUse(atlased[i].fLayer);
}
for (int i = 0; i < nonAtlased.count(); ++i) {
- unlock_layer_in_cache(layerCache, nonAtlased[i].fPicture, nonAtlased[i].fLayer);
+ layerCache->removeUse(nonAtlased[i].fLayer);
}
for (int i = 0; i < recycled.count(); ++i) {
- unlock_layer_in_cache(layerCache, recycled[i].fPicture, recycled[i].fLayer);
+ layerCache->removeUse(recycled[i].fLayer);
}
#if DISABLE_CACHING
@@ -300,5 +288,7 @@ void GrLayerHoister::UnlockLayers(GrContext* context,
// free floating layers
layerCache->purgeAll();
#endif
+
+ SkDEBUGCODE(layerCache->validate();)
}
diff --git a/tests/GpuLayerCacheTest.cpp b/tests/GpuLayerCacheTest.cpp
index 9e07056421..f7b2d6ef3e 100644
--- a/tests/GpuLayerCacheTest.cpp
+++ b/tests/GpuLayerCacheTest.cpp
@@ -21,6 +21,9 @@ public:
static void Purge(GrLayerCache* cache, uint32_t pictureID) {
cache->purge(pictureID);
}
+ static int Uses(GrCachedLayer* layer) {
+ return layer->uses();
+ }
};
// Add several layers to the cache
@@ -70,6 +73,10 @@ static void lock_layer(skiatest::Reporter* reporter,
REPORTER_ASSERT(reporter, layer->texture());
REPORTER_ASSERT(reporter, layer->locked());
+
+ cache->addUse(layer);
+
+ REPORTER_ASSERT(reporter, 1 == TestingAccess::Uses(layer));
}
// This test case exercises the public API of the GrLayerCache class.
@@ -120,20 +127,22 @@ DEF_GPUTEST(GpuLayerCache, reporter, factory) {
for (int i = 0; i < kInitialNumLayers; ++i) {
GrCachedLayer* layer = cache.findLayer(picture->uniqueID(), i+1, SkMatrix::I());
REPORTER_ASSERT(reporter, layer);
- cache.unlock(layer);
+ cache.removeUse(layer);
}
for (int i = 0; i < kInitialNumLayers; ++i) {
GrCachedLayer* layer = cache.findLayer(picture->uniqueID(), i+1, SkMatrix::I());
REPORTER_ASSERT(reporter, layer);
+ // All the layers should be unlocked
REPORTER_ASSERT(reporter, !layer->locked());
+
// The first 4 layers should still be in the atlas.
if (i < 4) {
REPORTER_ASSERT(reporter, layer->texture());
REPORTER_ASSERT(reporter, layer->isAtlased());
} else {
- // The final layer should be unlocked.
+ // The final layer should not be atlased.
REPORTER_ASSERT(reporter, NULL == layer->texture());
REPORTER_ASSERT(reporter, !layer->isAtlased());
}
@@ -148,7 +157,7 @@ DEF_GPUTEST(GpuLayerCache, reporter, factory) {
REPORTER_ASSERT(reporter, layer);
lock_layer(reporter, &cache, layer);
- cache.unlock(layer);
+ cache.removeUse(layer);
}
for (int i = 0; i < kInitialNumLayers+1; ++i) {