author    Robert Phillips <robertphillips@google.com>    2018-07-18 13:56:48 -0400
committer Skia Commit-Bot <skia-commit-bot@chromium.org> 2018-07-18 18:51:02 +0000
commit 715d08c381b4cc8af33d7dcdefc9533dcc97e4c9 (patch)
tree   328830165cfa268c5a6645e30088472836283a18 /src
parent 946c37057f2618af7eda34fd6d2dd8625a9e9b61 (diff)
Fix explicit allocation bug
Change-Id: I9866f563e02b2ab290cc46ede05f8eda21f6d3b2
Reviewed-on: https://skia-review.googlesource.com/142163
Commit-Queue: Robert Phillips <robertphillips@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
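In short: previously, when a usage interval expired, GrResourceAllocator returned the interval's GrSurface to the free pool unconditionally. If the proxy still holds a live external ref, the holder expects the surface's contents to survive the flush, so the surface must not be recycled until that ref is released. The heart of the change, lifted from the expire() hunk below (surrounding list bookkeeping omitted):

    sk_sp<GrSurface> surface = temp->detachSurface();

    // Only recycle the surface if no one outside the allocator still refs the
    // proxy; a live ref means the holder wants to keep the surface's contents.
    if (0 == temp->proxy()->priv().getProxyRefCnt()) {
        this->recycleSurface(std::move(surface));
    }

The rest of the patch renames freeUpSurface() to recycleSurface(), exposes getProxyRefCnt() via GrSurfaceProxyPriv, and adds debug output gated behind GR_ALLOCATION_SPEW.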
Diffstat (limited to 'src')
-rw-r--r--  src/gpu/GrResourceAllocator.cpp  63
-rw-r--r--  src/gpu/GrResourceAllocator.h     9
-rw-r--r--  src/gpu/GrSurfaceProxyPriv.h      6
3 files changed, 75 insertions, 3 deletions
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index d3895073aa..d253ca2c1a 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -138,7 +138,7 @@ void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
}
// 'surface' can be reused. Add it back to the free pool.
-void GrResourceAllocator::freeUpSurface(sk_sp<GrSurface> surface) {
+void GrResourceAllocator::recycleSurface(sk_sp<GrSurface> surface) {
const GrScratchKey &key = surface->resourcePriv().getScratchKey();
if (!key.isValid()) {
@@ -152,6 +152,9 @@ void GrResourceAllocator::freeUpSurface(sk_sp<GrSurface> surface) {
return;
}
+#if GR_ALLOCATION_SPEW
+ SkDebugf("putting surface %d back into pool\n", surface->uniqueID().asUInt());
+#endif
// TODO: fix this insertion so we get a more LRU-ish behavior
fFreePool.insert(key, surface.release());
}
@@ -192,10 +195,18 @@ void GrResourceAllocator::expire(unsigned int curIndex) {
Interval* temp = fActiveIntvls.popHead();
if (temp->wasAssignedSurface()) {
- this->freeUpSurface(temp->detachSurface());
+ sk_sp<GrSurface> surface = temp->detachSurface();
+
+ // If the proxy has an actual live ref on it that means someone wants to retain its
+ // contents. In that case we cannot recycle it (until the external holder lets
+ // go of it).
+ if (0 == temp->proxy()->priv().getProxyRefCnt()) {
+ this->recycleSurface(std::move(surface));
+ }
}
// Add temp to the free interval list so it can be reused
+ SkASSERT(!temp->wasAssignedSurface()); // it had better not have a ref on a surface
temp->setNext(fFreeIntervalList);
fFreeIntervalList = temp;
}
@@ -222,6 +233,9 @@ bool GrResourceAllocator::assign(int* startIndex, int* stopIndex,
SkDEBUGCODE(fAssigned = true;)
+#if GR_ALLOCATION_SPEW
+ this->dumpIntervals();
+#endif
while (Interval* cur = fIntvlList.popHead()) {
if (fEndOfOpListOpIndices[fCurOpListIndex] < cur->start()) {
fCurOpListIndex++;
@@ -269,6 +283,12 @@ bool GrResourceAllocator::assign(int* startIndex, int* stopIndex,
SkASSERT(surface->getUniqueKey() == tex->getUniqueKey());
}
+#if GR_ALLOCATION_SPEW
+ SkDebugf("Assigning %d to %d\n",
+ surface->uniqueID().asUInt(),
+ cur->proxy()->uniqueID().asUInt());
+#endif
+
cur->assign(std::move(surface));
} else {
SkASSERT(!cur->proxy()->priv().isInstantiated());
@@ -291,3 +311,42 @@ bool GrResourceAllocator::assign(int* startIndex, int* stopIndex,
this->expire(std::numeric_limits<unsigned int>::max());
return true;
}
+
+#if GR_ALLOCATION_SPEW
+void GrResourceAllocator::dumpIntervals() {
+
+ // Print all the intervals while computing their range
+ unsigned int min = fNumOps+1;
+ unsigned int max = 0;
+ for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
+ SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d R:%d W:%d\n",
+ cur->proxy()->uniqueID().asUInt(),
+ cur->proxy()->priv().isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt()
+ : -1,
+ cur->start(),
+ cur->end(),
+ cur->proxy()->priv().getProxyRefCnt(),
+ cur->proxy()->getBackingRefCnt_TestOnly(),
+ cur->proxy()->getPendingReadCnt_TestOnly(),
+ cur->proxy()->getPendingWriteCnt_TestOnly());
+ min = SkTMin(min, cur->start());
+ max = SkTMax(max, cur->end());
+ }
+
+ // Draw a graph of the usage intervals
+ for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
+ SkDebugf("{ %3d,%3d }: ",
+ cur->proxy()->uniqueID().asUInt(),
+ cur->proxy()->priv().isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt()
+ : -1);
+ for (unsigned int i = min; i <= max; ++i) {
+ if (i >= cur->start() && i <= cur->end()) {
+ SkDebugf("x");
+ } else {
+ SkDebugf(" ");
+ }
+ }
+ SkDebugf("\n");
+ }
+}
+#endif
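With GR_ALLOCATION_SPEW flipped to 1 (see the #define added to GrResourceAllocator.h below), dumpIntervals() prints one line per interval followed by a simple ASCII graph of the usage intervals. A hypothetical example of the output for two proxies, going by the format strings above (the IDs and ref counts are made up for illustration):

    {   1, -1 }: [ 0,  2] - proxyRefs:1 surfaceRefs:0 R:0 W:1
    {   2, -1 }: [ 1,  3] - proxyRefs:0 surfaceRefs:0 R:1 W:0
    {   1, -1 }: xxx
    {   2, -1 }:  xxx

The second column is the underlying GrSurface's unique ID, or -1 when the proxy is not yet instantiated (the usual case at the start of assign()).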
diff --git a/src/gpu/GrResourceAllocator.h b/src/gpu/GrResourceAllocator.h
index e71e3558c2..f383ee4653 100644
--- a/src/gpu/GrResourceAllocator.h
+++ b/src/gpu/GrResourceAllocator.h
@@ -19,6 +19,9 @@
class GrResourceProvider;
class GrUninstantiateProxyTracker;
+// Print out explicit allocation information
+#define GR_ALLOCATION_SPEW 0
+
/*
* The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
* being given the usage intervals of the various proxies. It keeps these intervals in a singly
@@ -74,6 +77,10 @@ public:
void markEndOfOpList(int opListIndex);
+#if GR_ALLOCATION_SPEW
+ void dumpIntervals();
+#endif
+
private:
class Interval;
@@ -81,7 +88,7 @@ private:
void expire(unsigned int curIndex);
// These two methods wrap the interactions with the free pool
- void freeUpSurface(sk_sp<GrSurface> surface);
+ void recycleSurface(sk_sp<GrSurface> surface);
sk_sp<GrSurface> findSurfaceFor(const GrSurfaceProxy* proxy, bool needsStencil);
struct FreePoolTraits {
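For context, the class comment above describes the overall approach: each proxy's use is recorded as a [start, end] interval in op order, and at flush the allocator walks the intervals, expiring finished ones back into a free pool and pulling surfaces out of that pool (or creating new ones) as intervals become active. A minimal, self-contained sketch of that pattern; this is not Skia API, and the types and surface IDs here are illustrative only:

    #include <cstdio>
    #include <vector>

    // Stand-in for a proxy's usage interval; 'surface' is just an integer ID here.
    struct Interval { int proxyID; unsigned start, end; int surface = -1; };

    int main() {
        // Usage intervals, assumed already sorted by increasing start.
        std::vector<Interval> intervals = {{0, 0, 2}, {1, 1, 3}, {2, 3, 5}};
        std::vector<Interval*> active;   // intervals currently holding a surface
        std::vector<int> freePool;       // recycled surface IDs
        int nextSurfaceID = 0;

        for (Interval& cur : intervals) {
            // Expire every active interval that ends before 'cur' begins and
            // return its surface to the free pool (cf. expire()/recycleSurface()).
            for (auto it = active.begin(); it != active.end();) {
                if ((*it)->end < cur.start) {
                    freePool.push_back((*it)->surface);
                    it = active.erase(it);
                } else {
                    ++it;
                }
            }
            // Reuse a pooled surface if one is available, otherwise "create" one
            // (cf. findSurfaceFor()).
            if (!freePool.empty()) {
                cur.surface = freePool.back();
                freePool.pop_back();
            } else {
                cur.surface = nextSurfaceID++;
            }
            active.push_back(&cur);
            std::printf("proxy %d -> surface %d for ops [%u, %u]\n",
                        cur.proxyID, cur.surface, cur.start, cur.end);
        }
        return 0;
    }

In this toy run, proxy 2 reuses surface 0 because proxy 0's interval has expired by op 3; the patch above adds the extra condition that such reuse only happens once the proxy's own ref count has dropped to zero.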
diff --git a/src/gpu/GrSurfaceProxyPriv.h b/src/gpu/GrSurfaceProxyPriv.h
index 8d7d8d02d7..a5113f22d5 100644
--- a/src/gpu/GrSurfaceProxyPriv.h
+++ b/src/gpu/GrSurfaceProxyPriv.h
@@ -37,6 +37,12 @@ public:
return fProxy->fTarget ? fProxy->fTarget->asRenderTarget() : nullptr;
}
+ // Beware! Woe betide anyone whosoever calls this method.
+ // The refs on proxies and their backing GrSurfaces shift around based on whether the proxy
+ // is instantiated or not. Additionally, the lifetime of a proxy (and a GrSurface) also
+ // depends on the read and write refs (So this method can validly return 0).
+ int32_t getProxyRefCnt() const { return fProxy->getProxyRefCnt(); }
+
// Beware! This call is only guaranteed to tell you if the proxy in question has
// any pending IO in its current state. It won't tell you about the IO state in the
// future when the proxy is actually used/instantiated.