author     Jim Van Verth <jvanverth@google.com>            2017-09-22 14:23:22 -0400
committer  Skia Commit-Bot <skia-commit-bot@chromium.org>  2017-09-22 18:43:48 +0000
commit     7cf59526c9b1ab3682e875d449200e51f3b6ff32 (patch)
tree       33b8858809191162841f44c3833b555f34a96ba3
parent     c38548b3210e23090b0fa808cc0fdc16c8d55149 (diff)
Shrink GrDrawOpAtlases when no longer needed
Bug: skia:3550
Change-Id: I7b09aa2b0fd22ed99694f32f9f395392ef80e238
Reviewed-on: https://skia-review.googlesource.com/49901
Commit-Queue: Jim Van Verth <jvanverth@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
-rw-r--r--  src/core/SkTInternalLList.h                       5
-rw-r--r--  src/gpu/GrContext.cpp                             1
-rw-r--r--  src/gpu/GrDrawOpAtlas.cpp                        90
-rw-r--r--  src/gpu/GrDrawOpAtlas.h                          45
-rw-r--r--  src/gpu/GrDrawingManager.cpp                      2
-rw-r--r--  src/gpu/GrOnFlushResourceProvider.h               5
-rw-r--r--  src/gpu/GrPathRendererChain.cpp                   4
-rw-r--r--  src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp   2
-rw-r--r--  src/gpu/ccpr/GrCoverageCountingPathRenderer.h     2
-rw-r--r--  src/gpu/ops/GrDrawOp.h                            4
-rw-r--r--  src/gpu/ops/GrSmallPathRenderer.h                12
-rw-r--r--  src/gpu/text/GrAtlasGlyphCache.h                 16
12 files changed, 170 insertions, 18 deletions
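
Before the per-file hunks, a condensed sketch of how the pieces fit together: GrDrawingManager::internalFlush() now hands the flush state's nextTokenToFlush() to every registered GrOnFlushCallbackObject::postFlush(), and the atlas owners (GrAtlasGlyphCache, GrSmallPathRenderer) forward that token to GrDrawOpAtlas::compact(). The sketch below is illustrative only and uses simplified stand-in types, not the real Skia signatures.

    // Illustrative stand-ins only; the real types are GrDrawOpUploadToken,
    // GrDrawOpAtlas, GrOnFlushCallbackObject, and GrDrawingManager.
    #include <cstdint>
    #include <vector>

    struct Token { uint64_t fSequenceNumber; };

    struct Atlas {
        void compact(Token /*startTokenForNextFlush*/) { /* see the GrDrawOpAtlas.cpp hunk */ }
    };

    struct OnFlushCallback {
        virtual ~OnFlushCallback() = default;
        // New in this patch: postFlush() receives the token the next flush will start with.
        virtual void postFlush(Token /*startTokenForNextFlush*/) {}
    };

    struct AtlasOwner : OnFlushCallback {              // e.g. the glyph cache
        Atlas* fAtlas = nullptr;
        void postFlush(Token startTokenForNextFlush) override {
            if (fAtlas) {
                fAtlas->compact(startTokenForNextFlush);   // age out idle pages
            }
        }
    };

    // Mirrors the GrDrawingManager.cpp change: notify callbacks once the flush completes.
    void notifyPostFlush(const std::vector<OnFlushCallback*>& callbacks, Token nextTokenToFlush) {
        for (OnFlushCallback* cb : callbacks) {
            cb->postFlush(nextTokenToFlush);
        }
    }
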
diff --git a/src/core/SkTInternalLList.h b/src/core/SkTInternalLList.h
index ef4b74f633..df06d8a255 100644
--- a/src/core/SkTInternalLList.h
+++ b/src/core/SkTInternalLList.h
@@ -45,6 +45,11 @@ public:
, fTail(nullptr) {
}
+ void reset() {
+ fHead = nullptr;
+ fTail = nullptr;
+ }
+
void remove(T* entry) {
SkASSERT(fHead && fTail);
SkASSERT(this->isInList(entry));
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index ba26cc844e..9abf953abd 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -205,6 +205,7 @@ bool GrContext::init(const GrContextOptions& options) {
fDrawingManager.reset(new GrDrawingManager(this, prcOptions, &fSingleOwner));
fAtlasGlyphCache = new GrAtlasGlyphCache(this, options.fGlyphCacheTextureMaximumBytes);
+ this->contextPriv().addOnFlushCallbackObject(fAtlasGlyphCache);
fTextBlobCache.reset(new GrTextBlobCache(TextBlobCacheOverBudgetCB, this));
diff --git a/src/gpu/GrDrawOpAtlas.cpp b/src/gpu/GrDrawOpAtlas.cpp
index 22def06212..ed57b46b3f 100644
--- a/src/gpu/GrDrawOpAtlas.cpp
+++ b/src/gpu/GrDrawOpAtlas.cpp
@@ -36,6 +36,7 @@ GrDrawOpAtlas::Plot::Plot(int pageIndex, int plotIndex, uint64_t genID, int offX
int width, int height, GrPixelConfig config)
: fLastUpload(GrDrawOpUploadToken::AlreadyFlushedToken())
, fLastUse(GrDrawOpUploadToken::AlreadyFlushedToken())
+ , fFlushesSinceLastUse(0)
, fPageIndex(pageIndex)
, fPlotIndex(plotIndex)
, fGenID(genID)
@@ -147,6 +148,7 @@ GrDrawOpAtlas::GrDrawOpAtlas(GrContext* context, GrPixelConfig config, int width
, fTextureWidth(width)
, fTextureHeight(height)
, fAtlasGeneration(kInvalidAtlasGeneration + 1)
+ , fPrevFlushToken(GrDrawOpUploadToken::AlreadyFlushedToken())
, fNumPages(0) {
fPlotWidth = fTextureWidth / numPlotsX;
@@ -308,6 +310,84 @@ bool GrDrawOpAtlas::addToAtlas(AtlasID* id, GrDrawOp::Target* target, int width,
return true;
}
+void GrDrawOpAtlas::compact(GrDrawOpUploadToken startTokenForNextFlush) {
+ // Number of atlas-related flushes beyond which we consider a plot to no longer be in use.
+ //
+ // This value is somewhat arbitrary -- the idea is to keep it low enough that
+ // a page with unused plots will get removed reasonably quickly, but allow it
+ // to hang around for a bit in case it's needed. The assumption is that flushes
+ // are rare; i.e., we are not continually refreshing the frame.
+ static constexpr auto kRecentlyUsedCount = 8;
+
+ if (fNumPages <= 1) {
+ fPrevFlushToken = startTokenForNextFlush;
+ return;
+ }
+
+ // For all plots, update number of flushes since used, and check to see if there
+ // are any in the first pages that the last page can safely upload to.
+ PlotList::Iter plotIter;
+ int availablePlots = 0;
+ uint32_t lastPageIndex = fNumPages-1;
+ bool atlasUsedThisFlush = false;
+ for (uint32_t pageIndex = 0; pageIndex < fNumPages; ++pageIndex) {
+ plotIter.init(fPages[pageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
+ while (Plot* plot = plotIter.get()) {
+ // Update number of flushes since plot was last used
+ if (plot->lastUseToken().inInterval(fPrevFlushToken, startTokenForNextFlush)) {
+ plot->resetFlushesSinceLastUsed();
+ atlasUsedThisFlush = true;
+ } else {
+ plot->incFlushesSinceLastUsed();
+ }
+
+ // Count plots we can potentially upload to in all pages except the last one
+ // (the potential compactee).
+ if (pageIndex < lastPageIndex && plot->flushesSinceLastUsed() > kRecentlyUsedCount) {
+ ++availablePlots;
+ }
+
+ plotIter.next();
+ }
+ }
+
+ // We only try to compact if the atlas was used in the recently completed flush.
+ // TODO: consider if we should also do this if it's been a long time since the last atlas use
+ if (atlasUsedThisFlush) {
+ // Count recently used plots in the last page and evict them if there's available space
+ // in earlier pages. Since we prioritize uploading to the first pages, this will eventually
+ // clear out usage of this page unless we have a large need.
+ plotIter.init(fPages[lastPageIndex].fPlotList, PlotList::Iter::kHead_IterStart);
+ int usedPlots = 0;
+ while (Plot* plot = plotIter.get()) {
+ // If this plot was used recently
+ if (plot->flushesSinceLastUsed() <= kRecentlyUsedCount) {
+ usedPlots++;
+ // see if there's room in an earlier page and if so evict.
+ // We need to be somewhat harsh here so that one plot that is consistently in use
+ // doesn't end up locking the page in memory.
+ if (availablePlots) {
+ this->processEviction(plot->id());
+ plot->resetRects();
+ --availablePlots;
+ }
+ } else {
+ // otherwise if aged out just evict it.
+ this->processEviction(plot->id());
+ plot->resetRects();
+ }
+ plotIter.next();
+ }
+
+ // If none of the plots in the last page have been used recently, delete it.
+ if (!usedPlots) {
+ this->deleteLastPage();
+ }
+ }
+
+ fPrevFlushToken = startTokenForNextFlush;
+}
+
bool GrDrawOpAtlas::createNewPage() {
if (fNumPages == kMaxPages) {
return false;
@@ -360,3 +440,13 @@ bool GrDrawOpAtlas::createNewPage() {
fNumPages++;
return true;
}
+
+inline void GrDrawOpAtlas::deleteLastPage() {
+ uint32_t lastPageIndex = fNumPages - 1;
+ // clean out the plots
+ fPages[lastPageIndex].fPlotList.reset();
+ fPages[lastPageIndex].fPlotArray.reset(nullptr);
+ // remove ref to texture proxy
+ fProxies[lastPageIndex].reset(nullptr);
+ --fNumPages;
+}
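
The comments in compact() above describe the aging heuristic: after each flush, a plot whose lastUseToken() falls inside [fPrevFlushToken, startTokenForNextFlush] has its counter reset, every other plot's counter is incremented, and a plot is treated as idle once the counter exceeds kRecentlyUsedCount (8). A toy model of that bookkeeping follows; PlotModel and ageAfterFlush are made-up names for illustration, not Skia API.

    #include <cstdint>

    // Toy model of the per-plot aging done by GrDrawOpAtlas::compact().
    static constexpr int kRecentlyUsedCount = 8;       // same threshold as the patch

    struct PlotModel {                                 // hypothetical stand-in for Plot
        uint64_t fLastUseToken = 0;
        int      fFlushesSinceLastUse = 0;
    };

    // prevFlushToken:         nextTokenToFlush() at the end of the previous flush
    // startTokenForNextFlush: nextTokenToFlush() at the end of this flush
    // Returns true if the plot still counts as recently used.
    bool ageAfterFlush(PlotModel& plot, uint64_t prevFlushToken, uint64_t startTokenForNextFlush) {
        if (plot.fLastUseToken >= prevFlushToken && plot.fLastUseToken <= startTokenForNextFlush) {
            plot.fFlushesSinceLastUse = 0;             // the plot was used during this flush
        } else {
            ++plot.fFlushesSinceLastUse;               // idle for one more flush
        }
        return plot.fFlushesSinceLastUse <= kRecentlyUsedCount;
    }
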
diff --git a/src/gpu/GrDrawOpAtlas.h b/src/gpu/GrDrawOpAtlas.h
index 87d9c5b619..d4a81a0106 100644
--- a/src/gpu/GrDrawOpAtlas.h
+++ b/src/gpu/GrDrawOpAtlas.h
@@ -26,14 +26,29 @@ struct GrDrawOpAtlasConfig {
};
/**
- * This class manages an atlas texture on behalf of GrDrawOps. The draw ops that use the atlas
- * perform texture uploads when preparing their draws during flush. The class provides facilities
- * for using GrDrawOpUploadToken to detect data hazards. Op's uploads are performed in "asap" mode
- * until it is impossible to add data without overwriting texels read by draws that have not yet
- * executed on the gpu. At that point the uploads are performed "inline" between draws. If a single
- * draw would use enough subimage space to overflow the atlas texture then the atlas will fail to
- * add a subimage. This gives the op the chance to end the draw and begin a new one. Additional
- * uploads will then succeed in inline mode.
+ * This class manages one or more atlas textures on behalf of GrDrawOps. The draw ops that use the
+ * atlas perform texture uploads when preparing their draws during flush. The class provides
+ * facilities for using GrDrawOpUploadToken to detect data hazards. Op's uploads are performed in
+ * "asap" mode until it is impossible to add data without overwriting texels read by draws that
+ * have not yet executed on the gpu. At that point, the atlas will attempt to allocate a new
+ * atlas texture (or "page") of the same size, up to a maximum number of textures, and upload
+ * to that texture. If that's not possible, the uploads are performed "inline" between draws. If a
+ * single draw would use enough subimage space to overflow the atlas texture then the atlas will
+ * fail to add a subimage. This gives the op the chance to end the draw and begin a new one.
+ * Additional uploads will then succeed in inline mode.
+ *
+ * When the atlas has multiple pages, new uploads are prioritized to the lower index pages, i.e.,
+ * it will try to upload to page 0 before page 1 or 2. To keep the atlas from continually using
+ * excess space, periodic garbage collection is needed to shift data from the higher index pages to
+ * the lower ones, and then eventually remove any pages that are no longer in use. "In use" is
+ * determined by using the GrDrawOpUploadToken system: after a flush, each subarea of the page
+ * is checked to see whether it was used in that flush; if it was not, a counter is incremented.
+ * Once that counter reaches a threshold that subarea is considered to be no longer in use.
+ *
+ * Garbage collection is initiated by the GrDrawOpAtlas's client via the compact() method. One
+ * solution is to make the client a subclass of GrOnFlushCallbackObject, register it with the
+ * GrContext via addOnFlushCallbackObject(), and have its postFlush() method call compact(),
+ * passing in the given GrDrawOpUploadToken.
*/
class GrDrawOpAtlas {
public:
@@ -186,6 +201,8 @@ public:
}
}
+ void compact(GrDrawOpUploadToken startTokenForNextFlush);
+
static constexpr auto kGlyphMaxDim = 256;
static bool GlyphTooLargeForAtlas(int width, int height) {
return width > kGlyphMaxDim || height > kGlyphMaxDim;
@@ -240,6 +257,10 @@ private:
void uploadToTexture(GrDrawOp::WritePixelsFn&, GrTextureProxy*);
void resetRects();
+ int flushesSinceLastUsed() { return fFlushesSinceLastUse; }
+ void resetFlushesSinceLastUsed() { fFlushesSinceLastUse = 0; }
+ void incFlushesSinceLastUsed() { fFlushesSinceLastUse++; }
+
private:
Plot(int pageIndex, int plotIndex, uint64_t genID, int offX, int offY, int width, int height,
GrPixelConfig config);
@@ -265,6 +286,8 @@ private:
GrDrawOpUploadToken fLastUpload;
GrDrawOpUploadToken fLastUse;
+ // the number of flushes since this plot has been last used
+ int fFlushesSinceLastUse;
struct {
const uint32_t fPageIndex : 16;
@@ -310,10 +333,12 @@ private:
fPages[pageIdx].fPlotList.remove(plot);
fPages[pageIdx].fPlotList.addToHead(plot);
- // TODO: make page MRU
+ // No MRU update for pages -- since we will always try to add from
+ // the front and remove from the back there is no need for MRU.
}
bool createNewPage();
+ void deleteLastPage();
inline void processEviction(AtlasID);
@@ -326,6 +351,8 @@ private:
SkDEBUGCODE(uint32_t fNumPlots;)
uint64_t fAtlasGeneration;
+ // nextTokenToFlush() value at the end of the previous flush
+ GrDrawOpUploadToken fPrevFlushToken;
struct EvictionData {
EvictionFunc fFunc;
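
The new class comment above recommends making the atlas client a GrOnFlushCallbackObject, registering it via addOnFlushCallbackObject(), and calling compact() from postFlush(). That is exactly what the later hunks do for GrSmallPathRenderer and GrAtlasGlyphCache; in isolation, the pattern looks roughly like the sketch below (HypotheticalAtlasClient is a made-up name, and the member layout is assumed, not taken from the patch).

    // Sketch of an atlas client participating in post-flush compaction.
    class HypotheticalAtlasClient : public GrOnFlushCallbackObject {
    public:
        void preFlush(GrOnFlushResourceProvider*, const uint32_t*, int,
                      SkTArray<sk_sp<GrRenderTargetContext>>*) override {}

        void postFlush(GrDrawOpUploadToken startTokenForNextFlush) override {
            if (fAtlas) {
                fAtlas->compact(startTokenForNextFlush);   // let idle pages age out and be freed
            }
        }

    private:
        std::unique_ptr<GrDrawOpAtlas> fAtlas;             // created lazily elsewhere (assumed)
    };

    // Registration happens once, when the client is created:
    //     context->contextPriv().addOnFlushCallbackObject(client);
    // as the GrContext.cpp and GrPathRendererChain.cpp hunks show.
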
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index c229a2a1eb..eb035597a1 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -214,7 +214,7 @@ GrSemaphoresSubmitted GrDrawingManager::internalFlush(GrSurfaceProxy*,
fContext->getResourceCache()->notifyFlushOccurred(type);
}
for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
- onFlushCBObject->postFlush();
+ onFlushCBObject->postFlush(fFlushState.nextTokenToFlush());
}
fFlushing = false;
diff --git a/src/gpu/GrOnFlushResourceProvider.h b/src/gpu/GrOnFlushResourceProvider.h
index e2c0b73075..08dd012c99 100644
--- a/src/gpu/GrOnFlushResourceProvider.h
+++ b/src/gpu/GrOnFlushResourceProvider.h
@@ -9,6 +9,7 @@
#define GrOnFlushResourceProvider_DEFINED
#include "GrTypes.h"
+#include "GrOpFlushState.h"
#include "GrResourceProvider.h"
#include "SkRefCnt.h"
#include "SkTArray.h"
@@ -43,9 +44,9 @@ public:
/**
* Called once flushing is complete and all ops indicated by preFlush have been executed and
- * released.
+ * released. startTokenForNextFlush can be used to track resources used in the current flush.
*/
- virtual void postFlush() {}
+ virtual void postFlush(GrDrawOpUploadToken startTokenForNextFlush) {}
private:
typedef SkRefCnt INHERITED;
diff --git a/src/gpu/GrPathRendererChain.cpp b/src/gpu/GrPathRendererChain.cpp
index 466f90a890..843d712c5c 100644
--- a/src/gpu/GrPathRendererChain.cpp
+++ b/src/gpu/GrPathRendererChain.cpp
@@ -63,7 +63,9 @@ GrPathRendererChain::GrPathRendererChain(GrContext* context, const Options& opti
fChain.push_back(sk_make_sp<GrAALinearizingConvexPathRenderer>());
}
if (options.fGpuPathRenderers & GpuPathRenderers::kSmall) {
- fChain.push_back(sk_make_sp<GrSmallPathRenderer>());
+ auto spr = sk_make_sp<GrSmallPathRenderer>();
+ context->contextPriv().addOnFlushCallbackObject(spr.get());
+ fChain.push_back(spr);
}
if (options.fGpuPathRenderers & GpuPathRenderers::kTessellating) {
fChain.push_back(sk_make_sp<GrTessellatingPathRenderer>());
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 9b2e956f9e..46467bc9ac 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -387,7 +387,7 @@ void DrawPathsOp::onExecute(GrOpFlushState* flushState) {
SkASSERT(baseInstance == fBaseInstance + fDebugInstanceCount - fDebugSkippedInstances);
}
-void GrCoverageCountingPathRenderer::postFlush() {
+void GrCoverageCountingPathRenderer::postFlush(GrDrawOpUploadToken) {
SkASSERT(fFlushing);
fPerFlushAtlases.reset();
fPerFlushInstanceBuffer.reset();
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index ee73cb3d18..6025581da8 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -44,7 +44,7 @@ public:
// GrOnFlushCallbackObject overrides.
void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
SkTArray<sk_sp<GrRenderTargetContext>>* results) override;
- void postFlush() override;
+ void postFlush(GrDrawOpUploadToken) override;
// This is the Op that ultimately draws a path into its final destination, using the atlas we
// generate at flush time.
diff --git a/src/gpu/ops/GrDrawOp.h b/src/gpu/ops/GrDrawOp.h
index 145d2e2981..ecd92498ae 100644
--- a/src/gpu/ops/GrDrawOp.h
+++ b/src/gpu/ops/GrDrawOp.h
@@ -33,6 +33,10 @@ public:
return fSequenceNumber == that.fSequenceNumber;
}
bool operator!=(const GrDrawOpUploadToken& that) const { return !(*this == that); }
+ bool inInterval(const GrDrawOpUploadToken& start, const GrDrawOpUploadToken& finish) {
+ return fSequenceNumber >= start.fSequenceNumber &&
+ fSequenceNumber <= finish.fSequenceNumber;
+ }
private:
GrDrawOpUploadToken();
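
One detail worth noting in the GrDrawOp.h hunk: inInterval() is inclusive at both ends, so a plot whose last use landed exactly on fPrevFlushToken or on startTokenForNextFlush still counts as used in the just-completed flush. Illustrated with raw sequence numbers (GrDrawOpUploadToken's constructor is private, so the helper below is a stand-in, not the real API).

    #include <cstdint>

    // Stand-in mirroring GrDrawOpUploadToken::inInterval() on raw sequence numbers.
    bool inInterval(uint64_t seq, uint64_t start, uint64_t finish) {
        return seq >= start && seq <= finish;
    }

    // With fPrevFlushToken == 10 and startTokenForNextFlush == 14:
    //   inInterval(10, 10, 14) -> true    (used at the previous flush boundary)
    //   inInterval(14, 10, 14) -> true    (used by the last upload in this flush)
    //   inInterval( 9, 10, 14) -> false   (stale: the plot ages by one flush)
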
diff --git a/src/gpu/ops/GrSmallPathRenderer.h b/src/gpu/ops/GrSmallPathRenderer.h
index b958baa843..9c3104b136 100644
--- a/src/gpu/ops/GrSmallPathRenderer.h
+++ b/src/gpu/ops/GrSmallPathRenderer.h
@@ -9,6 +9,7 @@
#define GrSmallPathRenderer_DEFINED
#include "GrDrawOpAtlas.h"
+#include "GrOnFlushResourceProvider.h"
#include "GrPathRenderer.h"
#include "GrRect.h"
#include "GrShape.h"
@@ -18,7 +19,7 @@
class GrContext;
-class GrSmallPathRenderer : public GrPathRenderer {
+class GrSmallPathRenderer : public GrPathRenderer, public GrOnFlushCallbackObject {
public:
GrSmallPathRenderer();
~GrSmallPathRenderer() override;
@@ -26,6 +27,15 @@ public:
class SmallPathOp;
struct PathTestStruct;
+ void preFlush(GrOnFlushResourceProvider*, const uint32_t*, int,
+ SkTArray<sk_sp<GrRenderTargetContext>>*) override {}
+
+ void postFlush(GrDrawOpUploadToken startTokenForNextFlush) override {
+ if (fAtlas) {
+ fAtlas->compact(startTokenForNextFlush);
+ }
+ }
+
private:
StencilSupport onGetStencilSupport(const GrShape&) const override {
return GrPathRenderer::kNoSupport_StencilSupport;
diff --git a/src/gpu/text/GrAtlasGlyphCache.h b/src/gpu/text/GrAtlasGlyphCache.h
index 78b743e2f4..071ce0f96f 100644
--- a/src/gpu/text/GrAtlasGlyphCache.h
+++ b/src/gpu/text/GrAtlasGlyphCache.h
@@ -11,6 +11,7 @@
#include "GrCaps.h"
#include "GrDrawOpAtlas.h"
#include "GrGlyph.h"
+#include "GrOnFlushResourceProvider.h"
#include "SkArenaAlloc.h"
#include "SkGlyphCache.h"
#include "SkTDynamicHash.h"
@@ -108,10 +109,10 @@ private:
* though this is more or less transparent to the client(aside from atlasGeneration, described
* below).
*/
-class GrAtlasGlyphCache {
+class GrAtlasGlyphCache : public GrOnFlushCallbackObject {
public:
GrAtlasGlyphCache(GrContext*, float maxTextureBytes);
- ~GrAtlasGlyphCache();
+ ~GrAtlasGlyphCache() override;
// The user of the cache may hold a long-lived ref to the returned strike. However, actions by
// another client of the cache may cause the strike to be purged while it is still reffed.
// Therefore, the caller must check GrAtlasTextStrike::isAbandoned() if there are other
@@ -181,6 +182,17 @@ public:
return this->getAtlas(format)->atlasGeneration();
}
+ void preFlush(GrOnFlushResourceProvider*, const uint32_t*, int,
+ SkTArray<sk_sp<GrRenderTargetContext>>*) override {}
+
+ void postFlush(GrDrawOpUploadToken startTokenForNextFlush) override {
+ for (int i = 0; i < kMaskFormatCount; ++i) {
+ if (fAtlases[i]) {
+ fAtlases[i]->compact(startTokenForNextFlush);
+ }
+ }
+ }
+
///////////////////////////////////////////////////////////////////////////
// Functions intended debug only
#ifdef SK_DEBUG