author    Robert Phillips <robertphillips@google.com>  2017-05-11 16:29:14 -0400
committer Skia Commit-Bot <skia-commit-bot@chromium.org>  2017-05-12 12:10:31 +0000
commit    6cdc22cde8e6297d34fdaaa3ed5e69ae86c30a77 (patch)
tree      a2462bae16096f92b528d059ff1fb95881ed8d0d
parent    5c7960be57010bf61db3d4ce879a3194687b5af9 (diff)
Split up opLists (take 3)
Reland of: https://skia-review.googlesource.com/c/11581/ (Split up opLists)

https://skia-review.googlesource.com/c/13860/ (Make InstancedRendering more
opList-splitting friendly) has landed so this should be good for another attempt.

TBR=egdaniel@google.com
Change-Id: I2a09729342bb035af3a16807c1895adbae432ade
Reviewed-on: https://skia-review.googlesource.com/14186
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
-rw-r--r--   include/private/GrSurfaceProxy.h        1
-rw-r--r--   src/gpu/GrDrawingManager.cpp           50
-rw-r--r--   src/gpu/GrOnFlushResourceProvider.cpp  16
-rw-r--r--   src/gpu/GrOpList.cpp                   24
-rw-r--r--   src/gpu/GrOpList.h                     22
-rw-r--r--   src/gpu/GrRenderTargetContext.cpp       3
-rw-r--r--   src/gpu/GrRenderTargetOpList.cpp       38
-rw-r--r--   src/gpu/GrRenderTargetOpList.h          3
-rw-r--r--   src/gpu/GrSurfaceProxy.cpp             18
-rw-r--r--   src/gpu/GrTextureContext.cpp           20
-rw-r--r--   src/gpu/GrTextureOpList.cpp             3
-rw-r--r--   tests/SurfaceTest.cpp                   2
12 files changed, 86 insertions, 114 deletions
diff --git a/include/private/GrSurfaceProxy.h b/include/private/GrSurfaceProxy.h
index 29bb8620e0..2d60c8d648 100644
--- a/include/private/GrSurfaceProxy.h
+++ b/include/private/GrSurfaceProxy.h
@@ -375,6 +375,7 @@ private:
// This back-pointer is required so that we can add a dependency between
// the opList used to create the current contents of this surface
// and the opList of a destination surface to which this one is being drawn or copied.
+ // This pointer is unreffed. OpLists own a ref on their surface proxies.
GrOpList* fLastOpList;
typedef GrIORefProxy INHERITED;
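The comment above describes the new ownership model: the op list holds the only ref on its target proxy, while the proxy's fLastOpList stays an unowned back-pointer that must be cleared before the proxy is destroyed. A minimal, self-contained sketch of that lifetime, using std::shared_ptr in place of GrPendingIOResource and simplified stand-in types (Proxy, OpList) rather than the real Skia classes:

    #include <cassert>
    #include <memory>

    struct OpList;

    struct Proxy {
        OpList* fLastOpList = nullptr;       // unowned back-pointer, as in GrSurfaceProxy::fLastOpList
        ~Proxy() { assert(!fLastOpList); }   // mirrors the new SkASSERT in ~GrSurfaceProxy
    };

    struct OpList {
        explicit OpList(std::shared_ptr<Proxy> target) : fTarget(std::move(target)) {
            fTarget->fLastOpList = this;     // mirrors setLastOpList(this) in the GrOpList ctor
        }
        ~OpList() { this->reset(); }
        void reset() {                       // mirrors the new GrOpList::reset()
            if (fTarget && fTarget->fLastOpList == this) {
                fTarget->fLastOpList = nullptr;
            }
            fTarget.reset();
        }
        std::shared_ptr<Proxy> fTarget;      // the op list owns the ref (GrPendingIOResource in Skia)
    };

    int main() {
        auto proxy = std::make_shared<Proxy>();
        {
            OpList list(proxy);
            assert(proxy->fLastOpList == &list);
        }                                    // ~OpList clears the back-pointer before the proxy dies
        assert(proxy->fLastOpList == nullptr);
        return 0;
    }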
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index b6ca9661a6..38cfb8c71a 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -27,10 +27,10 @@ void GrDrawingManager::cleanup() {
for (int i = 0; i < fOpLists.count(); ++i) {
// no opList should receive a new command after this
fOpLists[i]->makeClosed(*fContext->caps());
- fOpLists[i]->clearTarget();
// We shouldn't need to do this, but it turns out some clients still hold onto opLists
- // after a cleanup
+ // after a cleanup.
+ // MDB TODO: is this still true?
fOpLists[i]->reset();
}
@@ -93,6 +93,7 @@ void GrDrawingManager::internalFlush(GrSurfaceProxy*, GrResourceCache::FlushType
// but need to be flushed anyway. Closing such GrOpLists here will mean new
// GrOpLists will be created to replace them if the SkGpuDevice(s) write to them again.
fOpLists[i]->makeClosed(*fContext->caps());
+ SkDEBUGCODE(fOpLists[i]->validateTargetsSingleRenderTarget());
}
#ifdef ENABLE_MDB
@@ -162,17 +163,7 @@ void GrDrawingManager::internalFlush(GrSurfaceProxy*, GrResourceCache::FlushType
fOpLists[i]->reset();
}
-#ifndef ENABLE_MDB
- // When MDB is disabled we keep reusing the same GrOpList
- if (fOpLists.count()) {
- SkASSERT(fOpLists.count() == 1);
- // Clear out this flag so the topological sort's SkTTopoSort_CheckAllUnmarked check
- // won't bark
- fOpLists[0]->resetFlag(GrOpList::kWasOutput_Flag);
- }
-#else
fOpLists.reset();
-#endif
fFlushState.reset();
// We always have to notify the cache when it requested a flush so it can reset its state.
@@ -212,19 +203,12 @@ void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlush
sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(GrRenderTargetProxy* rtp) {
SkASSERT(fContext);
-#ifndef ENABLE_MDB
- // When MDB is disabled we always just return the single GrOpList
- if (fOpLists.count()) {
- SkASSERT(fOpLists.count() == 1);
- // In the non-MDB-world the same GrOpList gets reused for multiple render targets.
- // Update this pointer so all the asserts are happy
- rtp->setLastOpList(fOpLists[0].get());
- // DrawingManager gets the creation ref - this ref is for the caller
-
- // TODO: although this is true right now it isn't cool
- return sk_ref_sp((GrRenderTargetOpList*) fOpLists[0].get());
+ // This is a temporary fix for the partial-MDB world. In that world we're not reordering
+ // so ops that (in the single opList world) would've just glommed onto the end of the single
+ // opList but referred to a far earlier RT need to appear in their own opList.
+ if (!fOpLists.empty()) {
+ fOpLists.back()->makeClosed(*fContext->caps());
}
-#endif
sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(rtp,
fContext->getGpu(),
@@ -239,19 +223,21 @@ sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(GrRenderTargetProxy* r
sk_sp<GrTextureOpList> GrDrawingManager::newTextureOpList(GrTextureProxy* textureProxy) {
SkASSERT(fContext);
+ // This is a temporary fix for the partial-MDB world. In that world we're not reordering
+ // so ops that (in the single opList world) would've just glommed onto the end of the single
+ // opList but referred to a far earlier RT need to appear in their own opList.
+ if (!fOpLists.empty()) {
+ fOpLists.back()->makeClosed(*fContext->caps());
+ }
+
sk_sp<GrTextureOpList> opList(new GrTextureOpList(textureProxy, fContext->getGpu(),
fContext->getAuditTrail()));
-#ifndef ENABLE_MDB
- // When MDB is disabled we still create a new GrOpList, but don't store or ref it - we rely
- // on the caller to immediately execute and free it.
- return opList;
-#else
- *fOpLists.append() = opList;
+ SkASSERT(textureProxy->getLastOpList() == opList.get());
+
+ fOpLists.push_back() = opList;
- // Drawing manager gets the creation ref - this ref is for the caller
return opList;
-#endif
}
GrAtlasTextContext* GrDrawingManager::getAtlasTextContext() {
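The drawing-manager change above amounts to this: before a new op list is handed out, the most recent one is closed so later ops cannot glom onto it. A minimal sketch of that behaviour with simplified stand-in types (ModelDrawingManager, ModelOpList), not the real GrDrawingManager API:

    #include <cassert>
    #include <memory>
    #include <vector>

    struct ModelOpList {
        bool closed = false;
        void makeClosed() { closed = true; }   // a closed list accepts no new ops
    };

    struct ModelDrawingManager {
        std::vector<std::shared_ptr<ModelOpList>> fOpLists;

        std::shared_ptr<ModelOpList> newOpList() {
            if (!fOpLists.empty()) {
                fOpLists.back()->makeClosed();  // the "temporary fix": seal the previous list
            }
            auto list = std::make_shared<ModelOpList>();
            fOpLists.push_back(list);
            return list;
        }
    };

    int main() {
        ModelDrawingManager mgr;
        auto first = mgr.newOpList();
        assert(!first->closed);
        auto second = mgr.newOpList();          // creating the next list closes 'first'
        assert(first->closed && !second->closed);
        return 0;
    }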
diff --git a/src/gpu/GrOnFlushResourceProvider.cpp b/src/gpu/GrOnFlushResourceProvider.cpp
index a86d269d39..8b031f4e32 100644
--- a/src/gpu/GrOnFlushResourceProvider.cpp
+++ b/src/gpu/GrOnFlushResourceProvider.cpp
@@ -30,14 +30,6 @@ sk_sp<GrRenderTargetContext> GrOnFlushResourceProvider::makeRenderTargetContext(
return nullptr;
}
- // MDB TODO: This explicit resource creation is required in the pre-MDB world so that the
- // pre-Flush ops are placed in their own opList.
- sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(
- proxy->asRenderTargetProxy(),
- fDrawingMgr->fContext->getGpu(),
- fDrawingMgr->fContext->getAuditTrail()));
- proxy->setLastOpList(opList.get());
-
sk_sp<GrRenderTargetContext> renderTargetContext(
fDrawingMgr->makeRenderTargetContext(std::move(proxy),
std::move(colorSpace),
@@ -58,14 +50,6 @@ sk_sp<GrRenderTargetContext> GrOnFlushResourceProvider::makeRenderTargetContext(
sk_sp<GrSurfaceProxy> proxy,
sk_sp<SkColorSpace> colorSpace,
const SkSurfaceProps* props) {
- // MDB TODO: This explicit resource creation is required in the pre-MDB world so that the
- // pre-Flush ops are placed in their own opList.
- sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(
- proxy->asRenderTargetProxy(),
- fDrawingMgr->fContext->getGpu(),
- fDrawingMgr->fContext->getAuditTrail()));
- proxy->setLastOpList(opList.get());
-
sk_sp<GrRenderTargetContext> renderTargetContext(
fDrawingMgr->makeRenderTargetContext(std::move(proxy),
std::move(colorSpace),
diff --git a/src/gpu/GrOpList.cpp b/src/gpu/GrOpList.cpp
index a088c447ae..cf67dab910 100644
--- a/src/gpu/GrOpList.cpp
+++ b/src/gpu/GrOpList.cpp
@@ -21,21 +21,28 @@ uint32_t GrOpList::CreateUniqueID() {
}
GrOpList::GrOpList(GrSurfaceProxy* surfaceProxy, GrAuditTrail* auditTrail)
- // MDB TODO: in the future opLists will own the GrSurfaceProxy they target.
- // For now, preserve the status quo.
- : fTarget(surfaceProxy)
- , fAuditTrail(auditTrail)
+ : fAuditTrail(auditTrail)
, fUniqueID(CreateUniqueID())
, fFlags(0) {
- surfaceProxy->setLastOpList(this);
+ fTarget.reset(surfaceProxy);
+ fTarget.get()->setLastOpList(this);
}
GrOpList::~GrOpList() {
- if (fTarget && this == fTarget->getLastOpList()) {
- fTarget->setLastOpList(nullptr);
+ if (fTarget.get() && this == fTarget.get()->getLastOpList()) {
+ fTarget.get()->setLastOpList(nullptr);
}
}
+void GrOpList::reset() {
+ if (fTarget.get() && this == fTarget.get()->getLastOpList()) {
+ fTarget.get()->setLastOpList(nullptr);
+ }
+
+ fTarget.reset(nullptr);
+ fAuditTrail = nullptr;
+}
+
// Add a GrOpList-based dependency
void GrOpList::addDependency(GrOpList* dependedOn) {
SkASSERT(!dependedOn->dependsOn(this)); // loops are bad
@@ -68,7 +75,8 @@ void GrOpList::addDependency(GrSurfaceProxy* dependedOn, const GrCaps& caps) {
#ifdef SK_DEBUG
void GrOpList::dump() const {
SkDebugf("--------------------------------------------------------------\n");
- SkDebugf("node: %d -> RT: %d\n", fUniqueID, fTarget ? fTarget->uniqueID().asUInt() : -1);
+ SkDebugf("node: %d -> RT: %d\n", fUniqueID, fTarget.get() ? fTarget.get()->uniqueID().asUInt()
+ : -1);
SkDebugf("relies On (%d): ", fDependencies.count());
for (int i = 0; i < fDependencies.count(); ++i) {
SkDebugf("%d, ", fDependencies[i]->fUniqueID);
diff --git a/src/gpu/GrOpList.h b/src/gpu/GrOpList.h
index d6e1c30e4d..f0dc1c192c 100644
--- a/src/gpu/GrOpList.h
+++ b/src/gpu/GrOpList.h
@@ -8,6 +8,8 @@
#ifndef GrOpList_DEFINED
#define GrOpList_DEFINED
+#include "GrGpuResourceRef.h"
+
#include "SkRefCnt.h"
#include "SkTDArray.h"
@@ -30,24 +32,16 @@ public:
virtual bool executeOps(GrOpFlushState* flushState) = 0;
virtual void makeClosed(const GrCaps&) {
- // We only close GrOpLists when MDB is enabled. When MDB is disabled there is only
- // ever one GrOpLists and all calls will be funnelled into it.
-#ifdef ENABLE_MDB
this->setFlag(kClosed_Flag);
-#endif
}
- virtual void reset() = 0;
+ virtual void reset();
// TODO: in an MDB world, where the OpLists don't allocate GPU resources, it seems like
// these could go away
virtual void abandonGpuResources() = 0;
virtual void freeGpuResources() = 0;
- // TODO: this entry point is only needed in the non-MDB world. Remove when
- // we make the switch to MDB
- void clearTarget() { fTarget = nullptr; }
-
bool isClosed() const { return this->isSetFlag(kClosed_Flag); }
/*
@@ -85,8 +79,8 @@ public:
SkDEBUGCODE(virtual int numClips() const { return 0; })
protected:
- GrSurfaceProxy* fTarget;
- GrAuditTrail* fAuditTrail;
+ GrPendingIOResource<GrSurfaceProxy, kWrite_GrIOType> fTarget;
+ GrAuditTrail* fAuditTrail;
private:
friend class GrDrawingManager; // for resetFlag & TopoSortTraits
@@ -138,11 +132,11 @@ private:
void addDependency(GrOpList* dependedOn);
- uint32_t fUniqueID;
- uint32_t fFlags;
+ uint32_t fUniqueID;
+ uint32_t fFlags;
// 'this' GrOpList relies on the output of the GrOpLists in 'fDependencies'
- SkTDArray<GrOpList*> fDependencies;
+ SkTDArray<GrOpList*> fDependencies;
typedef SkRefCnt INHERITED;
};
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp
index d6081dbed8..f18e7b2a47 100644
--- a/src/gpu/GrRenderTargetContext.cpp
+++ b/src/gpu/GrRenderTargetContext.cpp
@@ -105,6 +105,9 @@ void GrRenderTargetContext::validate() const {
#endif
GrRenderTargetContext::~GrRenderTargetContext() {
+ if (fOpList) {
+ fOpList->makeClosed(*this->caps());
+ }
ASSERT_SINGLE_OWNER
}
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrRenderTargetOpList.cpp
index 56b817f1fe..4a6590687d 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -78,7 +78,7 @@ void GrRenderTargetOpList::validateTargetsSingleRenderTarget() const {
#endif
void GrRenderTargetOpList::prepareOps(GrOpFlushState* flushState) {
- // MDB TODO: add SkASSERT(this->isClosed());
+ SkASSERT(this->isClosed());
// Loop over the ops that haven't yet been prepared.
for (int i = 0; i < fRecordedOps.count(); ++i) {
@@ -131,34 +131,34 @@ bool GrRenderTargetOpList::executeOps(GrOpFlushState* flushState) {
if (0 == fRecordedOps.count()) {
return false;
}
- // Draw all the generated geometry.
- const GrRenderTarget* currentRenderTarget = fRecordedOps[0].fRenderTarget.get();
+
+#ifdef SK_DEBUG
+ GrSurface* target = fTarget.get()->instantiate(flushState->resourceProvider());
+ if (!target) {
+ return false;
+ }
+ const GrRenderTarget* currentRenderTarget = target->asRenderTarget();
SkASSERT(currentRenderTarget);
- std::unique_ptr<GrGpuCommandBuffer> commandBuffer;
+#endif
+
+ std::unique_ptr<GrGpuCommandBuffer> commandBuffer = create_command_buffer(flushState->gpu());
+ flushState->setCommandBuffer(commandBuffer.get());
+ // Draw all the generated geometry.
for (int i = 0; i < fRecordedOps.count(); ++i) {
if (!fRecordedOps[i].fOp) {
continue;
}
- SkASSERT(fRecordedOps[i].fRenderTarget.get());
+ SkASSERT(fRecordedOps[i].fRenderTarget.get() == currentRenderTarget);
if (fRecordedOps[i].fOp->needsCommandBufferIsolation()) {
// This op is a special snowflake and must occur between command buffers
// TODO: make this go through the command buffer
finish_command_buffer(commandBuffer.get());
- currentRenderTarget = fRecordedOps[i].fRenderTarget.get();
commandBuffer.reset();
flushState->setCommandBuffer(commandBuffer.get());
- } else if (fRecordedOps[i].fRenderTarget.get() != currentRenderTarget) {
- // Changing renderTarget
- // MDB TODO: this code path goes away
- finish_command_buffer(commandBuffer.get());
- currentRenderTarget = fRecordedOps[i].fRenderTarget.get();
-
- commandBuffer = create_command_buffer(flushState->gpu());
- flushState->setCommandBuffer(commandBuffer.get());
} else if (!commandBuffer) {
commandBuffer = create_command_buffer(flushState->gpu());
flushState->setCommandBuffer(commandBuffer.get());
@@ -190,7 +190,10 @@ void GrRenderTargetOpList::reset() {
fRecordedOps.reset();
if (fInstancedRendering) {
fInstancedRendering->endFlush();
+ fInstancedRendering = nullptr;
}
+
+ INHERITED::reset();
}
void GrRenderTargetOpList::abandonGpuResources() {
@@ -305,6 +308,13 @@ GrOp* GrRenderTargetOpList::recordOp(std::unique_ptr<GrOp> op,
const GrCaps* caps = renderTargetContext->caps();
+#ifdef SK_DEBUG
+ if (!fRecordedOps.empty()) {
+ GrRenderTargetOpList::RecordedOp& back = fRecordedOps.back();
+ SkASSERT(back.fRenderTarget == renderTarget);
+ }
+#endif
+
// A closed GrOpList should never receive new/more ops
SkASSERT(!this->isClosed());
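With a single render target per op list, executeOps can open one command buffer up front and only break it when an op needs command-buffer isolation. A simplified, self-contained sketch of that loop, where CmdBuffer and Op are stand-ins rather than the real GrGpuCommandBuffer/GrOp types:

    #include <memory>
    #include <vector>

    struct CmdBuffer {
        void submit() { /* hand the recorded work to the GPU */ }
    };

    struct Op {
        bool needsIsolation = false;               // models GrOp::needsCommandBufferIsolation()
    };

    void execute_ops(std::vector<Op>& ops) {
        auto cb = std::make_unique<CmdBuffer>();   // one buffer up front; single target per list
        for (Op& op : ops) {
            if (op.needsIsolation) {
                if (cb) { cb->submit(); }          // finish the current buffer
                cb.reset();                        // the isolated op runs outside any buffer
            } else if (!cb) {
                cb = std::make_unique<CmdBuffer>(); // reopen a buffer after an isolated op
            }
            // ... record/execute 'op' against the current buffer (possibly none) ...
        }
        if (cb) { cb->submit(); }
    }

    int main() {
        std::vector<Op> ops(3);
        ops[1].needsIsolation = true;
        execute_ops(ops);
        return 0;
    }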
diff --git a/src/gpu/GrRenderTargetOpList.h b/src/gpu/GrRenderTargetOpList.h
index f3514c72a8..aa8c6c69f8 100644
--- a/src/gpu/GrRenderTargetOpList.h
+++ b/src/gpu/GrRenderTargetOpList.h
@@ -151,7 +151,8 @@ private:
int32_t fLastClipStackGenID;
SkIRect fLastDevClipBounds;
- SkSTArray<256, RecordedOp, true> fRecordedOps;
+ // For ops/opList we have mean: 5 stdDev: 28
+ SkSTArray<5, RecordedOp, true> fRecordedOps;
// MDB TODO: 4096 for the first allocation of the clip space will be huge overkill.
// Gather statistics to determine the correct size.
diff --git a/src/gpu/GrSurfaceProxy.cpp b/src/gpu/GrSurfaceProxy.cpp
index fd9a9c443e..15c4c4f655 100644
--- a/src/gpu/GrSurfaceProxy.cpp
+++ b/src/gpu/GrSurfaceProxy.cpp
@@ -32,10 +32,9 @@ GrSurfaceProxy::GrSurfaceProxy(sk_sp<GrSurface> surface, SkBackingFit fit)
}
GrSurfaceProxy::~GrSurfaceProxy() {
- if (fLastOpList) {
- fLastOpList->clearTarget();
- }
- SkSafeUnref(fLastOpList);
+ // For this to be deleted the opList that held a ref on it (if there was one) must have been
+ // deleted. Which would have cleared out this back pointer.
+ SkASSERT(!fLastOpList);
}
GrSurface* GrSurfaceProxy::instantiate(GrResourceProvider* resourceProvider) {
@@ -97,15 +96,14 @@ int GrSurfaceProxy::worstCaseHeight(const GrCaps& caps) const {
}
void GrSurfaceProxy::setLastOpList(GrOpList* opList) {
+#ifdef SK_DEBUG
if (fLastOpList) {
- // The non-MDB world never closes so we can't check this condition
-#ifdef ENABLE_MDB
SkASSERT(fLastOpList->isClosed());
-#endif
- fLastOpList->clearTarget();
}
+#endif
- SkRefCnt_SafeAssign(fLastOpList, opList);
+ // Un-reffed
+ fLastOpList = opList;
}
GrRenderTargetOpList* GrSurfaceProxy::getLastRenderTargetOpList() {
@@ -312,7 +310,7 @@ void GrSurfaceProxyPriv::exactify() {
// It could mess things up if prior decisions were based on the approximate size.
fProxy->fFit = SkBackingFit::kExact;
// If fGpuMemorySize is used when caching specialImages for the image filter DAG. If it has
- // already been computed we want to leave it alone so that amount will be removed when
+ // already been computed we want to leave it alone so that amount will be removed when
// the special image goes away. If it hasn't been computed yet it might as well compute the
// exact amount.
}
diff --git a/src/gpu/GrTextureContext.cpp b/src/gpu/GrTextureContext.cpp
index 479957b8e1..0021c23da1 100644
--- a/src/gpu/GrTextureContext.cpp
+++ b/src/gpu/GrTextureContext.cpp
@@ -77,23 +77,7 @@ bool GrTextureContext::onCopy(GrSurfaceProxy* srcProxy,
SkDEBUGCODE(this->validate();)
GR_AUDIT_TRAIL_AUTO_FRAME(fAuditTrail, "GrTextureContext::onCopy");
-#ifndef ENABLE_MDB
- // We can't yet fully defer copies to textures, so GrTextureContext::copySurface will
- // execute the copy immediately. Ensure the data is ready.
- fContext->contextPriv().flushSurfaceWrites(srcProxy);
-#endif
-
- GrTextureOpList* opList = this->getOpList();
- bool result = opList->copySurface(fContext->resourceProvider(),
- fTextureProxy.get(), srcProxy, srcRect, dstPoint);
-
-#ifndef ENABLE_MDB
- GrOpFlushState flushState(fContext->getGpu(), nullptr);
- opList->prepareOps(&flushState);
- opList->executeOps(&flushState);
- opList->reset();
-#endif
-
- return result;
+ return this->getOpList()->copySurface(fContext->resourceProvider(),
+ fTextureProxy.get(), srcProxy, srcRect, dstPoint);
}
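GrTextureContext::onCopy is now fully deferred: the copy is recorded on the texture's op list and only executes at flush, which is why the SurfaceTest.cpp change below adds an explicit context->flush() before the backend textures are deleted. A tiny self-contained model of the record-now, execute-at-flush pattern (ModelContext is a stand-in, not the real GrContext):

    #include <cassert>
    #include <functional>
    #include <vector>

    struct ModelContext {
        std::vector<std::function<void()>> fPendingWork;   // stands in for the recorded op lists

        void recordCopy(int* dst, const int* src) {
            fPendingWork.push_back([=] { *dst = *src; });  // recorded, not executed
        }
        void flush() {
            for (auto& work : fPendingWork) { work(); }
            fPendingWork.clear();
        }
    };

    int main() {
        ModelContext ctx;
        int src = 42, dst = 0;
        ctx.recordCopy(&dst, &src);
        assert(dst == 0);     // nothing has reached the "GPU" yet
        ctx.flush();
        assert(dst == 42);    // the copy executes at flush
        return 0;
    }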
diff --git a/src/gpu/GrTextureOpList.cpp b/src/gpu/GrTextureOpList.cpp
index 15ca7c1aab..24bea3331c 100644
--- a/src/gpu/GrTextureOpList.cpp
+++ b/src/gpu/GrTextureOpList.cpp
@@ -50,7 +50,7 @@ void GrTextureOpList::validateTargetsSingleRenderTarget() const {
#endif
void GrTextureOpList::prepareOps(GrOpFlushState* flushState) {
- // MDB TODO: add SkASSERT(this->isClosed());
+ SkASSERT(this->isClosed());
// Loop over the ops that haven't yet generated their geometry
for (int i = 0; i < fRecordedOps.count(); ++i) {
@@ -77,6 +77,7 @@ bool GrTextureOpList::executeOps(GrOpFlushState* flushState) {
void GrTextureOpList::reset() {
fRecordedOps.reset();
+ INHERITED::reset();
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/tests/SurfaceTest.cpp b/tests/SurfaceTest.cpp
index 1a03f15b0b..b8092e46ef 100644
--- a/tests/SurfaceTest.cpp
+++ b/tests/SurfaceTest.cpp
@@ -914,6 +914,8 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(SurfaceCreationWithColorSpace_Gpu, reporter,
test_surface_creation_and_snapshot_with_color_space(reporter, "wrapped", f16Support,
wrappedSurfaceMaker);
+ context->flush();
+
for (auto textureHandle : textureHandles) {
context->getGpu()->deleteTestingOnlyBackendTexture(textureHandle);
}