author     Robert Phillips <robertphillips@google.com>      2017-04-11 12:54:57 -0400
committer  Skia Commit-Bot <skia-commit-bot@chromium.org>   2017-04-11 19:38:18 +0000
commit     1119dc366e15ef737d05d3a087410ea40c508101 (patch)
tree       0b1363476a28c6df8baf79a46dbee8a74eac48af /src/gpu
parent     fafe135349bd34961a12bfd8185733709cd0e45e (diff)
Remove discard from GrRenderTarget & force it to always go through a RenderTargetContext
This is a bit sloppy in that it ignores some instances where discards were being issued before.
The creation of the temp RTContext in the RenderTarget's discard method was causing an extra split in the opLists.
This is split out of: https://skia-review.googlesource.com/c/10284/ (Omnibus: Remove GrSurface-derived classes from ops)
Change-Id: Ic366d303280635763b0fae238c4df37c04fb8503
Reviewed-on: https://skia-review.googlesource.com/11125
Commit-Queue: Robert Phillips <robertphillips@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
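
The pattern this patch standardizes: a caller that wants a render target's contents discarded goes through a GrRenderTargetContext and calls its discard(), which records the request on that context's op list, instead of calling GrRenderTarget::discard() and having it build a temporary wrapped context (the source of the extra opList split mentioned above). A minimal sketch of the caller side, not taken from the patch; discard_new_target is a hypothetical helper and the header names are assumed to be the in-tree ones of this era:

    // Sketch only -- hypothetical caller, assuming Skia's src/gpu headers of this era.
    #include "GrRenderTargetContext.h"
    #include "SkRefCnt.h"

    void discard_new_target(sk_sp<GrRenderTargetContext> rtc) {
        // The factories patched below (GrContext::makeRenderTargetContext,
        // GrContext::makeDeferredRenderTargetContext,
        // GrPreFlushResourceProvider::makeRenderTargetContext) can return nullptr,
        // so callers still null-check before using the context.
        if (!rtc) {
            return;
        }

        // Post-patch: the discard is issued on the context, which records a GrDiscardOp
        // on its GrRenderTargetOpList when the caps report discard support. The removed
        // GrRenderTarget::discard() used to build a temporary wrapped context here,
        // splitting the opList.
        rtc->discard();
    }

In the patch itself this call is made by the factories right after creating the context, so newly created render targets start out with a pending discard rather than relying on the GrGpu and GrResourceProvider workarounds that are deleted below.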
Diffstat (limited to 'src/gpu')
-rw-r--r--  src/gpu/GrContext.cpp                   | 25
-rw-r--r--  src/gpu/GrContextPriv.h                 |  4
-rw-r--r--  src/gpu/GrGpu.cpp                       |  8
-rw-r--r--  src/gpu/GrPreFlushResourceProvider.cpp  | 30
-rw-r--r--  src/gpu/GrRenderTarget.cpp              | 16
-rw-r--r--  src/gpu/GrRenderTargetContext.cpp       |  7
-rw-r--r--  src/gpu/GrRenderTargetOpList.cpp        | 23
-rw-r--r--  src/gpu/GrRenderTargetOpList.h          |  8
-rw-r--r--  src/gpu/GrResourceProvider.cpp          | 24
-rw-r--r--  src/gpu/ops/GrClearOp.h                 |  7
-rw-r--r--  src/gpu/ops/GrDiscardOp.h               | 29
11 files changed, 105 insertions, 76 deletions
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index bfe094df7a..31f1787d56 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -267,7 +267,7 @@ static bool valid_unpremul_config(GrPixelConfig config) {
     return GrPixelConfigIs8888Unorm(config) || kRGBA_half_GrPixelConfig == config;
 }
 
-bool GrContextPriv::writeSurfacePixels(GrSurfaceProxy* srcProxy, SkColorSpace* dstColorSpace,
+bool GrContextPriv::writeSurfacePixels(GrSurfaceProxy* dstProxy, SkColorSpace* dstColorSpace,
                                        int left, int top, int width, int height,
                                        GrPixelConfig srcConfig, SkColorSpace* srcColorSpace,
                                        const void* buffer, size_t rowBytes,
@@ -276,11 +276,11 @@ bool GrContextPriv::writeSurfacePixels(GrSurfaceProxy* srcProxy, SkColorSpace* d
     ASSERT_SINGLE_OWNER_PRIV
     RETURN_FALSE_IF_ABANDONED_PRIV
-    ASSERT_OWNED_PROXY_PRIV(srcProxy);
-    SkASSERT(srcProxy);
+    ASSERT_OWNED_PROXY_PRIV(dstProxy);
+    SkASSERT(dstProxy);
     GR_AUDIT_TRAIL_AUTO_FRAME(&fContext->fAuditTrail, "GrContextPriv::writeSurfacePixels");
 
-    GrSurface* surface = srcProxy->instantiate(fContext->resourceProvider());
+    GrSurface* surface = dstProxy->instantiate(fContext->resourceProvider());
     if (!surface) {
         return false;
     }
@@ -847,6 +847,8 @@ sk_sp<GrRenderTargetContext> GrContext::makeRenderTargetContext(SkBackingFit fit
         return nullptr;
     }
 
+    renderTargetContext->discard();
+
     return renderTargetContext;
 }
 
@@ -873,9 +875,18 @@ sk_sp<GrRenderTargetContext> GrContext::makeDeferredRenderTargetContext(
         return nullptr;
     }
 
-    return fDrawingManager->makeRenderTargetContext(std::move(rtp),
-                                                    std::move(colorSpace),
-                                                    surfaceProps);
+    sk_sp<GrRenderTargetContext> renderTargetContext(
+        fDrawingManager->makeRenderTargetContext(std::move(rtp),
+                                                 std::move(colorSpace),
+                                                 surfaceProps));
+
+    if (!renderTargetContext) {
+        return nullptr;
+    }
+
+    renderTargetContext->discard();
+
+    return renderTargetContext;
 }
 
 bool GrContext::abandoned() const {
diff --git a/src/gpu/GrContextPriv.h b/src/gpu/GrContextPriv.h
index 6ed53ff9d5..95df8eacc9 100644
--- a/src/gpu/GrContextPriv.h
+++ b/src/gpu/GrContextPriv.h
@@ -136,7 +136,7 @@ public:
 
     /**
      * Writes a rectangle of pixels to a surface.
-     * @param surface       the surface to write to.
+     * @param dst           the surface to write to.
      * @param dstColorSpace color space of the surface
      * @param left          left edge of the rectangle to write (inclusive)
      * @param top           top edge of the rectangle to write (inclusive)
@@ -151,7 +151,7 @@ public:
      * @return true if the write succeeded, false if not. The write can fail because of an
      *         unsupported combination of surface and src configs.
      */
-    bool writeSurfacePixels(GrSurfaceProxy* src, SkColorSpace* dstColorSpace,
+    bool writeSurfacePixels(GrSurfaceProxy* dst, SkColorSpace* dstColorSpace,
                             int left, int top, int width, int height,
                             GrPixelConfig config, SkColorSpace* srcColorSpace,
                             const void* buffer, size_t rowBytes,
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index 40988c3922..b1560f9a2d 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -188,14 +188,6 @@ GrTexture* GrGpu::createTexture(const GrSurfaceDesc& origDesc, SkBudgeted budget
                 fStats.incTextureUploads();
             }
         }
-        // This is a current work around to get discards into newly created textures. Once we are in
-        // MDB world, we should remove this code a rely on the draw target having specified load
-        // operations.
-        if (isRT && texels.empty()) {
-            GrRenderTarget* rt = tex->asRenderTarget();
-            SkASSERT(rt);
-            rt->discard();
-        }
     }
     return tex;
 }
diff --git a/src/gpu/GrPreFlushResourceProvider.cpp b/src/gpu/GrPreFlushResourceProvider.cpp
index e907f3925d..7a105604e0 100644
--- a/src/gpu/GrPreFlushResourceProvider.cpp
+++ b/src/gpu/GrPreFlushResourceProvider.cpp
@@ -38,9 +38,18 @@ sk_sp<GrRenderTargetContext> GrPreFlushResourceProvider::makeRenderTargetContext
                                                                   fDrawingMgr->fOptionsForOpLists));
     proxy->setLastOpList(opList.get());
 
-    return fDrawingMgr->makeRenderTargetContext(std::move(proxy),
-                                                std::move(colorSpace),
-                                                props);
+    sk_sp<GrRenderTargetContext> renderTargetContext(
+        fDrawingMgr->makeRenderTargetContext(std::move(proxy),
+                                             std::move(colorSpace),
+                                             props));
+
+    if (!renderTargetContext) {
+        return nullptr;
+    }
+
+    renderTargetContext->discard();
+
+    return renderTargetContext;
 }
 
 // TODO: we only need this entry point as long as we have to pre-allocate the atlas.
@@ -58,8 +67,17 @@ sk_sp<GrRenderTargetContext> GrPreFlushResourceProvider::makeRenderTargetContext
                                                                   fDrawingMgr->fOptionsForOpLists));
     proxy->setLastOpList(opList.get());
 
-    return fDrawingMgr->makeRenderTargetContext(std::move(proxy),
-                                                std::move(colorSpace),
-                                                props);
+    sk_sp<GrRenderTargetContext> renderTargetContext(
+        fDrawingMgr->makeRenderTargetContext(std::move(proxy),
+                                             std::move(colorSpace),
+                                             props));
+
+    if (!renderTargetContext) {
+        return nullptr;
+    }
+
+    renderTargetContext->discard();
+
+    return renderTargetContext;
 }
diff --git a/src/gpu/GrRenderTarget.cpp b/src/gpu/GrRenderTarget.cpp
index f495cd34bb..9ea8596050 100644
--- a/src/gpu/GrRenderTarget.cpp
+++ b/src/gpu/GrRenderTarget.cpp
@@ -28,22 +28,6 @@ GrRenderTarget::GrRenderTarget(GrGpu* gpu, const GrSurfaceDesc& desc, Flags flag
     fResolveRect.setLargestInverted();
 }
 
-void GrRenderTarget::discard() {
-    // go through context so that all necessary flushing occurs
-    GrContext* context = this->getContext();
-    if (!context) {
-        return;
-    }
-
-    sk_sp<GrRenderTargetContext> renderTargetContext(
-        context->contextPriv().makeWrappedRenderTargetContext(sk_ref_sp(this), nullptr));
-    if (!renderTargetContext) {
-        return;
-    }
-
-    renderTargetContext->discard();
-}
-
 void GrRenderTarget::flagAsNeedingResolve(const SkIRect* rect) {
     if (kCanResolve_ResolveType == getResolveType()) {
         if (rect) {
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp
index 3aefde0dc5..be3ad8b294 100644
--- a/src/gpu/GrRenderTargetContext.cpp
+++ b/src/gpu/GrRenderTargetContext.cpp
@@ -192,13 +192,6 @@ void GrRenderTargetContext::discard() {
 
     AutoCheckFlush acf(this->drawingManager());
 
-    // TODO: This needs to be fixed up since it ends the deferral of the GrRenderTarget.
-    sk_sp<GrRenderTarget> rt(
-        sk_ref_sp(fRenderTargetProxy->instantiate(fContext->resourceProvider())));
-    if (!rt) {
-        return;
-    }
-
     this->getOpList()->discard(this);
 }
 
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrRenderTargetOpList.cpp
index a003c121d1..2b1cc149b1 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -14,7 +14,6 @@
 #include "GrRenderTargetContext.h"
 #include "GrResourceProvider.h"
 #include "ops/GrClearOp.h"
-#include "ops/GrClearStencilClipOp.h"
 #include "ops/GrCopySurfaceOp.h"
 #include "ops/GrDiscardOp.h"
 #include "instanced/InstancedRendering.h"
@@ -229,8 +228,11 @@ void GrRenderTargetOpList::discard(GrRenderTargetContext* renderTargetContext) {
     // Currently this just inserts a discard op. However, once in MDB this can remove all the
     // previously recorded ops and change the load op to discard.
     if (this->caps()->discardRenderTargetSupport()) {
-        this->recordOp(GrDiscardOp::Make(renderTargetContext->accessRenderTarget()),
-                       renderTargetContext);
+        std::unique_ptr<GrOp> op(GrDiscardOp::Make(renderTargetContext));
+        if (!op) {
+            return;
+        }
+        this->recordOp(std::move(op), renderTargetContext);
     }
 }
 
@@ -301,12 +303,12 @@ GrOp* GrRenderTargetOpList::recordOp(std::unique_ptr<GrOp> op,
     // 3) find a 'blocker'
     GR_AUDIT_TRAIL_ADD_OP(fAuditTrail, op.get(), renderTarget->uniqueID(),
                           renderTargetContext->asRenderTargetProxy()->uniqueID());
-    GrOP_INFO("Recording (%s, B%u)\n"
-              "\tBounds LRTB (%f, %f, %f, %f)\n",
+    GrOP_INFO("Recording (%s, opID: %u)\n"
+              "\tBounds: [L: %f T: %f R: %f B: %f]\n",
               op->name(), op->uniqueID(),
-              op->bounds().fLeft, op->bounds().fRight,
-              op->bounds().fTop, op->bounds().fBottom);
+              op->bounds().fLeft, op->bounds().fTop,
+              op->bounds().fRight, op->bounds().fBottom);
     GrOP_INFO(SkTabString(op->dumpInfo(), 1).c_str());
     GrOP_INFO("\tClipped Bounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
               op->bounds().fLeft, op->bounds().fTop, op->bounds().fRight,
               op->bounds().fBottom);
@@ -319,12 +321,13 @@ GrOp* GrRenderTargetOpList::recordOp(std::unique_ptr<GrOp> op,
         const RecordedOp& candidate = fRecordedOps.fromBack(i);
         // We cannot continue to search backwards if the render target changes
         if (candidate.fRenderTarget.get() != renderTarget) {
-            GrOP_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n", candidate.fOp->name(),
+            GrOP_INFO("\t\tBreaking because of (%s, opID: %u) Rendertarget mismatch\n",
+                      candidate.fOp->name(),
                       candidate.fOp->uniqueID());
             break;
         }
         if (this->combineIfPossible(candidate, op.get(), clip, dstTexture)) {
-            GrOP_INFO("\t\tCombining with (%s, B%u)\n", candidate.fOp->name(),
+            GrOP_INFO("\t\tCombining with (%s, opID: %u)\n", candidate.fOp->name(),
                       candidate.fOp->uniqueID());
             GrOP_INFO("\t\t\tCombined op info:\n");
             GrOP_INFO(SkTabString(candidate.fOp->dumpInfo(), 4).c_str());
@@ -333,7 +336,7 @@ GrOp* GrRenderTargetOpList::recordOp(std::unique_ptr<GrOp> op,
         }
         // Stop going backwards if we would cause a painter's order violation.
         if (!can_reorder(fRecordedOps.fromBack(i).fOp->bounds(), op->bounds())) {
-            GrOP_INFO("\t\tIntersects with (%s, B%u)\n", candidate.fOp->name(),
+            GrOP_INFO("\t\tIntersects with (%s, opID: %u)\n", candidate.fOp->name(),
                       candidate.fOp->uniqueID());
             break;
         }
diff --git a/src/gpu/GrRenderTargetOpList.h b/src/gpu/GrRenderTargetOpList.h
index 7e26490920..be4deb30ea 100644
--- a/src/gpu/GrRenderTargetOpList.h
+++ b/src/gpu/GrRenderTargetOpList.h
@@ -119,9 +119,13 @@ private:
     friend class GrRenderTargetContextPriv; // for stencil clip state. TODO: this is invasive
 
     struct RecordedOp {
-        RecordedOp(std::unique_ptr<GrOp> op, GrRenderTarget* rt, const GrAppliedClip* appliedClip,
+        RecordedOp(std::unique_ptr<GrOp> op,
+                   GrRenderTarget* rt,
+                   const GrAppliedClip* appliedClip,
                    const DstTexture* dstTexture)
-            : fOp(std::move(op)), fRenderTarget(rt), fAppliedClip(appliedClip) {
+            : fOp(std::move(op))
+            , fRenderTarget(rt)
+            , fAppliedClip(appliedClip) {
             if (dstTexture) {
                 fDstTexture = *dstTexture;
             }
diff --git a/src/gpu/GrResourceProvider.cpp b/src/gpu/GrResourceProvider.cpp
index dafa27bcf2..d2ec204d6c 100644
--- a/src/gpu/GrResourceProvider.cpp
+++ b/src/gpu/GrResourceProvider.cpp
@@ -58,12 +58,17 @@ sk_sp<GrTextureProxy> GrResourceProvider::createMipMappedTexture(
                                                   SkDestinationSurfaceColorMode mipColorMode) {
     ASSERT_SINGLE_OWNER
-    if (this->isAbandoned()) {
-        return nullptr;
+    if (!mipLevelCount) {
+        if (texels) {
+            return nullptr;
+        }
+        return GrSurfaceProxy::MakeDeferred(this, desc, budgeted, nullptr, 0);
     }
-    if (mipLevelCount && !texels) {
+
+    if (this->isAbandoned()) {
         return nullptr;
     }
+
     for (int i = 0; i < mipLevelCount; ++i) {
         if (!texels[i].fPixels) {
             return nullptr;
         }
@@ -82,8 +87,8 @@ sk_sp<GrTextureProxy> GrResourceProvider::createMipMappedTexture(
         sk_sp<GrTexture> tex(this->refScratchTexture(desc, flags));
         if (tex) {
             sk_sp<GrTextureProxy> proxy = GrSurfaceProxy::MakeWrapped(tex);
-            if (!mipLevelCount ||
-                fGpu->getContext()->contextPriv().writeSurfacePixels(
+
+            if (fGpu->getContext()->contextPriv().writeSurfacePixels(
                     proxy.get(), nullptr, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                     nullptr, texels[0].fPixels, texels[0].fRowBytes)) {
                 if (SkBudgeted::kNo == budgeted) {
@@ -143,12 +148,13 @@ GrTexture* GrResourceProvider::createApproxTexture(const GrSurfaceDesc& desc, ui
     if (this->isAbandoned()) {
        return nullptr;
     }
 
+    // Currently we don't recycle compressed textures as scratch.
     if (GrPixelConfigIsCompressed(desc.fConfig)) {
         return nullptr;
-    } else {
-        return this->refScratchTexture(desc, flags);
     }
+
+    return this->refScratchTexture(desc, flags);
 }
 
 GrTexture* GrResourceProvider::refScratchTexture(const GrSurfaceDesc& inDesc,
@@ -182,10 +188,6 @@ GrTexture* GrResourceProvider::refScratchTexture(const GrSurfaceDesc& inDesc,
                                                                    scratchFlags);
         if (resource) {
             GrSurface* surface = static_cast<GrSurface*>(resource);
-            GrRenderTarget* rt = surface->asRenderTarget();
-            if (rt && fGpu->caps()->discardRenderTargetSupport()) {
-                rt->discard();
-            }
             return surface->asTexture();
         }
     }
diff --git a/src/gpu/ops/GrClearOp.h b/src/gpu/ops/GrClearOp.h
index 1314d47c5d..5340af1172 100644
--- a/src/gpu/ops/GrClearOp.h
+++ b/src/gpu/ops/GrClearOp.h
@@ -55,14 +55,17 @@ public:
     const char* name() const override { return "Clear"; }
 
     SkString dumpInfo() const override {
-        SkString string("Scissor [");
+        SkString string;
+        string.appendf("rtID: %d proxyID: %d Scissor [",
+                       fRenderTarget.get()->uniqueID().asUInt(),
+                       fProxyUniqueID.asUInt());
         if (fClip.scissorEnabled()) {
             const SkIRect& r = fClip.scissorRect();
             string.appendf("L: %d, T: %d, R: %d, B: %d", r.fLeft, r.fTop, r.fRight, r.fBottom);
         } else {
             string.append("disabled");
         }
-        string.appendf("], Color: 0x%08x, RT: %d", fColor, fProxyUniqueID.asUInt());
+        string.appendf("], Color: 0x%08x ", fColor);
         string.append(INHERITED::dumpInfo());
         return string;
     }
diff --git a/src/gpu/ops/GrDiscardOp.h b/src/gpu/ops/GrDiscardOp.h
index 098df63e7a..ca7f0007de 100644
--- a/src/gpu/ops/GrDiscardOp.h
+++ b/src/gpu/ops/GrDiscardOp.h
@@ -16,23 +16,39 @@ class GrDiscardOp final : public GrOp {
 public:
     DEFINE_OP_CLASS_ID
 
-    static std::unique_ptr<GrOp> Make(GrRenderTarget* rt) {
-        return std::unique_ptr<GrOp>(new GrDiscardOp(rt));
+
+    // MDB TODO: replace the renderTargetContext with just the renderTargetProxy.
+    // For now, we need the renderTargetContext for its accessRenderTarget powers.
+    static std::unique_ptr<GrOp> Make(GrRenderTargetContext* rtc) {
+
+        // MDB TODO: remove this. In this hybrid state we need to be sure the RT is instantiable
+        // so it can carry the IO refs. In the future we will just get the proxy and
+        // it carry the IO refs.
+        if (!rtc->accessRenderTarget()) {
+            return nullptr;
+        }
+
+        return std::unique_ptr<GrOp>(new GrDiscardOp(rtc));
     }
 
     const char* name() const override { return "Discard"; }
 
     SkString dumpInfo() const override {
         SkString string;
-        string.printf("RT: %d", fRenderTarget.get()->uniqueID().asUInt());
+        string.printf("rtID: %d proxyID: %d ", fRenderTarget.get()->uniqueID().asUInt(),
+                      fProxyUniqueID.asUInt());
         string.append(INHERITED::dumpInfo());
         return string;
     }
 
 private:
-    GrDiscardOp(GrRenderTarget* rt) : INHERITED(ClassID()), fRenderTarget(rt) {
-        this->setBounds(SkRect::MakeIWH(rt->width(), rt->height()), HasAABloat::kNo,
+    GrDiscardOp(GrRenderTargetContext* rtc)
+        : INHERITED(ClassID())
+        , fProxyUniqueID(rtc->asSurfaceProxy()->uniqueID()) {
+        this->setBounds(SkRect::MakeIWH(rtc->width(), rtc->height()), HasAABloat::kNo,
                         IsZeroArea::kNo);
+
+        fRenderTarget.reset(rtc->accessRenderTarget());
     }
 
     bool onCombineIfPossible(GrOp* that, const GrCaps& caps) override {
@@ -42,9 +58,12 @@ private:
     void onPrepare(GrOpFlushState*) override {}
 
     void onExecute(GrOpFlushState* state) override {
+        // MDB TODO: instantiate the renderTarget from the proxy in here
        state->commandBuffer()->discard(fRenderTarget.get());
     }
 
+    // MDB TODO: remove this. When the renderTargetProxy carries the refs this will be redundant.
+    GrSurfaceProxy::UniqueID                              fProxyUniqueID;
     GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
 
     typedef GrOp INHERITED;
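
Taken together, the recording and execution side of the new path can be summarized as a sketch. Only rtc->discard() below is a real public entry point from this patch; discard_flow is a hypothetical helper, the header name is assumed, and the rest of the flow is paraphrased in comments from the hunks above:

    // Sketch only -- hypothetical walk through the post-patch discard flow.
    #include "GrRenderTargetContext.h"

    void discard_flow(GrRenderTargetContext* rtc) {
        // 1) Callers ask the context to discard; GrRenderTarget::discard() is gone.
        rtc->discard();

        // 2) Internally that is now just this->getOpList()->discard(this).
        //    GrRenderTargetOpList::discard records a GrDiscardOp only when
        //    caps()->discardRenderTargetSupport() is true and GrDiscardOp::Make(rtc)
        //    succeeds; Make can return nullptr when the render target cannot be
        //    instantiated, in which case the discard is simply dropped.

        // 3) At flush, GrDiscardOp::onExecute forwards to
        //    state->commandBuffer()->discard(fRenderTarget.get()).
    }

The null check in GrRenderTargetOpList::discard is the behavioral detail worth noting: GrDiscardOp::Make now takes the GrRenderTargetContext and declines to build an op when the underlying render target cannot be instantiated, and in that case no discard is recorded.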