 gm/strokefill.cpp                               |  28
 gn/gpu.gni                                      |   2
 include/private/GrSurfaceProxy.h                |   7
 samplecode/SamplePathText.cpp                   |   9
 src/gpu/GrClipStackClip.cpp                     |  34
 src/gpu/GrClipStackClip.h                       |   3
 src/gpu/GrOpList.h                              |   2
 src/gpu/GrProcessor.h                           |   1
 src/gpu/GrReducedClip.cpp                       | 134
 src/gpu/GrReducedClip.h                         |  57
 src/gpu/GrRenderTargetContextPriv.h             |   1
 src/gpu/ccpr/GrCCPRAtlas.h                      |   2
 src/gpu/ccpr/GrCCPRClipProcessor.cpp            | 114
 src/gpu/ccpr/GrCCPRClipProcessor.h              |  42
 src/gpu/ccpr/GrCCPRCoverageOp.cpp               |   4
 src/gpu/ccpr/GrCCPRCoverageOp.h                 |   5
 src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp | 420
 src/gpu/ccpr/GrCoverageCountingPathRenderer.h   | 193
 tests/GrCCPRTest.cpp                            |  48
 tools/gpu/GrTest.cpp                            |   4
 20 files changed, 847 insertions(+), 263 deletions(-)
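This change wires Skia's coverage counting path renderer (CCPR) into the clip stack: GrReducedClip can now hand antialiased clip paths off to GrCoverageCountingPathRenderer, which renders their coverage-count masks into an atlas at flush time, and a new GrCCPRClipProcessor fragment processor resolves those counts into clip coverage. As a reading aid, here is a CPU-side restatement of the count-to-coverage mapping that GrCCPRClipProcessor::Impl::emitCode() emits as GLSL later in this diff; the standalone helper and its name are illustrative only and are not part of the patch.

    #include <algorithm>
    #include <cmath>

    // Illustrative only: mirrors the GLSL emitted by GrCCPRClipProcessor::Impl::emitCode().
    // A "coverage count" is the signed winding-style sum sampled from the atlas texture.
    static float resolve_clip_coverage(float coverageCount, bool evenOdd, bool inverseFill) {
        float coverage;
        if (evenOdd) {
            // Even-odd: fold the count into a triangle wave with period 2, so odd integer
            // counts map to full coverage and even counts map to zero.
            float t = std::fmod(std::fabs(coverageCount), 2.f);
            coverage = 1 - std::fabs(t - 1);
        } else {
            // Nonzero winding: saturate the magnitude of the count at full coverage.
            coverage = std::min(std::fabs(coverageCount), 1.f);
        }
        // Inverse fill types flip the result, matching "coverage = 1 - coverage" in the shader.
        return inverseFill ? 1 - coverage : coverage;
    }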
diff --git a/gm/strokefill.cpp b/gm/strokefill.cpp
index 2e96cd7c36..b28b415948 100644
--- a/gm/strokefill.cpp
+++ b/gm/strokefill.cpp
@@ -359,6 +359,34 @@ DEF_SIMPLE_GM(bug339297, canvas, 640, 480) {
     canvas->drawPath(path, paint);
 }
 
+DEF_SIMPLE_GM(bug339297_as_clip, canvas, 640, 480) {
+    SkPath path;
+    path.moveTo(-469515, -10354890);
+    path.cubicTo(771919.62f, -10411179, 2013360.1f, -10243774, 3195542.8f, -9860664);
+    path.lineTo(3195550, -9860655);
+    path.lineTo(3195539, -9860652);
+    path.lineTo(3195539, -9860652);
+    path.lineTo(3195539, -9860652);
+    path.cubicTo(2013358.1f, -10243761, 771919.25f, -10411166, -469513.84f, -10354877);
+    path.lineTo(-469515, -10354890);
+    path.close();
+
+    canvas->translate(258, 10365663);
+
+    canvas->save();
+    canvas->clipPath(path, true);
+    canvas->clear(SK_ColorBLACK);
+    canvas->restore();
+
+    SkPaint paint;
+    paint.setAntiAlias(true);
+    paint.setStyle(SkPaint::kFill_Style);
+    paint.setColor(SK_ColorRED);
+    paint.setStyle(SkPaint::kStroke_Style);
+    paint.setStrokeWidth(1);
+    canvas->drawPath(path, paint);
+}
+
 DEF_SIMPLE_GM(bug6987, canvas, 200, 200) {
     SkPaint paint;
     paint.setStyle(SkPaint::kStroke_Style);
diff --git a/gn/gpu.gni b/gn/gpu.gni
index a26111dbc3..b89bd3bc84 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -296,6 +296,8 @@ skia_gpu_sources = [
   # coverage counting path renderer
   "$_src/gpu/ccpr/GrCCPRAtlas.cpp",
   "$_src/gpu/ccpr/GrCCPRAtlas.h",
+  "$_src/gpu/ccpr/GrCCPRClipProcessor.cpp",
+  "$_src/gpu/ccpr/GrCCPRClipProcessor.h",
   "$_src/gpu/ccpr/GrCCPRCoverageOp.cpp",
   "$_src/gpu/ccpr/GrCCPRCoverageOp.h",
   "$_src/gpu/ccpr/GrCCPRCoverageProcessor.cpp",
diff --git a/include/private/GrSurfaceProxy.h b/include/private/GrSurfaceProxy.h
index 387aecd428..0e79a3f68b 100644
--- a/include/private/GrSurfaceProxy.h
+++ b/include/private/GrSurfaceProxy.h
@@ -49,6 +49,13 @@ public:
         this->didRemoveRefOrPendingIO();
     }
 
+#ifdef SK_DEBUG
+    bool isUnique_debugOnly() const { // For asserts.
+        SkASSERT(fRefCnt >= 0 && fPendingWrites >= 0 && fPendingReads >= 0);
+        return 1 == fRefCnt + fPendingWrites + fPendingReads;
+    }
+#endif
+
     void validate() const {
 #ifdef SK_DEBUG
         SkASSERT(fRefCnt >= 0);
diff --git a/samplecode/SamplePathText.cpp b/samplecode/SamplePathText.cpp
index 31b0a7d8b9..b6a730f6d7 100644
--- a/samplecode/SamplePathText.cpp
+++ b/samplecode/SamplePathText.cpp
@@ -66,14 +66,13 @@ public:
 
     void onDrawContent(SkCanvas* canvas) override {
         if (fDoClip) {
-            SkMatrix oldMatrix = canvas->getTotalMatrix();
-            canvas->setMatrix(SkMatrix::MakeScale(this->width(), this->height()));
+            SkPath deviceSpaceClipPath = fClipPath;
+            deviceSpaceClipPath.transform(SkMatrix::MakeScale(this->width(), this->height()));
             canvas->save();
-            canvas->clipPath(fClipPath, SkClipOp::kDifference, true);
+            canvas->clipPath(deviceSpaceClipPath, SkClipOp::kDifference, true);
             canvas->clear(SK_ColorBLACK);
             canvas->restore();
-            canvas->clipPath(fClipPath, SkClipOp::kIntersect, true);
-            canvas->setMatrix(oldMatrix);
+            canvas->clipPath(deviceSpaceClipPath, SkClipOp::kIntersect, true);
         }
         this->drawGlyphs(canvas);
     }
diff --git a/src/gpu/GrClipStackClip.cpp b/src/gpu/GrClipStackClip.cpp
index bd6ed9afed..0f647e07b4 100644
--- a/src/gpu/GrClipStackClip.cpp
+++ b/src/gpu/GrClipStackClip.cpp
@@ -11,9 +11,9 @@
 #include "GrContextPriv.h"
 #include "GrDeferredProxyUploader.h"
 #include "GrDrawingManager.h"
-#include "GrRenderTargetContextPriv.h"
 #include "GrFixedClip.h"
 #include "GrGpuResourcePriv.h"
+#include "GrRenderTargetContextPriv.h"
 #include "GrResourceProvider.h"
 #include "GrStencilAttachment.h"
 #include "GrSWMaskHelper.h"
@@ -190,6 +190,8 @@ bool GrClipStackClip::apply(GrContext* context, GrRenderTargetContext* renderTar
         return true;
     }
 
+    const auto* caps = context->caps()->shaderCaps();
+    int maxWindowRectangles = renderTargetContext->priv().maxWindowRectangles();
     int maxAnalyticFPs = context->caps()->maxClipAnalyticFPs();
     if (GrFSAAType::kNone != renderTargetContext->fsaaType()) {
         // With mixed samples (non-msaa color buffer), any coverage info is lost from color once it
@@ -200,10 +202,13 @@ bool GrClipStackClip::apply(GrContext* context, GrRenderTargetContext* renderTar
         }
         SkASSERT(!context->caps()->avoidStencilBuffers()); // We disable MSAA when avoiding stencil.
     }
 
+    auto* ccpr = context->contextPriv().drawingManager()->getCoverageCountingPathRenderer();
-    const auto* caps = context->caps()->shaderCaps();
-    GrReducedClip reducedClip(*fStack, devBounds, caps,
-                              renderTargetContext->priv().maxWindowRectangles(), maxAnalyticFPs);
+    GrReducedClip reducedClip(*fStack, devBounds, caps, maxWindowRectangles, maxAnalyticFPs, ccpr);
+    if (InitialState::kAllOut == reducedClip.initialState() &&
+        reducedClip.maskElements().isEmpty()) {
+        return false;
+    }
 
     if (reducedClip.hasScissor() && !GrClip::IsInsideClip(reducedClip.scissor(), devBounds)) {
         out->hardClip().addScissor(reducedClip.scissor(), bounds);
@@ -214,14 +219,27 @@ bool GrClipStackClip::apply(GrContext* context, GrRenderTargetContext* renderTar
                                  GrWindowRectsState::Mode::kExclusive);
     }
 
-    if (std::unique_ptr<GrFragmentProcessor> clipFPs = reducedClip.detachAnalyticFPs()) {
-        out->addCoverageFP(std::move(clipFPs));
+    if (!reducedClip.maskElements().isEmpty()) {
+        if (!this->applyClipMask(context, renderTargetContext, reducedClip, hasUserStencilSettings,
+                                 out)) {
+            return false;
+        }
     }
 
-    if (reducedClip.maskElements().isEmpty()) {
-        return InitialState::kAllIn == reducedClip.initialState();
+    // The opList ID must not be looked up until AFTER producing the clip mask (if any). That step
+    // can cause a flush or otherwise change which opList our draw is going into.
+    uint32_t opListID = renderTargetContext->getOpList()->uniqueID();
+    int rtWidth = renderTargetContext->width(), rtHeight = renderTargetContext->height();
+    if (auto clipFPs = reducedClip.finishAndDetachAnalyticFPs(opListID, rtWidth, rtHeight)) {
+        out->addCoverageFP(std::move(clipFPs));
     }
 
+    return true;
+}
+
+bool GrClipStackClip::applyClipMask(GrContext* context, GrRenderTargetContext* renderTargetContext,
+                                    const GrReducedClip& reducedClip, bool hasUserStencilSettings,
+                                    GrAppliedClip* out) const {
 #ifdef SK_DEBUG
     SkASSERT(reducedClip.hasScissor());
     SkIRect rtIBounds = SkIRect::MakeWH(renderTargetContext->width(),
diff --git a/src/gpu/GrClipStackClip.h b/src/gpu/GrClipStackClip.h
index ae14bb35be..aeb983468c 100644
--- a/src/gpu/GrClipStackClip.h
+++ b/src/gpu/GrClipStackClip.h
@@ -46,6 +46,9 @@ private:
                            GrPathRenderer** prOut,
                            bool needsStencil);
 
+    bool applyClipMask(GrContext*, GrRenderTargetContext*, const GrReducedClip&,
+                       bool hasUserStencilSettings, GrAppliedClip*) const;
+
     // Creates an alpha mask of the clip. The mask is a rasterization of elements through the
     // rect specified by clipSpaceIBounds.
     sk_sp<GrTextureProxy> createAlphaClipMask(GrContext*, const GrReducedClip&) const;
diff --git a/src/gpu/GrOpList.h b/src/gpu/GrOpList.h
index 31c8212119..90629642f6 100644
--- a/src/gpu/GrOpList.h
+++ b/src/gpu/GrOpList.h
@@ -102,7 +102,7 @@ public:
      */
     virtual GrRenderTargetOpList* asRenderTargetOpList() { return nullptr; }
 
-    int32_t uniqueID() const { return fUniqueID; }
+    uint32_t uniqueID() const { return fUniqueID; }
 
     /*
      * Dump out the GrOpList dependency DAG
      */
diff --git a/src/gpu/GrProcessor.h b/src/gpu/GrProcessor.h
index 69372b1ca7..5947f9fe27 100644
--- a/src/gpu/GrProcessor.h
+++ b/src/gpu/GrProcessor.h
@@ -67,6 +67,7 @@ public:
     enum ClassID {
         kBigKeyProcessor_ClassID,
         kBlockInputFragmentProcessor_ClassID,
+        kCCPRClipProcessor_ClassID,
         kCircleGeometryProcessor_ClassID,
         kCircleInside2PtConicalEffect_ClassID,
         kCircleOutside2PtConicalEffect_ClassID,
diff --git a/src/gpu/GrReducedClip.cpp b/src/gpu/GrReducedClip.cpp
index f481a10b1c..83079d3e66 100644
--- a/src/gpu/GrReducedClip.cpp
+++ b/src/gpu/GrReducedClip.cpp
@@ -21,6 +21,8 @@
 #include "GrStyle.h"
 #include "GrUserStencilSettings.h"
 #include "SkClipOpPriv.h"
+#include "ccpr/GrCoverageCountingPathRenderer.h"
+#include "effects/GrAARectEffect.h"
 #include "effects/GrConvexPolyEffect.h"
 #include "effects/GrRRectEffect.h"
 
@@ -32,8 +34,12 @@
  * take a rect in case the caller knows a bound on what is to be drawn through this clip.
 */
 GrReducedClip::GrReducedClip(const SkClipStack& stack, const SkRect& queryBounds,
-                             const GrShaderCaps* caps, int maxWindowRectangles, int maxAnalyticFPs)
-    : fCaps(caps), fMaxWindowRectangles(maxWindowRectangles), fMaxAnalyticFPs(maxAnalyticFPs) {
+                             const GrShaderCaps* caps, int maxWindowRectangles, int maxAnalyticFPs,
+                             GrCoverageCountingPathRenderer* ccpr)
+        : fCaps(caps)
+        , fMaxWindowRectangles(maxWindowRectangles)
+        , fMaxAnalyticFPs(maxAnalyticFPs)
+        , fCCPR(fMaxAnalyticFPs ? ccpr : nullptr) {
     SkASSERT(!queryBounds.isEmpty());
     SkASSERT(fMaxWindowRectangles <= GrWindowRectangles::kMaxWindows);
     fHasScissor = false;
@@ -175,6 +181,12 @@ void GrReducedClip::walkStack(const SkClipStack& stack, const SkRect& queryBound
             } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
                 initialTriState = InitialTriState::kAllOut;
                 skippable = true;
+            } else if (!embiggens) {
+                ClipResult result = this->clipInsideElement(element);
+                if (ClipResult::kMadeEmpty == result) {
+                    return;
+                }
+                skippable = (ClipResult::kClipped == result);
             }
         } else {
             if (element->contains(relaxedQueryBounds)) {
@@ -204,6 +216,12 @@ void GrReducedClip::walkStack(const SkClipStack& stack, const SkRect& queryBound
                 skippable = true;
             } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
                 skippable = true;
+            } else if (!embiggens) {
+                ClipResult result = this->clipOutsideElement(element);
+                if (ClipResult::kMadeEmpty == result) {
+                    return;
+                }
+                skippable = (ClipResult::kClipped == result);
             }
         } else {
             if (element->contains(relaxedQueryBounds)) {
@@ -305,6 +323,15 @@ void GrReducedClip::walkStack(const SkClipStack& stack, const SkRect& queryBound
             } else if (GrClip::IsOutsideClip(element->getBounds(), queryBounds)) {
                 initialTriState = InitialTriState::kAllIn;
                 skippable = true;
+            } else if (!embiggens) {
+                ClipResult result = this->clipOutsideElement(element);
+                if (ClipResult::kMadeEmpty == result) {
+                    return;
+                }
+                if (ClipResult::kClipped == result) {
+                    initialTriState = InitialTriState::kAllIn;
+                    skippable = true;
+                }
             }
         } else {
             if (element->contains(relaxedQueryBounds)) {
@@ -466,6 +493,7 @@ GrReducedClip::ClipResult GrReducedClip::clipInsideElement(const Element* elemen
 
         case Element::DeviceSpaceType::kRect:
             SkASSERT(element->getBounds() == element->getDeviceSpaceRect());
+            SkASSERT(!element->isInverseFilled());
            if (element->isAA()) {
                 if (SK_InvalidGenID == fAAClipRectGenID) { // No AA clip rect yet?
                     fAAClipRect = element->getDeviceSpaceRect();
@@ -483,12 +511,13 @@ GrReducedClip::ClipResult GrReducedClip::clipInsideElement(const Element* elemen
             return ClipResult::kClipped;
 
         case Element::DeviceSpaceType::kRRect:
+            SkASSERT(!element->isInverseFilled());
             return this->addAnalyticFP(element->getDeviceSpaceRRect(), Invert::kNo,
                                        GrAA(element->isAA()));
 
         case Element::DeviceSpaceType::kPath:
-            return this->addAnalyticFP(element->getDeviceSpacePath(), Invert::kNo,
-                                       GrAA(element->isAA()));
+            return this->addAnalyticFP(element->getDeviceSpacePath(),
+                                       Invert(element->isInverseFilled()), GrAA(element->isAA()));
     }
 
     SK_ABORT("Unexpected DeviceSpaceType");
@@ -501,6 +530,7 @@ GrReducedClip::ClipResult GrReducedClip::clipOutsideElement(const Element* eleme
             return ClipResult::kMadeEmpty;
 
         case Element::DeviceSpaceType::kRect:
+            SkASSERT(!element->isInverseFilled());
             if (fWindowRects.count() < fMaxWindowRectangles) {
                 // Clip out the inside of every rect. We won't be able to entirely skip the AA ones,
                 // but it saves processing time.
@@ -513,6 +543,7 @@ GrReducedClip::ClipResult GrReducedClip::clipOutsideElement(const Element* eleme
                                        GrAA(element->isAA()));
 
         case Element::DeviceSpaceType::kRRect: {
+            SkASSERT(!element->isInverseFilled());
             const SkRRect& clipRRect = element->getDeviceSpaceRRect();
             ClipResult clipResult = this->addAnalyticFP(clipRRect, Invert::kYes,
                                                         GrAA(element->isAA()));
@@ -552,8 +583,8 @@ GrReducedClip::ClipResult GrReducedClip::clipOutsideElement(const Element* eleme
         }
 
         case Element::DeviceSpaceType::kPath:
-            return this->addAnalyticFP(element->getDeviceSpacePath(), Invert::kYes,
-                                       GrAA(element->isAA()));
+            return this->addAnalyticFP(element->getDeviceSpacePath(),
+                                       Invert(!element->isInverseFilled()), GrAA(element->isAA()));
     }
 
     SK_ABORT("Unexpected DeviceSpaceType");
@@ -572,45 +603,65 @@ inline void GrReducedClip::addWindowRectangle(const SkRect& elementInteriorRect,
     }
 }
 
-template<typename T>
-inline GrReducedClip::ClipResult GrReducedClip::addAnalyticFP(const T& deviceSpaceShape,
-                                                              Invert invert, GrAA aa) {
-    if (fAnalyticFPs.count() >= fMaxAnalyticFPs) {
+GrClipEdgeType GrReducedClip::GetClipEdgeType(Invert invert, GrAA aa) {
+    if (Invert::kNo == invert) {
+        return (GrAA::kYes == aa) ? GrClipEdgeType::kFillAA : GrClipEdgeType::kFillBW;
+    } else {
+        return (GrAA::kYes == aa) ? GrClipEdgeType::kInverseFillAA : GrClipEdgeType::kInverseFillBW;
+    }
+}
+
+GrReducedClip::ClipResult GrReducedClip::addAnalyticFP(const SkRect& deviceSpaceRect,
+                                                       Invert invert, GrAA aa) {
+    if (this->numAnalyticFPs() >= fMaxAnalyticFPs) {
         return ClipResult::kNotClipped;
     }
 
-    GrClipEdgeType edgeType;
-    if (Invert::kNo == invert) {
-        edgeType = (GrAA::kYes == aa) ? GrClipEdgeType::kFillAA : GrClipEdgeType::kFillBW;
-    } else {
-        edgeType = (GrAA::kYes == aa) ? GrClipEdgeType::kInverseFillAA
-                                      : GrClipEdgeType::kInverseFillBW;
+    fAnalyticFPs.push_back(GrAARectEffect::Make(GetClipEdgeType(invert, aa), deviceSpaceRect));
+    SkASSERT(fAnalyticFPs.back());
+
+    return ClipResult::kClipped;
+}
+
+GrReducedClip::ClipResult GrReducedClip::addAnalyticFP(const SkRRect& deviceSpaceRRect,
+                                                       Invert invert, GrAA aa) {
+    if (this->numAnalyticFPs() >= fMaxAnalyticFPs) {
+        return ClipResult::kNotClipped;
     }
 
-    if (auto fp = make_analytic_clip_fp(edgeType, deviceSpaceShape, *fCaps)) {
+    if (auto fp = GrRRectEffect::Make(GetClipEdgeType(invert, aa), deviceSpaceRRect, *fCaps)) {
         fAnalyticFPs.push_back(std::move(fp));
         return ClipResult::kClipped;
     }
 
-    return ClipResult::kNotClipped;
+    SkPath deviceSpacePath;
+    deviceSpacePath.setIsVolatile(true);
+    deviceSpacePath.addRRect(deviceSpaceRRect);
+    return this->addAnalyticFP(deviceSpacePath, invert, aa);
 }
 
-std::unique_ptr<GrFragmentProcessor> make_analytic_clip_fp(GrClipEdgeType edgeType,
-                                                           const SkRect& deviceSpaceRect,
-                                                           const GrShaderCaps&) {
-    return GrConvexPolyEffect::Make(edgeType, deviceSpaceRect);
-}
+GrReducedClip::ClipResult GrReducedClip::addAnalyticFP(const SkPath& deviceSpacePath,
+                                                       Invert invert, GrAA aa) {
+    if (this->numAnalyticFPs() >= fMaxAnalyticFPs) {
+        return ClipResult::kNotClipped;
+    }
 
-std::unique_ptr<GrFragmentProcessor> make_analytic_clip_fp(GrClipEdgeType edgeType,
-                                                           const SkRRect& deviceSpaceRRect,
-                                                           const GrShaderCaps& caps) {
-    return GrRRectEffect::Make(edgeType, deviceSpaceRRect, caps);
-}
+    if (auto fp = GrConvexPolyEffect::Make(GetClipEdgeType(invert, aa), deviceSpacePath)) {
+        fAnalyticFPs.push_back(std::move(fp));
+        return ClipResult::kClipped;
+    }
+
+    if (fCCPR && GrAA::kYes == aa && fCCPR->canMakeClipProcessor(deviceSpacePath)) {
+        // Set aside CCPR paths for later. We will create their clip FPs once we know the ID of the
+        // opList they will operate in.
+        SkPath& ccprClipPath = fCCPRClipPaths.push_back(deviceSpacePath);
+        if (Invert::kYes == invert) {
+            ccprClipPath.toggleInverseFillType();
+        }
+        return ClipResult::kClipped;
+    }
 
-std::unique_ptr<GrFragmentProcessor> make_analytic_clip_fp(GrClipEdgeType edgeType,
-                                                           const SkPath& deviceSpacePath,
-                                                           const GrShaderCaps&) {
-    return GrConvexPolyEffect::Make(edgeType, deviceSpacePath);
+    return ClipResult::kNotClipped;
 }
 
 void GrReducedClip::makeEmpty() {
@@ -902,3 +953,22 @@ bool GrReducedClip::drawStencilClipMask(GrContext* context,
     }
     return true;
 }
+
+std::unique_ptr<GrFragmentProcessor> GrReducedClip::finishAndDetachAnalyticFPs(uint32_t opListID,
+                                                                               int rtWidth,
+                                                                               int rtHeight) {
+    // Make sure finishAndDetachAnalyticFPs hasn't been called already.
+    SkDEBUGCODE(for (const auto& fp : fAnalyticFPs) { SkASSERT(fp); })
+
+    if (!fCCPRClipPaths.empty()) {
+        fAnalyticFPs.reserve(fAnalyticFPs.count() + fCCPRClipPaths.count());
+        for (const SkPath& ccprClipPath : fCCPRClipPaths) {
+            SkASSERT(fHasScissor);
+            auto fp = fCCPR->makeClipProcessor(opListID, ccprClipPath, fScissor, rtWidth, rtHeight);
+            fAnalyticFPs.push_back(std::move(fp));
+        }
+        fCCPRClipPaths.reset();
+    }
+
+    return GrFragmentProcessor::RunInSeries(fAnalyticFPs.begin(), fAnalyticFPs.count());
+}
diff --git a/src/gpu/GrReducedClip.h b/src/gpu/GrReducedClip.h
index 0922d9126f..f47b09ab64 100644
--- a/src/gpu/GrReducedClip.h
+++ b/src/gpu/GrReducedClip.h
@@ -14,6 +14,7 @@
 #include "SkTLList.h"
 
 class GrContext;
+class GrCoverageCountingPathRenderer;
 class GrRenderTargetContext;
 
 /**
@@ -26,7 +27,15 @@ public:
     using ElementList = SkTLList<SkClipStack::Element, 16>;
 
     GrReducedClip(const SkClipStack&, const SkRect& queryBounds, const GrShaderCaps* caps,
-                  int maxWindowRectangles = 0, int maxAnalyticFPs = 0);
+                  int maxWindowRectangles = 0, int maxAnalyticFPs = 0,
+                  GrCoverageCountingPathRenderer* = nullptr);
+
+    enum class InitialState : bool {
+        kAllIn,
+        kAllOut
+    };
+
+    InitialState initialState() const { return fInitialState; }
 
     /**
      * If hasScissor() is true, the clip mask is not valid outside this rect and the caller must
@@ -50,13 +59,6 @@ public:
      */
     const GrWindowRectangles& windowRectangles() const { return fWindowRects; }
 
-    int numAnalyticFPs() const { return fAnalyticFPs.count(); }
-
-    std::unique_ptr<GrFragmentProcessor> detachAnalyticFPs() {
-        SkDEBUGCODE(for (const auto& fp : fAnalyticFPs) { SkASSERT(fp); })
-        return GrFragmentProcessor::RunInSeries(fAnalyticFPs.begin(), fAnalyticFPs.count());
-    }
-
     /**
      * An ordered list of clip elements that could not be skipped or implemented by other means. If
      * nonempty, the caller must create an alpha and/or stencil mask for these elements and apply it
@@ -67,8 +69,10 @@ public:
     /**
     * If maskElements() are nonempty, uniquely identifies the region of the clip mask that falls
     * inside of scissor().
+     *
     * NOTE: since clip elements might fall outside the query bounds, different regions of the same
     * clip stack might have more or less restrictive IDs.
+     *
     * FIXME: this prevents us from reusing a sub-rect of a perfectly good mask when that rect has
     * been assigned a less restrictive ID.
      */
@@ -79,16 +83,23 @@ public:
     */
     bool maskRequiresAA() const { SkASSERT(!fMaskElements.isEmpty()); return fMaskRequiresAA; }
 
-    enum class InitialState : bool {
-        kAllIn,
-        kAllOut
-    };
-
-    InitialState initialState() const { return fInitialState; }
-
     bool drawAlphaClipMask(GrRenderTargetContext*) const;
     bool drawStencilClipMask(GrContext*, GrRenderTargetContext*) const;
 
+    int numAnalyticFPs() const { return fAnalyticFPs.count() + fCCPRClipPaths.count(); }
+
+    /**
+     * Called once the client knows the ID of the opList that the clip FPs will operate in. This
+     * method finishes any outstanding work that was waiting for the opList ID, then detaches and
+     * returns this class's list of FPs that complete the clip.
+     *
+     * NOTE: this must be called AFTER producing the clip mask (if any) because draw calls on
+     * the render target context, surface allocations, and even switching render targets (pre MDB)
+     * may cause flushes or otherwise change which opList the actual draw is going into.
+     */
+    std::unique_ptr<GrFragmentProcessor> finishAndDetachAnalyticFPs(uint32_t opListID, int rtWidth,
+                                                                    int rtHeight);
+
 private:
     void walkStack(const SkClipStack&, const SkRect& queryBounds);
 
@@ -98,11 +109,11 @@ private:
         kMadeEmpty
     };
 
-    // Clips the the given element's interior out of the final clip.
+    // Intersects the clip with the element's interior, regardless of inverse fill type.
     // NOTE: do not call for elements followed by ops that can grow the clip.
     ClipResult clipInsideElement(const Element*);
 
-    // Clips the the given element's exterior out of the final clip.
+    // Intersects the clip with the element's exterior, regardless of inverse fill type.
     // NOTE: do not call for elements followed by ops that can grow the clip.
     ClipResult clipOutsideElement(const Element*);
 
@@ -113,23 +124,29 @@ private:
         kYes = true
     };
 
-    template<typename T> ClipResult addAnalyticFP(const T& deviceSpaceShape, Invert, GrAA);
+    static GrClipEdgeType GetClipEdgeType(Invert, GrAA);
+    ClipResult addAnalyticFP(const SkRect& deviceSpaceRect, Invert, GrAA);
+    ClipResult addAnalyticFP(const SkRRect& deviceSpaceRRect, Invert, GrAA);
+    ClipResult addAnalyticFP(const SkPath& deviceSpacePath, Invert, GrAA);
 
     void makeEmpty();
 
     const GrShaderCaps* fCaps;
     const int fMaxWindowRectangles;
     const int fMaxAnalyticFPs;
+    GrCoverageCountingPathRenderer* const fCCPR;
+
+    InitialState fInitialState;
     SkIRect fScissor;
     bool fHasScissor;
     SkRect fAAClipRect;
     uint32_t fAAClipRectGenID; // GenID the mask will have if includes the AA clip rect.
     GrWindowRectangles fWindowRects;
-    SkSTArray<4, std::unique_ptr<GrFragmentProcessor>> fAnalyticFPs;
     ElementList fMaskElements;
     uint32_t fMaskGenID;
     bool fMaskRequiresAA;
-    InitialState fInitialState;
+    SkSTArray<4, std::unique_ptr<GrFragmentProcessor>> fAnalyticFPs;
+    SkSTArray<4, SkPath> fCCPRClipPaths; // Will convert to FPs once we have an opList ID for CCPR.
 };
 
 #endif
diff --git a/src/gpu/GrRenderTargetContextPriv.h b/src/gpu/GrRenderTargetContextPriv.h
index 271badb64c..f74471b038 100644
--- a/src/gpu/GrRenderTargetContextPriv.h
+++ b/src/gpu/GrRenderTargetContextPriv.h
@@ -108,6 +108,7 @@ public:
         return fRenderTargetContext->fRenderTargetProxy->uniqueID();
     }
 
+    uint32_t testingOnly_getOpListID();
     uint32_t testingOnly_addDrawOp(std::unique_ptr<GrDrawOp>);
     uint32_t testingOnly_addDrawOp(const GrClip&, std::unique_ptr<GrDrawOp>);
 
diff --git a/src/gpu/ccpr/GrCCPRAtlas.h b/src/gpu/ccpr/GrCCPRAtlas.h
index a9ccd73c1c..aa77e351d8 100644
--- a/src/gpu/ccpr/GrCCPRAtlas.h
+++ b/src/gpu/ccpr/GrCCPRAtlas.h
@@ -36,7 +36,7 @@ public:
     sk_sp<GrRenderTargetContext> SK_WARN_UNUSED_RESULT finalize(GrOnFlushResourceProvider*,
                                                                 std::unique_ptr<GrDrawOp> atlasOp);
 
-    sk_sp<GrTextureProxy> textureProxy() const { return fTextureProxy; }
+    GrTextureProxy* textureProxy() const { return fTextureProxy.get(); }
 
 private:
     class Node;
diff --git a/src/gpu/ccpr/GrCCPRClipProcessor.cpp b/src/gpu/ccpr/GrCCPRClipProcessor.cpp
new file mode 100644
index 0000000000..7edcf92f3f
--- /dev/null
+++ b/src/gpu/ccpr/GrCCPRClipProcessor.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrCCPRClipProcessor.h"
+
+#include "GrTexture.h"
+#include "GrTextureProxy.h"
+#include "SkMakeUnique.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+
+GrCCPRClipProcessor::GrCCPRClipProcessor(const ClipPath* clipPath, MustCheckBounds mustCheckBounds,
+                                         SkPath::FillType overrideFillType)
+        : INHERITED(kCCPRClipProcessor_ClassID, kCompatibleWithCoverageAsAlpha_OptimizationFlag)
+        , fClipPath(clipPath)
+        , fMustCheckBounds((bool)mustCheckBounds)
+        , fOverrideFillType(overrideFillType)
+        , fAtlasAccess(sk_ref_sp(fClipPath->atlasLazyProxy()), GrSamplerState::Filter::kNearest,
+                       GrSamplerState::WrapMode::kClamp, kFragment_GrShaderFlag) {
+    this->addTextureSampler(&fAtlasAccess);
+}
+
+std::unique_ptr<GrFragmentProcessor> GrCCPRClipProcessor::clone() const {
+    return skstd::make_unique<GrCCPRClipProcessor>(fClipPath, MustCheckBounds(fMustCheckBounds),
+                                                   fOverrideFillType);
+}
+
+void GrCCPRClipProcessor::onGetGLSLProcessorKey(const GrShaderCaps&,
+                                                GrProcessorKeyBuilder* b) const {
+    b->add32((fOverrideFillType << 1) | (int)fMustCheckBounds);
+}
+
+bool GrCCPRClipProcessor::onIsEqual(const GrFragmentProcessor& fp) const {
+    const GrCCPRClipProcessor& that = fp.cast<GrCCPRClipProcessor>();
+    // Each ClipPath path has a unique atlas proxy, so hasSameSamplersAndAccesses should have
+    // already weeded out FPs with different ClipPaths.
+    SkASSERT(that.fClipPath->deviceSpacePath().getGenerationID() ==
+             fClipPath->deviceSpacePath().getGenerationID());
+    return that.fOverrideFillType == fOverrideFillType;
+}
+
+class GrCCPRClipProcessor::Impl : public GrGLSLFragmentProcessor {
+public:
+    void emitCode(EmitArgs& args) override {
+        const GrCCPRClipProcessor& proc = args.fFp.cast<GrCCPRClipProcessor>();
+        GrGLSLUniformHandler* uniHandler = args.fUniformHandler;
+        GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
+
+        f->codeAppend ("half coverage;");
+        if (proc.fMustCheckBounds) {
+            const char* pathIBounds;
+            fPathIBoundsUniform = uniHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+                                                         "path_ibounds", &pathIBounds);
+            f->codeAppendf("if (all(greaterThan(float4(sk_FragCoord.xy, %s.zw), "
+                               "float4(%s.xy, sk_FragCoord.xy)))) {",
+                           pathIBounds, pathIBounds);
+        }
+
+        const char* atlasTransform;
+        fAtlasTransformUniform = uniHandler->addUniform(kFragment_GrShaderFlag, kFloat4_GrSLType,
+                                                        "atlas_transform", &atlasTransform);
+        f->codeAppendf("float2 texcoord = sk_FragCoord.xy * %s.xy + %s.zw;",
+                       atlasTransform, atlasTransform);
+
+        f->codeAppend ("half coverage_count = ");
+        f->appendTextureLookup(args.fTexSamplers[0], "texcoord", kHalf2_GrSLType);
+        f->codeAppend (".a;");
+
+        if (SkPath::kEvenOdd_FillType == proc.fOverrideFillType ||
+            SkPath::kInverseEvenOdd_FillType == proc.fOverrideFillType) {
+            f->codeAppend ("half t = mod(abs(coverage_count), 2);");
+            f->codeAppend ("coverage = 1 - abs(t - 1);");
+        } else {
+            f->codeAppend ("coverage = min(abs(coverage_count), 1);");
+        }
+
+        if (proc.fMustCheckBounds) {
+            f->codeAppend ("} else {");
+            f->codeAppend (    "coverage = 0;");
+            f->codeAppend ("}");
+        }
+
+        if (SkPath::IsInverseFillType(proc.fOverrideFillType)) {
+            f->codeAppend ("coverage = 1 - coverage;");
+        }
+
+        f->codeAppendf("%s = %s * coverage;", args.fOutputColor, args.fInputColor);
+    }
+
+    void onSetData(const GrGLSLProgramDataManager& pdman,
+                   const GrFragmentProcessor& fp) override {
+        const GrCCPRClipProcessor& proc = fp.cast<GrCCPRClipProcessor>();
+        if (proc.fMustCheckBounds) {
+            const SkRect pathIBounds = SkRect::Make(proc.fClipPath->pathDevIBounds());
+            pdman.set4f(fPathIBoundsUniform, pathIBounds.left(), pathIBounds.top(),
+                        pathIBounds.right(), pathIBounds.bottom());
+        }
+        const SkVector& scale = proc.fClipPath->atlasScale();
+        const SkVector& trans = proc.fClipPath->atlasTranslate();
+        pdman.set4f(fAtlasTransformUniform, scale.x(), scale.y(), trans.x(), trans.y());
+    }
+
+private:
+    UniformHandle fPathIBoundsUniform;
+    UniformHandle fAtlasTransformUniform;
+};
+
+GrGLSLFragmentProcessor* GrCCPRClipProcessor::onCreateGLSLInstance() const {
+    return new Impl();
+}
diff --git a/src/gpu/ccpr/GrCCPRClipProcessor.h b/src/gpu/ccpr/GrCCPRClipProcessor.h
new file mode 100644
index 0000000000..c6f4d947d4
--- /dev/null
+++ b/src/gpu/ccpr/GrCCPRClipProcessor.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCPRClipProcessor_DEFINED
+#define GrCCPRClipProcessor_DEFINED
+
+#include "GrFragmentProcessor.h"
+#include "ccpr/GrCoverageCountingPathRenderer.h"
+
+class GrCCPRClipProcessor : public GrFragmentProcessor {
+public:
+    using ClipPath = GrCoverageCountingPathRenderer::ClipPath;
+
+    enum class MustCheckBounds : bool {
+        kNo = false,
+        kYes = true
+    };
+
+    GrCCPRClipProcessor(const ClipPath*, MustCheckBounds, SkPath::FillType overrideFillType);
+
+    const char* name() const override { return "GrCCPRClipProcessor"; }
+    std::unique_ptr<GrFragmentProcessor> clone() const override;
+    void onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const override;
+    bool onIsEqual(const GrFragmentProcessor&) const override;
+    GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
+
+private:
+    const ClipPath* const fClipPath;
+    const bool fMustCheckBounds;
+    const SkPath::FillType fOverrideFillType;
+    const TextureSampler fAtlasAccess;
+
+    class Impl;
+
+    typedef GrFragmentProcessor INHERITED;
+};
+
+#endif
diff --git a/src/gpu/ccpr/GrCCPRCoverageOp.cpp b/src/gpu/ccpr/GrCCPRCoverageOp.cpp
index 3ed8a76c9b..4d985a563f 100644
--- a/src/gpu/ccpr/GrCCPRCoverageOp.cpp
+++ b/src/gpu/ccpr/GrCCPRCoverageOp.cpp
@@ -77,6 +77,10 @@ void GrCCPRCoverageOpsBuilder::parsePath(const SkMatrix& m, const SkPath& path,
     this->parsePath(path, fLocalDevPtsBuffer.get());
 }
 
+void GrCCPRCoverageOpsBuilder::parseDeviceSpacePath(const SkPath& deviceSpacePath) {
+    this->parsePath(deviceSpacePath, SkPathPriv::PointData(deviceSpacePath));
+}
+
 void GrCCPRCoverageOpsBuilder::parsePath(const SkPath& path, const SkPoint* deviceSpacePts) {
     SkASSERT(!fParsingPath);
     SkDEBUGCODE(fParsingPath = true);
diff --git a/src/gpu/ccpr/GrCCPRCoverageOp.h b/src/gpu/ccpr/GrCCPRCoverageOp.h
index b75783ea9a..ba818a1b12 100644
--- a/src/gpu/ccpr/GrCCPRCoverageOp.h
+++ b/src/gpu/ccpr/GrCCPRCoverageOp.h
@@ -64,6 +64,11 @@ public:
     //                | 1  1 |
     void parsePath(const SkMatrix&, const SkPath&, SkRect* devBounds, SkRect* devBounds45);
 
+    // Parses a device-space SkPath into a temporary staging area. The path will not yet be included
+    // in the next Op unless there is a matching call to saveParsedPath. The user must complement
+    // this with a following call to either saveParsedPath or discardParsedPath.
+    void parseDeviceSpacePath(const SkPath&);
+
     // Commits the currently-parsed path from staging to the next Op, and specifies whether the mask
     // should be rendered with a scissor clip in effect. Accepts an optional post-device-space
     // translate for placement in an atlas.
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index d32ad12690..95fd619dcd 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -16,10 +16,25 @@
 #include "SkPathOps.h"
 #include "GrOpFlushState.h"
 #include "GrRenderTargetOpList.h"
+#include "GrTexture.h"
 #include "GrStyle.h"
-#include "ccpr/GrCCPRPathProcessor.h"
+#include "ccpr/GrCCPRClipProcessor.h"
 
-using DrawPathsOp = GrCoverageCountingPathRenderer::DrawPathsOp;
+// Shorthand for keeping line lengths under control with nested classes...
+using CCPR = GrCoverageCountingPathRenderer;
+
+// If a path spans more pixels than this, we need to crop it or else analytic AA can run out of fp32
+// precision.
+static constexpr float kPathCropThreshold = 1 << 16;
+
+static void crop_path(const SkPath& path, const SkIRect& cropbox, SkPath* out) {
+    SkPath cropPath;
+    cropPath.addRect(SkRect::Make(cropbox));
+    if (!Op(cropPath, path, kIntersect_SkPathOp, out)) {
+        // This can fail if the PathOps encounter NaN or infinities.
+        out->reset();
+    }
+}
 
 bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
     const GrShaderCaps& shaderCaps = *caps.shaderCaps();
@@ -92,36 +107,31 @@ bool GrCoverageCountingPathRenderer::onDrawPath(const DrawPathArgs& args) {
     return true;
 }
 
-GrCoverageCountingPathRenderer::DrawPathsOp::DrawPathsOp(GrCoverageCountingPathRenderer* ccpr,
-                                                         const DrawPathArgs& args, GrColor color)
+CCPR::DrawPathsOp::DrawPathsOp(GrCoverageCountingPathRenderer* ccpr, const DrawPathArgs& args,
+                               GrColor color)
         : INHERITED(ClassID())
         , fCCPR(ccpr)
         , fSRGBFlags(GrPipeline::SRGBFlagsFromPaint(args.fPaint))
        , fProcessors(std::move(args.fPaint))
         , fTailDraw(&fHeadDraw)
-        , fOwningRTPendingOps(nullptr) {
+        , fOwningRTPendingPaths(nullptr) {
     SkDEBUGCODE(++fCCPR->fPendingDrawOpsCount);
     SkDEBUGCODE(fBaseInstance = -1);
-    SkDEBUGCODE(fDebugInstanceCount = 1;)
-    SkDEBUGCODE(fDebugSkippedInstances = 0;)
+    SkDEBUGCODE(fInstanceCount = 1;)
+    SkDEBUGCODE(fNumSkippedInstances = 0;)
     GrRenderTargetContext* const rtc = args.fRenderTargetContext;
 
     SkRect devBounds;
     args.fViewMatrix->mapRect(&devBounds, args.fShape->bounds());
     args.fClip->getConservativeBounds(rtc->width(), rtc->height(), &fHeadDraw.fClipIBounds,
                                       nullptr);
-    if (SkTMax(devBounds.height(), devBounds.width()) > (1 << 16)) {
-        // The path is too large. We need to crop it or risk running out of fp32 precision for
-        // analytic AA.
-        SkPath cropPath, path;
-        cropPath.addRect(SkRect::Make(fHeadDraw.fClipIBounds));
+    if (SkTMax(devBounds.height(), devBounds.width()) > kPathCropThreshold) {
+        // The path is too large. We need to crop it or analytic AA can run out of fp32 precision.
+        SkPath path;
         args.fShape->asPath(&path);
         path.transform(*args.fViewMatrix);
         fHeadDraw.fMatrix.setIdentity();
-        if (!Op(cropPath, path, kIntersect_SkPathOp, &fHeadDraw.fPath)) {
-            // This can fail if the PathOps encounter NaN or infinities.
-            fHeadDraw.fPath.reset();
-        }
+        crop_path(path, fHeadDraw.fClipIBounds, &fHeadDraw.fPath);
         devBounds = fHeadDraw.fPath.getBounds();
     } else {
         fHeadDraw.fMatrix = *args.fViewMatrix;
@@ -134,20 +144,20 @@ GrCoverageCountingPathRenderer::DrawPathsOp::DrawPathsOp(GrCoverageCountingPathR
     this->setBounds(devBounds, GrOp::HasAABloat::kYes, GrOp::IsZeroArea::kNo);
 }
 
-GrCoverageCountingPathRenderer::DrawPathsOp::~DrawPathsOp() {
-    if (fOwningRTPendingOps) {
+CCPR::DrawPathsOp::~DrawPathsOp() {
+    if (fOwningRTPendingPaths) {
         // Remove CCPR's dangling pointer to this Op before deleting it.
-        SkASSERT(!fCCPR->fFlushing);
-        fOwningRTPendingOps->fOpList.remove(this);
+        fOwningRTPendingPaths->fDrawOps.remove(this);
     }
     SkDEBUGCODE(--fCCPR->fPendingDrawOpsCount);
 }
 
-GrDrawOp::RequiresDstTexture DrawPathsOp::finalize(const GrCaps& caps, const GrAppliedClip* clip,
-                                                   GrPixelConfigIsClamped dstIsClamped) {
+GrDrawOp::RequiresDstTexture CCPR::DrawPathsOp::finalize(const GrCaps& caps,
+                                                         const GrAppliedClip* clip,
+                                                         GrPixelConfigIsClamped dstIsClamped) {
     SkASSERT(!fCCPR->fFlushing);
     // There should only be one single path draw in this Op right now.
-    SkASSERT(1 == fDebugInstanceCount);
+    SkASSERT(1 == fInstanceCount);
     SkASSERT(&fHeadDraw == fTailDraw);
     GrProcessorSet::Analysis analysis = fProcessors.finalize(
             fHeadDraw.fColor, GrProcessorAnalysisCoverage::kSingleChannel, clip, false, caps,
@@ -155,14 +165,14 @@ GrDrawOp::RequiresDstTexture DrawPathsOp::finalize(const GrCaps& caps, const GrA
     return analysis.requiresDstTexture() ? RequiresDstTexture::kYes : RequiresDstTexture::kNo;
 }
 
-bool DrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps& caps) {
+bool CCPR::DrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps& caps) {
     DrawPathsOp* that = op->cast<DrawPathsOp>();
     SkASSERT(fCCPR == that->fCCPR);
     SkASSERT(!fCCPR->fFlushing);
-    SkASSERT(fOwningRTPendingOps);
-    SkASSERT(fDebugInstanceCount);
-    SkASSERT(!that->fOwningRTPendingOps || that->fOwningRTPendingOps == fOwningRTPendingOps);
-    SkASSERT(that->fDebugInstanceCount);
+    SkASSERT(fOwningRTPendingPaths);
+    SkASSERT(fInstanceCount);
+    SkASSERT(!that->fOwningRTPendingPaths || that->fOwningRTPendingPaths == fOwningRTPendingPaths);
+    SkASSERT(that->fInstanceCount);
 
     if (this->getFillType() != that->getFillType() ||
         fSRGBFlags != that->fSRGBFlags ||
@@ -170,83 +180,152 @@ bool DrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps& caps) {
         return false;
     }
 
-    fTailDraw->fNext = &fOwningRTPendingOps->fDrawsAllocator.push_back(that->fHeadDraw);
+    fTailDraw->fNext = &fOwningRTPendingPaths->fDrawsAllocator.push_back(that->fHeadDraw);
     fTailDraw = (that->fTailDraw == &that->fHeadDraw) ? fTailDraw->fNext : that->fTailDraw;
 
     this->joinBounds(*that);
 
-    SkDEBUGCODE(fDebugInstanceCount += that->fDebugInstanceCount;)
-    SkDEBUGCODE(that->fDebugInstanceCount = 0);
+    SkDEBUGCODE(fInstanceCount += that->fInstanceCount;)
+    SkDEBUGCODE(that->fInstanceCount = 0);
     return true;
 }
 
-void DrawPathsOp::wasRecorded(GrRenderTargetOpList* opList) {
+void CCPR::DrawPathsOp::wasRecorded(GrRenderTargetOpList* opList) {
     SkASSERT(!fCCPR->fFlushing);
-    SkASSERT(!fOwningRTPendingOps);
-    fOwningRTPendingOps = &fCCPR->fRTPendingOpsMap[opList->uniqueID()];
-    fOwningRTPendingOps->fOpList.addToTail(this);
+    SkASSERT(!fOwningRTPendingPaths);
+    fOwningRTPendingPaths = &fCCPR->fRTPendingPathsMap[opList->uniqueID()];
+    fOwningRTPendingPaths->fDrawOps.addToTail(this);
 }
 
-void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
-                                              const uint32_t* opListIDs, int numOpListIDs,
-                                              SkTArray<sk_sp<GrRenderTargetContext>>* results) {
+bool GrCoverageCountingPathRenderer::canMakeClipProcessor(const SkPath& deviceSpacePath) const {
+    if (!fDrawCachablePaths && !deviceSpacePath.isVolatile()) {
+        return false;
+    }
+
+    if (SkPathPriv::ConicWeightCnt(deviceSpacePath)) {
+        return false;
+    }
+
+    return true;
+}
+
+std::unique_ptr<GrFragmentProcessor>
+GrCoverageCountingPathRenderer::makeClipProcessor(uint32_t opListID, const SkPath& deviceSpacePath,
+                                                  const SkIRect& accessRect, int rtWidth,
+                                                  int rtHeight) {
+    using MustCheckBounds = GrCCPRClipProcessor::MustCheckBounds;
+
     SkASSERT(!fFlushing);
-    SkDEBUGCODE(fFlushing = true;)
+    SkASSERT(this->canMakeClipProcessor(deviceSpacePath));
 
-    if (fRTPendingOpsMap.empty()) {
-        return; // Nothing to draw.
+    ClipPath& clipPath = fRTPendingPathsMap[opListID].fClipPaths[deviceSpacePath.getGenerationID()];
+    if (clipPath.isUninitialized()) {
+        // This ClipPath was just created during lookup. Initialize it.
+        clipPath.init(deviceSpacePath, accessRect, rtWidth, rtHeight);
+    } else {
+        clipPath.addAccess(accessRect);
     }
 
-    this->setupPerFlushResources(onFlushRP, opListIDs, numOpListIDs, results);
+    bool mustCheckBounds = !clipPath.pathDevIBounds().contains(accessRect);
+    return skstd::make_unique<GrCCPRClipProcessor>(&clipPath, MustCheckBounds(mustCheckBounds),
+                                                   deviceSpacePath.getFillType());
+}
+
+void CCPR::ClipPath::init(const SkPath& deviceSpacePath, const SkIRect& accessRect, int rtWidth,
+                          int rtHeight) {
+    SkASSERT(this->isUninitialized());
 
-    // Erase these last, once we are done accessing data from the SingleDraw allocators.
-    for (int i = 0; i < numOpListIDs; ++i) {
-        fRTPendingOpsMap.erase(opListIDs[i]);
+    fAtlasLazyProxy = GrSurfaceProxy::MakeLazy([this](GrResourceProvider* resourceProvider,
+                                                      GrSurfaceOrigin* outOrigin) {
+        SkASSERT(fHasAtlas);
+        SkASSERT(!fHasAtlasTransform);
+
+        GrTextureProxy* textureProxy = fAtlas ? fAtlas->textureProxy() : nullptr;
+        if (!textureProxy || !textureProxy->instantiate(resourceProvider)) {
+            fAtlasScale = fAtlasTranslate = {0, 0};
+            SkDEBUGCODE(fHasAtlasTransform = true);
+            return sk_sp<GrTexture>();
+        }
+
+        fAtlasScale = {1.f / textureProxy->width(), 1.f / textureProxy->height()};
+        fAtlasTranslate = {fAtlasOffsetX * fAtlasScale.x(), fAtlasOffsetY * fAtlasScale.y()};
+        if (kBottomLeft_GrSurfaceOrigin == textureProxy->origin()) {
+            fAtlasScale.fY = -fAtlasScale.y();
+            fAtlasTranslate.fY = 1 - fAtlasTranslate.y();
+        }
+        SkDEBUGCODE(fHasAtlasTransform = true);
+
+        *outOrigin = textureProxy->origin();
+        return sk_ref_sp(textureProxy->priv().peekTexture());
+    }, GrSurfaceProxy::Renderable::kYes, kAlpha_half_GrPixelConfig);
+
+    const SkRect& pathDevBounds = deviceSpacePath.getBounds();
+    if (SkTMax(pathDevBounds.height(), pathDevBounds.width()) > kPathCropThreshold) {
+        // The path is too large. We need to crop it or analytic AA can run out of fp32 precision.
+        crop_path(deviceSpacePath, SkIRect::MakeWH(rtWidth, rtHeight), &fDeviceSpacePath);
+    } else {
+        fDeviceSpacePath = deviceSpacePath;
     }
+    deviceSpacePath.getBounds().roundOut(&fPathDevIBounds);
+    fAccessRect = accessRect;
 }
 
-void GrCoverageCountingPathRenderer::setupPerFlushResources(GrOnFlushResourceProvider* onFlushRP,
-                                                            const uint32_t* opListIDs,
-                                                            int numOpListIDs,
-                                                            SkTArray<sk_sp<GrRenderTargetContext>>* results) {
-    using ScissorMode = GrCCPRCoverageOpsBuilder::ScissorMode;
+void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
+                                              const uint32_t* opListIDs, int numOpListIDs,
+                                              SkTArray<sk_sp<GrRenderTargetContext>>* results) {
     using PathInstance = GrCCPRPathProcessor::Instance;
 
+    SkASSERT(!fFlushing);
     SkASSERT(!fPerFlushIndexBuffer);
     SkASSERT(!fPerFlushVertexBuffer);
     SkASSERT(!fPerFlushInstanceBuffer);
     SkASSERT(fPerFlushAtlases.empty());
+    SkDEBUGCODE(fFlushing = true;)
+
+    if (fRTPendingPathsMap.empty()) {
+        return; // Nothing to draw.
+    }
 
     fPerFlushResourcesAreValid = false;
 
-    // Gather the Ops that are being flushed.
+    // Count the paths that are being flushed.
     int maxTotalPaths = 0, maxPathPoints = 0, numSkPoints = 0, numSkVerbs = 0;
-    SkTInternalLList<DrawPathsOp> flushingOps;
+    SkDEBUGCODE(int numClipPaths = 0;)
     for (int i = 0; i < numOpListIDs; ++i) {
-        auto it = fRTPendingOpsMap.find(opListIDs[i]);
-        if (fRTPendingOpsMap.end() == it) {
+        auto it = fRTPendingPathsMap.find(opListIDs[i]);
+        if (fRTPendingPathsMap.end() == it) {
             continue;
         }
-        SkTInternalLList<DrawPathsOp>::Iter iter;
-        SkTInternalLList<DrawPathsOp>& rtFlushingOps = it->second.fOpList;
-        iter.init(rtFlushingOps, SkTInternalLList<DrawPathsOp>::Iter::kHead_IterStart);
-        while (DrawPathsOp* flushingOp = iter.get()) {
-            for (const auto* draw = &flushingOp->fHeadDraw; draw; draw = draw->fNext) {
+        const RTPendingPaths& rtPendingPaths = it->second;
+
+        SkTInternalLList<DrawPathsOp>::Iter drawOpsIter;
+        drawOpsIter.init(rtPendingPaths.fDrawOps,
+                         SkTInternalLList<DrawPathsOp>::Iter::kHead_IterStart);
+        while (DrawPathsOp* op = drawOpsIter.get()) {
+            for (const DrawPathsOp::SingleDraw* draw = op->head(); draw; draw = draw->fNext) {
                 ++maxTotalPaths;
                 maxPathPoints = SkTMax(draw->fPath.countPoints(), maxPathPoints);
                 numSkPoints += draw->fPath.countPoints();
                 numSkVerbs += draw->fPath.countVerbs();
             }
-            flushingOp->fOwningRTPendingOps = nullptr; // Owner is about to change to 'flushingOps'.
-            iter.next();
+            drawOpsIter.next();
+        }
+
+        maxTotalPaths += rtPendingPaths.fClipPaths.size();
+        SkDEBUGCODE(numClipPaths += rtPendingPaths.fClipPaths.size());
+        for (const auto& clipsIter : rtPendingPaths.fClipPaths) {
+            const SkPath& path = clipsIter.second.deviceSpacePath();
+            maxPathPoints = SkTMax(path.countPoints(), maxPathPoints);
+            numSkPoints += path.countPoints();
+            numSkVerbs += path.countVerbs();
         }
-        flushingOps.concat(std::move(rtFlushingOps));
     }
 
-    if (flushingOps.isEmpty()) {
+    if (!maxTotalPaths) {
         return; // Nothing to draw.
     }
 
+    // Allocate GPU buffers.
     fPerFlushIndexBuffer = GrCCPRPathProcessor::FindOrMakeIndexBuffer(onFlushRP);
     if (!fPerFlushIndexBuffer) {
         SkDebugf("WARNING: failed to allocate ccpr path index buffer.\n");
@@ -260,7 +339,7 @@ void GrCoverageCountingPathRenderer::setupPerFlushResources(GrOnFlushResourcePro
 
     fPerFlushInstanceBuffer = onFlushRP->makeBuffer(kVertex_GrBufferType,
-                                                     maxTotalPaths * sizeof(PathInstance));
+                                                    maxTotalPaths * sizeof(PathInstance));
     if (!fPerFlushInstanceBuffer) {
         SkDebugf("WARNING: failed to allocate path instance buffer. No paths will be drawn.\n");
         return;
@@ -271,86 +350,39 @@ void GrCoverageCountingPathRenderer::setupPerFlushResources(GrOnFlushResourcePro
     int pathInstanceIdx = 0;
 
     GrCCPRCoverageOpsBuilder atlasOpsBuilder(maxTotalPaths, maxPathPoints, numSkPoints, numSkVerbs);
-    GrCCPRAtlas* atlas = nullptr;
     SkDEBUGCODE(int skippedTotalPaths = 0;)
 
-    SkTInternalLList<DrawPathsOp>::Iter iter;
-    iter.init(flushingOps, SkTInternalLList<DrawPathsOp>::Iter::kHead_IterStart);
-    while (DrawPathsOp* drawPathOp = iter.get()) {
-        SkASSERT(drawPathOp->fDebugInstanceCount > 0);
-        SkASSERT(-1 == drawPathOp->fBaseInstance);
-        drawPathOp->fBaseInstance = pathInstanceIdx;
-
-        for (const auto* draw = &drawPathOp->fHeadDraw; draw; draw = draw->fNext) {
-            // parsePath gives us two tight bounding boxes: one in device space, as well as a second
-            // one rotated an additional 45 degrees. The path vertex shader uses these two bounding
-            // boxes to generate an octagon that circumscribes the path.
-            SkRect devBounds, devBounds45;
-            atlasOpsBuilder.parsePath(draw->fMatrix, draw->fPath, &devBounds, &devBounds45);
-
-            ScissorMode scissorMode;
-            SkIRect clippedDevIBounds;
-            devBounds.roundOut(&clippedDevIBounds);
-            if (draw->fClipIBounds.contains(clippedDevIBounds)) {
-                scissorMode = ScissorMode::kNonScissored;
-            } else if (clippedDevIBounds.intersect(draw->fClipIBounds)) {
-                scissorMode = ScissorMode::kScissored;
-            } else {
-                SkDEBUGCODE(++drawPathOp->fDebugSkippedInstances);
-                atlasOpsBuilder.discardParsedPath();
-                continue;
-            }
-
-            SkIPoint16 atlasLocation;
-            const int h = clippedDevIBounds.height(), w = clippedDevIBounds.width();
-            if (atlas && !atlas->addRect(w, h, &atlasLocation)) {
-                // The atlas is out of room and can't grow any bigger.
-                atlasOpsBuilder.emitOp(atlas->drawBounds());
-                if (pathInstanceIdx > drawPathOp->fBaseInstance) {
-                    drawPathOp->addAtlasBatch(atlas, pathInstanceIdx);
-                }
-                atlas = nullptr;
-            }
-
-            if (!atlas) {
-                atlas = &fPerFlushAtlases.emplace_back(*onFlushRP->caps(), w, h);
-                SkAssertResult(atlas->addRect(w, h, &atlasLocation));
-            }
-
-            const SkMatrix& m = draw->fMatrix;
-            const int16_t offsetX = atlasLocation.x() - static_cast<int16_t>(clippedDevIBounds.x()),
-                          offsetY = atlasLocation.y() - static_cast<int16_t>(clippedDevIBounds.y());
-
-            pathInstanceData[pathInstanceIdx++] = {
-                devBounds,
-                devBounds45,
-                {{m.getScaleX(), m.getSkewY(), m.getSkewX(), m.getScaleY()}},
-                {{m.getTranslateX(), m.getTranslateY()}},
-                {{offsetX, offsetY}},
-                draw->fColor
-            };
-
-            atlasOpsBuilder.saveParsedPath(scissorMode, clippedDevIBounds, offsetX, offsetY);
+    // Allocate atlas(es) and fill out GPU instance buffers.
+    for (int i = 0; i < numOpListIDs; ++i) {
+        auto it = fRTPendingPathsMap.find(opListIDs[i]);
+        if (fRTPendingPathsMap.end() == it) {
+            continue;
         }
-
-        SkASSERT(pathInstanceIdx == drawPathOp->fBaseInstance + drawPathOp->fDebugInstanceCount
-                                    - drawPathOp->fDebugSkippedInstances);
-        if (pathInstanceIdx > drawPathOp->fBaseInstance) {
-            drawPathOp->addAtlasBatch(atlas, pathInstanceIdx);
+        RTPendingPaths& rtPendingPaths = it->second;
+
+        SkTInternalLList<DrawPathsOp>::Iter drawOpsIter;
+        drawOpsIter.init(rtPendingPaths.fDrawOps,
+                         SkTInternalLList<DrawPathsOp>::Iter::kHead_IterStart);
+        while (DrawPathsOp* op = drawOpsIter.get()) {
+            pathInstanceIdx = op->setupResources(onFlushRP, &atlasOpsBuilder, pathInstanceData,
+                                                 pathInstanceIdx);
+            drawOpsIter.next();
+            SkDEBUGCODE(skippedTotalPaths += op->numSkippedInstances_debugOnly();)
        }
-        iter.next();
-        SkDEBUGCODE(skippedTotalPaths += drawPathOp->fDebugSkippedInstances;)
-    }
-    SkASSERT(pathInstanceIdx == maxTotalPaths - skippedTotalPaths);
-
-    if (atlas) {
-        atlasOpsBuilder.emitOp(atlas->drawBounds());
+        for (auto& clipsIter : rtPendingPaths.fClipPaths) {
+            clipsIter.second.placePathInAtlas(this, onFlushRP, &atlasOpsBuilder);
+        }
     }
 
     fPerFlushInstanceBuffer->unmap();
 
-    // Draw the coverage ops into their respective atlases.
+    SkASSERT(pathInstanceIdx == maxTotalPaths - skippedTotalPaths - numClipPaths);
+
+    if (!fPerFlushAtlases.empty()) {
+        atlasOpsBuilder.emitOp(fPerFlushAtlases.back().drawBounds());
+    }
+
     SkSTArray<4, std::unique_ptr<GrCCPRCoverageOp>> atlasOps(fPerFlushAtlases.count());
     if (!atlasOpsBuilder.finalize(onFlushRP, &atlasOps)) {
         SkDebugf("WARNING: failed to allocate ccpr atlas buffers. No paths will be drawn.\n");
@@ -358,6 +390,7 @@ void GrCoverageCountingPathRenderer::setupPerFlushResources(GrOnFlushResourcePro
     }
     SkASSERT(atlasOps.count() == fPerFlushAtlases.count());
 
+    // Draw the coverage ops into their respective atlases.
     GrTAllocator<GrCCPRAtlas>::Iter atlasIter(&fPerFlushAtlases);
     for (std::unique_ptr<GrCCPRCoverageOp>& atlasOp : atlasOps) {
         SkAssertResult(atlasIter.next());
@@ -373,7 +406,109 @@ void GrCoverageCountingPathRenderer::setupPerFlushResources(GrOnFlushResourcePro
 
     fPerFlushResourcesAreValid = true;
 }
 
-void DrawPathsOp::onExecute(GrOpFlushState* flushState) {
+int CCPR::DrawPathsOp::setupResources(GrOnFlushResourceProvider* onFlushRP,
+                                      GrCCPRCoverageOpsBuilder* atlasOpsBuilder,
+                                      GrCCPRPathProcessor::Instance* pathInstanceData,
+                                      int pathInstanceIdx) {
+    const GrCCPRAtlas* currentAtlas = nullptr;
+    SkASSERT(fInstanceCount > 0);
+    SkASSERT(-1 == fBaseInstance);
+    fBaseInstance = pathInstanceIdx;
+
+    for (const SingleDraw* draw = this->head(); draw; draw = draw->fNext) {
+        // parsePath gives us two tight bounding boxes: one in device space, as well as a second
+        // one rotated an additional 45 degrees. The path vertex shader uses these two bounding
+        // boxes to generate an octagon that circumscribes the path.
+        SkRect devBounds, devBounds45;
+        atlasOpsBuilder->parsePath(draw->fMatrix, draw->fPath, &devBounds, &devBounds45);
+
+        SkIRect devIBounds;
+        devBounds.roundOut(&devIBounds);
+
+        int16_t offsetX, offsetY;
+        GrCCPRAtlas* atlas = fCCPR->placeParsedPathInAtlas(onFlushRP, draw->fClipIBounds,
+                                                           devIBounds, &offsetX, &offsetY,
+                                                           atlasOpsBuilder);
+        if (!atlas) {
+            SkDEBUGCODE(++fNumSkippedInstances);
+            continue;
+        }
+        if (currentAtlas != atlas) {
+            if (currentAtlas) {
+                this->addAtlasBatch(currentAtlas, pathInstanceIdx);
+            }
+            currentAtlas = atlas;
+        }
+
+        const SkMatrix& m = draw->fMatrix;
+        pathInstanceData[pathInstanceIdx++] = {
+            devBounds,
+            devBounds45,
+            {{m.getScaleX(), m.getSkewY(), m.getSkewX(), m.getScaleY()}},
+            {{m.getTranslateX(), m.getTranslateY()}},
+            {{offsetX, offsetY}},
+            draw->fColor
+        };
+    }
+
+    SkASSERT(pathInstanceIdx == fBaseInstance + fInstanceCount - fNumSkippedInstances);
+    if (currentAtlas) {
+        this->addAtlasBatch(currentAtlas, pathInstanceIdx);
+    }
+
+    return pathInstanceIdx;
+}
+
+void CCPR::ClipPath::placePathInAtlas(GrCoverageCountingPathRenderer* ccpr,
+                                      GrOnFlushResourceProvider* onFlushRP,
+                                      GrCCPRCoverageOpsBuilder* atlasOpsBuilder) {
+    SkASSERT(!this->isUninitialized());
+    SkASSERT(!fHasAtlas);
+    atlasOpsBuilder->parseDeviceSpacePath(fDeviceSpacePath);
+    fAtlas = ccpr->placeParsedPathInAtlas(onFlushRP, fAccessRect, fPathDevIBounds, &fAtlasOffsetX,
+                                          &fAtlasOffsetY, atlasOpsBuilder);
+    SkDEBUGCODE(fHasAtlas = true);
+}
+
+GrCCPRAtlas*
+GrCoverageCountingPathRenderer::placeParsedPathInAtlas(GrOnFlushResourceProvider* onFlushRP,
+                                                       const SkIRect& clipIBounds,
+                                                       const SkIRect& pathIBounds,
+                                                       int16_t* atlasOffsetX,
+                                                       int16_t* atlasOffsetY,
+                                                       GrCCPRCoverageOpsBuilder* atlasOpsBuilder) {
+    using ScissorMode = GrCCPRCoverageOpsBuilder::ScissorMode;
+
+    ScissorMode scissorMode;
+    SkIRect clippedPathIBounds;
+    if (clipIBounds.contains(pathIBounds)) {
+        clippedPathIBounds = pathIBounds;
+        scissorMode = ScissorMode::kNonScissored;
+    } else if (clippedPathIBounds.intersect(clipIBounds, pathIBounds)) {
+        scissorMode = ScissorMode::kScissored;
+    } else {
+        atlasOpsBuilder->discardParsedPath();
+        return nullptr;
+    }
+
+    SkIPoint16 atlasLocation;
+    const int h = clippedPathIBounds.height(), w = clippedPathIBounds.width();
+    if (fPerFlushAtlases.empty() || !fPerFlushAtlases.back().addRect(w, h, &atlasLocation)) {
+        if (!fPerFlushAtlases.empty()) {
+            // The atlas is out of room and can't grow any bigger.
+            atlasOpsBuilder->emitOp(fPerFlushAtlases.back().drawBounds());
+        }
+        fPerFlushAtlases.emplace_back(*onFlushRP->caps(), w, h).addRect(w, h, &atlasLocation);
+    }
+
+    *atlasOffsetX = atlasLocation.x() - static_cast<int16_t>(clippedPathIBounds.left());
+    *atlasOffsetY = atlasLocation.y() - static_cast<int16_t>(clippedPathIBounds.top());
+    atlasOpsBuilder->saveParsedPath(scissorMode, clippedPathIBounds, *atlasOffsetX, *atlasOffsetY);
+
+    return &fPerFlushAtlases.back();
+}
+
+void CCPR::DrawPathsOp::onExecute(GrOpFlushState* flushState) {
     SkASSERT(fCCPR->fFlushing);
     SkASSERT(flushState->rtCommandBuffer());
 
@@ -381,7 +516,7 @@ void DrawPathsOp::onExecute(GrOpFlushState* flushState) {
         return; // Setup failed.
     }
 
-    SkASSERT(fBaseInstance >= 0); // Make sure setupPerFlushResources has set us up.
+    SkASSERT(fBaseInstance >= 0); // Make sure setupResources has been called.
 
     GrPipeline::InitArgs initArgs;
     initArgs.fFlags = fSRGBFlags;
@@ -401,8 +536,9 @@ void DrawPathsOp::onExecute(GrOpFlushState* flushState) {
             continue; // Atlas failed to allocate.
         }
 
-        GrCCPRPathProcessor coverProc(flushState->resourceProvider(), batch.fAtlas->textureProxy(),
-                                      this->getFillType(), *flushState->gpu()->caps()->shaderCaps());
+        GrCCPRPathProcessor coverProc(flushState->resourceProvider(),
+                                      sk_ref_sp(batch.fAtlas->textureProxy()), this->getFillType(),
+                                      *flushState->gpu()->caps()->shaderCaps());
 
         GrMesh mesh(GrPrimitiveType::kTriangles);
         mesh.setIndexedInstanced(fCCPR->fPerFlushIndexBuffer.get(),
@@ -414,7 +550,7 @@ void DrawPathsOp::onExecute(GrOpFlushState* flushState) {
         flushState->rtCommandBuffer()->draw(pipeline, coverProc, &mesh, nullptr, 1, this->bounds());
     }
 
-    SkASSERT(baseInstance == fBaseInstance + fDebugInstanceCount - fDebugSkippedInstances);
+    SkASSERT(baseInstance == fBaseInstance + fInstanceCount - fNumSkippedInstances);
 }
 
 void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
@@ -424,5 +560,9 @@ void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint
     fPerFlushInstanceBuffer.reset();
     fPerFlushVertexBuffer.reset();
     fPerFlushIndexBuffer.reset();
+    // We wait to erase these until after flush, once Ops and FPs are done accessing their data.
+    for (int i = 0; i < numOpListIDs; ++i) {
+        fRTPendingPathsMap.erase(opListIDs[i]);
+    }
     SkDEBUGCODE(fFlushing = false;)
 }
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index b428e709d1..1d08f381a2 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -14,6 +14,7 @@
 #include "SkTInternalLList.h"
 #include "ccpr/GrCCPRAtlas.h"
 #include "ccpr/GrCCPRCoverageOp.h"
+#include "ccpr/GrCCPRPathProcessor.h"
 #include "ops/GrDrawOp.h"
 #include <map>
 
@@ -28,7 +29,7 @@
 class GrCoverageCountingPathRenderer
     : public GrPathRenderer
     , public GrOnFlushCallbackObject {
-    struct RTPendingOps;
+    struct RTPendingPaths;
 
 public:
     static bool IsSupported(const GrCaps&);
@@ -36,23 +37,11 @@ public:
                                                      bool drawCachablePaths);
 
     ~GrCoverageCountingPathRenderer() override {
-        // Ensure nothing exists that could have a dangling pointer back into this class.
-        SkASSERT(fRTPendingOpsMap.empty());
+        // Ensure no Ops exist that could have a dangling pointer back into this class.
+ SkASSERT(fRTPendingPathsMap.empty()); SkASSERT(0 == fPendingDrawOpsCount); } - // GrPathRenderer overrides. - StencilSupport onGetStencilSupport(const GrShape&) const override { - return GrPathRenderer::kNoSupport_StencilSupport; - } - CanDrawPath onCanDrawPath(const CanDrawPathArgs& args) const override; - bool onDrawPath(const DrawPathArgs&) final; - - // GrOnFlushCallbackObject overrides. - void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs, - SkTArray<sk_sp<GrRenderTargetContext>>* results) override; - void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override; - // This is the Op that ultimately draws a path into its final destination, using the atlas we // generate at flush time. class DrawPathsOp : public GrDrawOp { @@ -63,38 +52,46 @@ public: DrawPathsOp(GrCoverageCountingPathRenderer*, const DrawPathArgs&, GrColor); ~DrawPathsOp() override; - const char* name() const override { return "GrCoverageCountingPathRenderer::DrawPathsOp"; } + struct SingleDraw { + SkIRect fClipIBounds; + SkMatrix fMatrix; + SkPath fPath; + GrColor fColor; + SingleDraw* fNext = nullptr; + }; - void visitProxies(const VisitProxyFunc& func) const override { - fProcessors.visitProxies(func); + const SingleDraw* head() const { + SkASSERT(fInstanceCount >= 1); + return &fHeadDraw; } + SkDEBUGCODE(int numSkippedInstances_debugOnly() const { return fNumSkippedInstances; }) + // GrDrawOp overrides. + const char* name() const override { return "GrCoverageCountingPathRenderer::DrawPathsOp"; } FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; } RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*, GrPixelConfigIsClamped) override; void wasRecorded(GrRenderTargetOpList*) override; bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override; + void visitProxies(const VisitProxyFunc& func) const override { + fProcessors.visitProxies(func); + } void onPrepare(GrOpFlushState*) override {} void onExecute(GrOpFlushState*) override; + int setupResources(GrOnFlushResourceProvider*, GrCCPRCoverageOpsBuilder*, + GrCCPRPathProcessor::Instance* pathInstanceData, int pathInstanceIdx); + private: SkPath::FillType getFillType() const { - SkASSERT(fDebugInstanceCount >= 1); + SkASSERT(fInstanceCount >= 1); return fHeadDraw.fPath.getFillType(); } - struct SingleDraw { - SkIRect fClipIBounds; - SkMatrix fMatrix; - SkPath fPath; - GrColor fColor; - SingleDraw* fNext = nullptr; - }; - struct AtlasBatch { - const GrCCPRAtlas* fAtlas; - int fEndInstanceIdx; + const GrCCPRAtlas* fAtlas; + int fEndInstanceIdx; }; void addAtlasBatch(const GrCCPRAtlas* atlas, int endInstanceIdx) { @@ -104,46 +101,130 @@ public: fAtlasBatches.push_back() = {atlas, endInstanceIdx}; } - GrCoverageCountingPathRenderer* const fCCPR; - const uint32_t fSRGBFlags; - GrProcessorSet fProcessors; - SingleDraw fHeadDraw; - SingleDraw* fTailDraw; - RTPendingOps* fOwningRTPendingOps; - int fBaseInstance; - SkDEBUGCODE(int fDebugInstanceCount;) - SkDEBUGCODE(int fDebugSkippedInstances;) - SkSTArray<1, AtlasBatch, true> fAtlasBatches; - - friend class GrCoverageCountingPathRenderer; + GrCoverageCountingPathRenderer* const fCCPR; + const uint32_t fSRGBFlags; + GrProcessorSet fProcessors; + SingleDraw fHeadDraw; + SingleDraw* fTailDraw; + RTPendingPaths* fOwningRTPendingPaths; + int fBaseInstance; + SkDEBUGCODE(int fInstanceCount;) + SkDEBUGCODE(int fNumSkippedInstances;) + SkSTArray<1, AtlasBatch, true> fAtlasBatches; typedef GrDrawOp INHERITED; }; + 
+ // GrPathRenderer overrides.
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return GrPathRenderer::kNoSupport_StencilSupport;
+ }
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs& args) const override;
+ bool onDrawPath(const DrawPathArgs&) final;
+
+ // ClipPaths are keyed by SkPath generation ID. Each one records which device-space path a
+ // clip FP in a given opList accesses, and over what bounds. A single ClipPath can be
+ // referenced by multiple FPs. At flush time, their coverage count masks are packed into
+ // atlas(es) alongside the normal DrawPathsOps.
+ class ClipPath {
+ public:
+ ClipPath() = default;
+ ClipPath(const ClipPath&) = delete;
+
+ ~ClipPath() {
+ // Ensure no clip FPs exist with a dangling pointer back into this class.
+ SkASSERT(!fAtlasLazyProxy || fAtlasLazyProxy->isUnique_debugOnly());
+ // Ensure no lazy proxy callbacks exist with a dangling pointer back into this class.
+ SkASSERT(fHasAtlasTransform);
+ }
+
+ bool isUninitialized() const { return !fAtlasLazyProxy; }
+ void init(const SkPath& deviceSpacePath, const SkIRect& accessRect, int rtWidth,
+ int rtHeight);
+ void addAccess(const SkIRect& accessRect) {
+ SkASSERT(!this->isUninitialized());
+ fAccessRect.join(accessRect);
+ }
+
+ GrTextureProxy* atlasLazyProxy() const {
+ SkASSERT(!this->isUninitialized());
+ return fAtlasLazyProxy.get();
+ }
+ const SkPath& deviceSpacePath() const {
+ SkASSERT(!this->isUninitialized());
+ return fDeviceSpacePath;
+ }
+ const SkIRect& pathDevIBounds() const {
+ SkASSERT(!this->isUninitialized());
+ return fPathDevIBounds;
+ }
+ void placePathInAtlas(GrCoverageCountingPathRenderer*, GrOnFlushResourceProvider*,
+ GrCCPRCoverageOpsBuilder*);
+
+ const SkVector& atlasScale() const { SkASSERT(fHasAtlasTransform); return fAtlasScale; }
+ const SkVector& atlasTranslate() const {
+ SkASSERT(fHasAtlasTransform);
+ return fAtlasTranslate;
+ }
+
+ private:
+ sk_sp<GrTextureProxy> fAtlasLazyProxy;
+ SkPath fDeviceSpacePath;
+ SkIRect fPathDevIBounds;
+ SkIRect fAccessRect;
+
+ const GrCCPRAtlas* fAtlas = nullptr;
+ int16_t fAtlasOffsetX;
+ int16_t fAtlasOffsetY;
+ SkDEBUGCODE(bool fHasAtlas = false);
+
+ SkVector fAtlasScale;
+ SkVector fAtlasTranslate;
+ SkDEBUGCODE(bool fHasAtlasTransform = false);
+ };
+
+ bool canMakeClipProcessor(const SkPath& deviceSpacePath) const;
+
+ std::unique_ptr<GrFragmentProcessor> makeClipProcessor(uint32_t opListID,
+ const SkPath& deviceSpacePath,
+ const SkIRect& accessRect,
+ int rtWidth, int rtHeight);
+
+ // GrOnFlushCallbackObject overrides.
+ void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
+ SkTArray<sk_sp<GrRenderTargetContext>>* results) override;
+ void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override;
+
 private:
 GrCoverageCountingPathRenderer(bool drawCachablePaths)
 : fDrawCachablePaths(drawCachablePaths) {}
 
- void setupPerFlushResources(GrOnFlushResourceProvider*, const uint32_t* opListIDs,
- int numOpListIDs, SkTArray<sk_sp<GrRenderTargetContext>>* results);
+ GrCCPRAtlas* placeParsedPathInAtlas(GrOnFlushResourceProvider*, const SkIRect& accessRect,
+ const SkIRect& pathIBounds, int16_t* atlasOffsetX,
+ int16_t* atlasOffsetY, GrCCPRCoverageOpsBuilder*);
+
+ struct RTPendingPaths {
+ ~RTPendingPaths() {
+ // Ensure all DrawPathsOps in this opList have been deleted.
+ SkASSERT(fDrawOps.isEmpty());
+ }
 
- struct RTPendingOps {
- SkTInternalLList<DrawPathsOp> fOpList;
- GrSTAllocator<256, DrawPathsOp::SingleDraw> fDrawsAllocator;
+ SkTInternalLList<DrawPathsOp> fDrawOps;
+ std::map<uint32_t, ClipPath> fClipPaths;
+ GrSTAllocator<256, DrawPathsOp::SingleDraw> fDrawsAllocator;
 };
 
- // Map from render target ID to the individual render target's pending path ops.
- std::map<uint32_t, RTPendingOps> fRTPendingOpsMap;
- SkDEBUGCODE(int fPendingDrawOpsCount = 0;)
+ // A map from render target ID to the individual render target's pending paths.
+ std::map<uint32_t, RTPendingPaths> fRTPendingPathsMap;
+ SkDEBUGCODE(int fPendingDrawOpsCount = 0;)
 
- sk_sp<GrBuffer> fPerFlushIndexBuffer;
- sk_sp<GrBuffer> fPerFlushVertexBuffer;
- sk_sp<GrBuffer> fPerFlushInstanceBuffer;
- GrSTAllocator<4, GrCCPRAtlas> fPerFlushAtlases;
- bool fPerFlushResourcesAreValid;
- SkDEBUGCODE(bool fFlushing = false;)
+ sk_sp<GrBuffer> fPerFlushIndexBuffer;
+ sk_sp<GrBuffer> fPerFlushVertexBuffer;
+ sk_sp<GrBuffer> fPerFlushInstanceBuffer;
+ GrSTAllocator<4, GrCCPRAtlas> fPerFlushAtlases;
+ bool fPerFlushResourcesAreValid;
+ SkDEBUGCODE(bool fFlushing = false;)
 
- const bool fDrawCachablePaths;
+ const bool fDrawCachablePaths;
 };
 
 #endif
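A note on how the pieces declared above compose: makeClipProcessor() presumably keys a ClipPath by the path's generation ID inside the per-opList RTPendingPaths, creating the entry on first sight and only widening the access rect on repeat accesses. The following is a minimal sketch under that assumption, using only API declared in this header; the authoritative body lives in GrCoverageCountingPathRenderer.cpp, which is not shown in this diff.

    // Sketch (assumed, simplified): find-or-create the ClipPath for this opList + path.
    RTPendingPaths& pendingPaths = fRTPendingPathsMap[opListID];
    ClipPath& clipPath = pendingPaths.fClipPaths[deviceSpacePath.getGenerationID()];
    if (clipPath.isUninitialized()) {
        // First reference in this opList: create the lazy atlas proxy and record bounds.
        clipPath.init(deviceSpacePath, accessRect, rtWidth, rtHeight);
    } else {
        // Subsequent references only need to grow the accessed region.
        clipPath.addAccess(accessRect);
    }
    // The returned GrFragmentProcessor samples clipPath.atlasLazyProxy(); at flush time,
    // placePathInAtlas() renders the coverage count mask and sets the atlas transform.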
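Relatedly, the atlasScale()/atlasTranslate() accessors imply the clip FP maps device-space coordinates into normalized atlas texture space with a scale-then-offset transform. A hypothetical helper showing the assumed math follows; the real shader logic is in GrCCPRClipProcessor.cpp, and both the function name and the formula here are illustrative only.

    // Hypothetical: map a device-space position into the path's atlas slot.
    static SkPoint device_to_atlas_coord(const SkPoint& devCoord,
                                         const SkVector& atlasScale,
                                         const SkVector& atlasTranslate) {
        return SkPoint::Make(devCoord.x() * atlasScale.x() + atlasTranslate.x(),
                             devCoord.y() * atlasScale.y() + atlasTranslate.y());
    }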
diff --git a/tests/GrCCPRTest.cpp b/tests/GrCCPRTest.cpp
index 32d4f6aa45..33246bfc88 100644
--- a/tests/GrCCPRTest.cpp
+++ b/tests/GrCCPRTest.cpp
@@ -28,6 +28,30 @@
 
 static constexpr int kCanvasSize = 100;
 
+class CCPRClip : public GrClip {
+public:
+ CCPRClip(GrCoverageCountingPathRenderer* ccpr, const SkPath& path) : fCCPR(ccpr), fPath(path) {}
+
+private:
+ bool apply(GrContext*, GrRenderTargetContext* rtc, bool, bool, GrAppliedClip* out,
+ SkRect* bounds) const override {
+ out->addCoverageFP(fCCPR->makeClipProcessor(rtc->priv().testingOnly_getOpListID(), fPath,
+ SkIRect::MakeWH(rtc->width(), rtc->height()),
+ rtc->width(), rtc->height()));
+ return true;
+ }
+ bool quickContains(const SkRect&) const final { return false; }
+ bool isRRect(const SkRect& rtBounds, SkRRect* rr, GrAA*) const final { return false; }
+ void getConservativeBounds(int width, int height, SkIRect* rect, bool* iior) const final {
+ rect->set(0, 0, width, height);
+ if (iior) {
+ *iior = false;
+ }
+ }
+ GrCoverageCountingPathRenderer* const fCCPR;
+ const SkPath fPath;
+};
+
 class CCPRPathDrawer {
 public:
 CCPRPathDrawer(GrContext* ctx, skiatest::Reporter* reporter)
@@ -66,6 +90,16 @@ public:
 &noClip, &clipBounds, &matrix, &shape, GrAAType::kCoverage, false});
 }
 
+ void clipFullscreenRect(SkPath clipPath, GrColor4f color = GrColor4f(0, 1, 0, 1)) {
+ SkASSERT(this->valid());
+
+ GrPaint paint;
+ paint.setColor4f(color);
+
+ fRTC->drawRect(CCPRClip(fCCPR, clipPath), std::move(paint), GrAA::kYes, SkMatrix::I(),
+ SkRect::MakeIWH(kCanvasSize, kCanvasSize));
+ }
+
 void flush() const {
 SkASSERT(this->valid());
 fCtx->flush();
@@ -137,6 +171,7 @@ class GrCCPRTest_cleanup : public CCPRTest {
 // Ensure paths get unreffed.
 for (int i = 0; i < 10; ++i) {
 ccpr.drawPath(fPath);
+ ccpr.clipFullscreenRect(fPath);
 }
 REPORTER_ASSERT(reporter, !SkPathPriv::TestingOnly_unique(fPath));
 ccpr.flush();
@@ -145,6 +180,7 @@ class GrCCPRTest_cleanup : public CCPRTest {
 // Ensure paths get unreffed when we delete the context without flushing.
 for (int i = 0; i < 10; ++i) {
 ccpr.drawPath(fPath);
+ ccpr.clipFullscreenRect(fPath);
 }
 ccpr.abandonGrContext();
 REPORTER_ASSERT(reporter, !SkPathPriv::TestingOnly_unique(fPath));
@@ -196,6 +232,18 @@ class GrCCPRTest_parseEmptyPath : public CCPRTest {
 // This is the test. It will exercise various internal asserts and verify we do not crash.
 ccpr.flush();
+
+ // Now try again with clips.
+ ccpr.clipFullscreenRect(largeOutsidePath);
+ ccpr.clipFullscreenRect(emptyPath);
+ ccpr.flush();
+
+ // ... and both.
+ ccpr.drawPath(largeOutsidePath);
+ ccpr.clipFullscreenRect(largeOutsidePath);
+ ccpr.drawPath(emptyPath);
+ ccpr.clipFullscreenRect(emptyPath);
+ ccpr.flush();
 }
 };
 
 DEF_CCPR_TEST(GrCCPRTest_parseEmptyPath)
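For anyone extending these tests, the pattern above is: record a path through the CCPR clip plumbing, then flush so its coverage mask actually gets parsed and atlased. A minimal sketch reusing the helpers defined in this file; the fixture setup and the exact test-body hook are assumed, not shown in this diff.

    // Inside a CCPRTest subclass's test body, with a valid CCPRPathDrawer 'ccpr'.
    SkPath clip;
    clip.addCircle(kCanvasSize / 2.f, kCanvasSize / 2.f, kCanvasSize / 3.f);
    ccpr.clipFullscreenRect(clip); // draws a fullscreen rect through a CCPR clip FP
    ccpr.flush();                  // the ClipPath's coverage mask is atlased here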
diff --git a/tools/gpu/GrTest.cpp b/tools/gpu/GrTest.cpp
index 524cd634fe..0001d1dc27 100644
--- a/tools/gpu/GrTest.cpp
+++ b/tools/gpu/GrTest.cpp
@@ -275,6 +275,10 @@ int GrResourceCache::countUniqueKeysWithTag(const char* tag) const {
 
 SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fRenderTargetContext->singleOwner());)
 
+uint32_t GrRenderTargetContextPriv::testingOnly_getOpListID() {
+ return fRenderTargetContext->getOpList()->uniqueID();
+}
+
 uint32_t GrRenderTargetContextPriv::testingOnly_addDrawOp(std::unique_ptr<GrDrawOp> op) {
 return this->testingOnly_addDrawOp(GrNoClip(), std::move(op));
 }
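The test-only hook added above exists so a custom clip can key its ClipPath to the opList currently being recorded, mirroring what CCPRClip::apply() does in the test diff. A usage sketch, assuming a GrRenderTargetContext* 'rtc', a GrCoverageCountingPathRenderer* 'ccpr', and a device-space SkPath 'deviceSpacePath':

    // Fetch the ID of the opList this draw will land in, then register the path as a
    // clip covering the whole render target. The returned FP would then be attached
    // via GrAppliedClip::addCoverageFP(), as the test's CCPRClip does.
    uint32_t opListID = rtc->priv().testingOnly_getOpListID();
    std::unique_ptr<GrFragmentProcessor> clipFP = ccpr->makeClipProcessor(
            opListID, deviceSpacePath, SkIRect::MakeWH(rtc->width(), rtc->height()),
            rtc->width(), rtc->height());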