Diffstat (limited to 'src/gpu/ccpr/GrCCPerFlushResources.cpp')
-rw-r--r--  src/gpu/ccpr/GrCCPerFlushResources.cpp  189
1 file changed, 160 insertions(+), 29 deletions(-)
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.cpp b/src/gpu/ccpr/GrCCPerFlushResources.cpp
index 4738a257fe..84f905cbc6 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.cpp
+++ b/src/gpu/ccpr/GrCCPerFlushResources.cpp
@@ -13,14 +13,83 @@
#include "GrSurfaceContextPriv.h"
#include "GrRenderTargetContext.h"
#include "SkMakeUnique.h"
+#include "ccpr/GrCCPathCache.h"
using CoverageCountBatchID = GrCCPathParser::CoverageCountBatchID;
using PathInstance = GrCCPathProcessor::Instance;
namespace {
+// Base class for an Op that renders a CCPR atlas.
+class AtlasOp : public GrDrawOp {
+public:
+ FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
+ RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*,
+ GrPixelConfigIsClamped) override { return RequiresDstTexture::kNo; }
+ bool onCombineIfPossible(GrOp* other, const GrCaps&) override {
+ SK_ABORT("Only expected one Op per CCPR atlas.");
+ return true;
+ }
+ void onPrepare(GrOpFlushState*) override {}
+
+protected:
+ AtlasOp(uint32_t classID, sk_sp<const GrCCPerFlushResources> resources,
+ const SkISize& drawBounds)
+ : GrDrawOp(classID)
+ , fResources(std::move(resources)) {
+ this->setBounds(SkRect::MakeIWH(drawBounds.width(), drawBounds.height()),
+ GrOp::HasAABloat::kNo, GrOp::IsZeroArea::kNo);
+ }
+
+ const sk_sp<const GrCCPerFlushResources> fResources;
+};
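Both ops below derive from this base, which pins fResources for the duration of the flush and supplies the boilerplate GrDrawOp overrides (no dst texture, no combining, nothing to prepare). For orientation, a minimal sketch of a hypothetical subclass following the same pattern; the name and body are illustrative only, not part of this change:

    // Hypothetical subclass, illustrating the pattern shared by CopyAtlasOp
    // and RenderAtlasOp below.
    class MyAtlasOp : public AtlasOp {
    public:
        DEFINE_OP_CLASS_ID

        const char* name() const override { return "MyAtlasOp (CCPR)"; }

        void onExecute(GrOpFlushState* flushState) override {
            // Issue this atlas's draws here. 'fResources' is kept alive by
            // the base class until the flush completes.
        }

    private:
        MyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, const SkISize& drawBounds)
                : AtlasOp(ClassID(), std::move(resources), drawBounds) {}
    };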
+
+// Copies paths from a stashed coverage count atlas into an 8-bit literal-coverage atlas.
+class CopyAtlasOp : public AtlasOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(GrContext* context,
+ sk_sp<const GrCCPerFlushResources> resources,
+ sk_sp<GrTextureProxy> copyProxy, int baseInstance,
+ int endInstance, const SkISize& drawBounds) {
+ return std::unique_ptr<GrDrawOp>(new CopyAtlasOp(std::move(resources), std::move(copyProxy),
+ baseInstance, endInstance, drawBounds));
+ }
+
+ const char* name() const override { return "CopyAtlasOp (CCPR)"; }
+ void visitProxies(const VisitProxyFunc& fn) const override { fn(fStashedAtlasProxy.get()); }
+
+ void onExecute(GrOpFlushState* flushState) override {
+ SkASSERT(fStashedAtlasProxy);
+ GrPipeline pipeline(flushState->proxy(), GrPipeline::ScissorState::kDisabled,
+ SkBlendMode::kSrc);
+ GrCCPathProcessor pathProc(flushState->resourceProvider(), std::move(fStashedAtlasProxy));
+ pathProc.drawPaths(flushState, pipeline, *fResources, fBaseInstance, fEndInstance,
+ this->bounds());
+ // Ensure we released the stashed atlas proxy. This allows its underlying texture to be
+ // reused as the current flush's mainline CCPR atlas if needed.
+ SkASSERT(!fStashedAtlasProxy);
+ }
+
+private:
+ friend class ::GrOpMemoryPool; // for ctor
+
+ CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> copyProxy,
+ int baseInstance, int endInstance, const SkISize& drawBounds)
+ : AtlasOp(ClassID(), std::move(resources), drawBounds)
+ , fStashedAtlasProxy(copyProxy)
+ , fBaseInstance(baseInstance)
+ , fEndInstance(endInstance) {
+ }
+
+ sk_sp<GrTextureProxy> fStashedAtlasProxy;
+ const int fBaseInstance;
+ const int fEndInstance;
+};
+
// Renders coverage counts to a CCPR atlas using the resources' pre-filled GrCCPathParser.
-class RenderAtlasOp : public GrDrawOp {
+class RenderAtlasOp : public AtlasOp {
public:
DEFINE_OP_CLASS_ID
@@ -33,14 +102,6 @@ public:
// GrDrawOp interface.
const char* name() const override { return "RenderAtlasOp (CCPR)"; }
- FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
- RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*,
- GrPixelConfigIsClamped) override { return RequiresDstTexture::kNo; }
- bool onCombineIfPossible(GrOp* other, const GrCaps&) override {
- SK_ABORT("Only expected one Op per CCPR atlas.");
- return true;
- }
- void onPrepare(GrOpFlushState*) override {}
void onExecute(GrOpFlushState* flushState) override {
fResources->pathParser().drawCoverageCount(flushState, fBatchID, fDrawBounds);
@@ -51,29 +112,35 @@ private:
RenderAtlasOp(sk_sp<const GrCCPerFlushResources> resources, CoverageCountBatchID batchID,
const SkISize& drawBounds)
- : GrDrawOp(ClassID())
- , fResources(std::move(resources))
+ : AtlasOp(ClassID(), std::move(resources), drawBounds)
, fBatchID(batchID)
, fDrawBounds(SkIRect::MakeWH(drawBounds.width(), drawBounds.height())) {
- this->setBounds(SkRect::MakeIWH(fDrawBounds.width(), fDrawBounds.height()),
- GrOp::HasAABloat::kNo, GrOp::IsZeroArea::kNo);
}
- const sk_sp<const GrCCPerFlushResources> fResources;
const CoverageCountBatchID fBatchID;
const SkIRect fDrawBounds;
};
}
+static int inst_buffer_count(const GrCCPerFlushResourceSpecs& specs) {
+ return specs.fNumCachedPaths +
+ specs.fNumCopiedPaths*2 + // 1 copy + 1 draw.
+ specs.fNumRenderedPaths;
+}
+
GrCCPerFlushResources::GrCCPerFlushResources(GrOnFlushResourceProvider* onFlushRP,
const GrCCPerFlushResourceSpecs& specs)
- : fPathParser(specs.fNumRenderedPaths + specs.fNumClipPaths, specs.fParsingPathStats)
- , fAtlasStack(kAlpha_half_GrPixelConfig, specs.fAtlasSpecs, onFlushRP->caps())
+ : fPathParser(specs.fNumRenderedPaths + specs.fNumClipPaths, specs.fRenderedPathStats)
+ , fCopyAtlasStack(kAlpha_8_GrPixelConfig, specs.fCopyAtlasSpecs, onFlushRP->caps())
+ , fRenderedAtlasStack(kAlpha_half_GrPixelConfig, specs.fRenderedAtlasSpecs,
+ onFlushRP->caps())
, fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
, fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
, fInstanceBuffer(onFlushRP->makeBuffer(kVertex_GrBufferType,
- specs.fNumRenderedPaths * sizeof(PathInstance))) {
+ inst_buffer_count(specs) * sizeof(PathInstance)))
+ , fNextCopyInstanceIdx(0)
+ , fNextPathInstanceIdx(specs.fNumCopiedPaths) {
if (!fIndexBuffer) {
SkDebugf("WARNING: failed to allocate CCPR index buffer. No paths will be drawn.\n");
return;
@@ -88,26 +155,43 @@ GrCCPerFlushResources::GrCCPerFlushResources(GrOnFlushResourceProvider* onFlushR
}
fPathInstanceData = static_cast<PathInstance*>(fInstanceBuffer->map());
SkASSERT(fPathInstanceData);
- SkDEBUGCODE(fEndPathInstance = specs.fNumRenderedPaths);
+ SkDEBUGCODE(fEndCopyInstance = specs.fNumCopiedPaths);
+ SkDEBUGCODE(fEndPathInstance = inst_buffer_count(specs));
+}
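With copies in the picture, the instance buffer is sized by inst_buffer_count() above and partitioned so copy instances fill the front and draw instances follow, which is why fNextCopyInstanceIdx starts at 0 while fNextPathInstanceIdx starts at specs.fNumCopiedPaths. A worked sketch of the arithmetic, using hypothetical counts:

    // Hypothetical spec counts, to illustrate the buffer layout.
    // fNumCachedPaths = 10   -> 1 draw instance each
    // fNumCopiedPaths = 4    -> 1 copy instance + 1 draw instance each
    // fNumRenderedPaths = 20 -> 1 draw instance each
    //
    // inst_buffer_count = 10 + 4*2 + 20 = 38 PathInstances
    //
    // index [0, 4):  copy instances  (fNextCopyInstanceIdx begins at 0)
    // index [4, 38): draw instances  (fNextPathInstanceIdx begins at 4)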
+
+GrCCAtlas* GrCCPerFlushResources::copyPathToCachedAtlas(const GrCCPathCacheEntry& entry,
+ GrCCPathProcessor::DoEvenOddFill evenOdd,
+ SkIVector* newAtlasOffset) {
+ SkASSERT(this->isMapped());
+ SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);
+ SkASSERT(!entry.hasCachedAtlas()); // Unexpected, but not necessarily a problem.
+
+ if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry.devIBounds(), newAtlasOffset)) {
+ // We did not fit in the previous copy atlas and it was retired. We will render the copies
+ // up until fNextCopyInstanceIdx into the retired atlas during finalize().
+ retiredAtlas->setUserBatchID(fNextCopyInstanceIdx);
+ }
+
+ fPathInstanceData[fNextCopyInstanceIdx++].set(entry, *newAtlasOffset, GrColor_WHITE, evenOdd);
+ return &fCopyAtlasStack.current();
}
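A hedged caller-side sketch of staging a copy; 'entry' and 'evenOdd' are assumed to come from the GrCCPathCache, and the surrounding names are illustrative:

    // Illustrative only: stage a copy of a cached path out of the stashed atlas.
    SkIVector newAtlasOffset;
    GrCCAtlas* atlas = resources->copyPathToCachedAtlas(entry, evenOdd, &newAtlasOffset);
    // 'entry' can now be pointed at 'atlas' + 'newAtlasOffset'. The pixels are
    // not copied yet; that happens when finalize() flushes the CopyAtlasOps.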
const GrCCAtlas* GrCCPerFlushResources::renderPathInAtlas(const SkIRect& clipIBounds,
const SkMatrix& m, const SkPath& path,
SkRect* devBounds, SkRect* devBounds45,
+ SkIRect* devIBounds,
SkIVector* devToAtlasOffset) {
SkASSERT(this->isMapped());
SkASSERT(fNextPathInstanceIdx < fEndPathInstance);
fPathParser.parsePath(m, path, devBounds, devBounds45);
+ devBounds->roundOut(devIBounds);
- SkIRect devIBounds;
- devBounds->roundOut(&devIBounds);
-
- if (!this->placeParsedPathInAtlas(clipIBounds, devIBounds, devToAtlasOffset)) {
+ if (!this->placeParsedPathInAtlas(clipIBounds, *devIBounds, devToAtlasOffset)) {
SkDEBUGCODE(--fEndPathInstance);
return nullptr; // Path was degenerate or clipped away.
}
- return &fAtlasStack.current();
+ return &fRenderedAtlasStack.current();
}
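renderPathInAtlas() now also reports the rounded-out device bounds through the new devIBounds out-param, so callers can record them (for example in a path cache entry) without re-deriving them. A sketch under assumed caller-side names:

    // Illustrative only: render a path and keep its integer device bounds.
    SkRect devBounds, devBounds45;
    SkIRect devIBounds;
    SkIVector devToAtlasOffset;
    if (const GrCCAtlas* atlas = resources->renderPathInAtlas(
                clipIBounds, matrix, path, &devBounds, &devBounds45, &devIBounds,
                &devToAtlasOffset)) {
        // devIBounds/devToAtlasOffset can be cached so the mask can later be
        // copied out of this atlas once it is stashed.
    }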
const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
@@ -118,7 +202,7 @@ const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
if (!this->placeParsedPathInAtlas(clipIBounds, devPathIBounds, devToAtlasOffset)) {
return nullptr;
}
- return &fAtlasStack.current();
+ return &fRenderedAtlasStack.current();
}
bool GrCCPerFlushResources::placeParsedPathInAtlas(const SkIRect& clipIBounds,
@@ -137,7 +221,8 @@ bool GrCCPerFlushResources::placeParsedPathInAtlas(const SkIRect& clipIBounds,
return false;
}
- if (GrCCAtlas* retiredAtlas = fAtlasStack.addRect(clippedPathIBounds, devToAtlasOffset)) {
+ if (GrCCAtlas* retiredAtlas =
+ fRenderedAtlasStack.addRect(clippedPathIBounds, devToAtlasOffset)) {
// We did not fit in the previous coverage count atlas and it was retired. Close the path
// parser's current batch (which does not yet include the path we just parsed). We will
// render this batch into the retired atlas during finalize().
@@ -149,16 +234,21 @@ bool GrCCPerFlushResources::placeParsedPathInAtlas(const SkIRect& clipIBounds,
}
bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
+ sk_sp<GrTextureProxy> stashedAtlasProxy,
SkTArray<sk_sp<GrRenderTargetContext>>* out) {
SkASSERT(this->isMapped());
SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
+ // No assert for fEndCopyInstance because the caller may have detected and skipped duplicates.
fInstanceBuffer->unmap();
fPathInstanceData = nullptr;
- if (!fAtlasStack.empty()) {
+ if (!fCopyAtlasStack.empty()) {
+ fCopyAtlasStack.current().setUserBatchID(fNextCopyInstanceIdx);
+ }
+ if (!fRenderedAtlasStack.empty()) {
CoverageCountBatchID batchID = fPathParser.closeCurrentBatch();
- fAtlasStack.current().setUserBatchID(batchID);
+ fRenderedAtlasStack.current().setUserBatchID(batchID);
}
// Build the GPU buffers to render path coverage counts. (This must not happen until after the
@@ -168,8 +258,30 @@ bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
return false;
}
- // Render the atlas(es).
- for (GrCCAtlasStack::Iter atlas(fAtlasStack); atlas.next();) {
+ // Draw the copies from the stashed atlas into 8-bit cached atlas(es).
+ int baseCopyInstance = 0;
+ for (GrCCAtlasStack::Iter atlas(fCopyAtlasStack); atlas.next();) {
+ int endCopyInstance = atlas->getUserBatchID();
+ if (endCopyInstance <= baseCopyInstance) {
+ SkASSERT(endCopyInstance == baseCopyInstance);
+ continue;
+ }
+ if (auto rtc = atlas->makeRenderTargetContext(onFlushRP)) {
+ GrContext* ctx = rtc->surfPriv().getContext();
+ auto op = CopyAtlasOp::Make(ctx, sk_ref_sp(this), stashedAtlasProxy, baseCopyInstance,
+ endCopyInstance, atlas->drawBounds());
+ rtc->addDrawOp(GrNoClip(), std::move(op));
+ out->push_back(std::move(rtc));
+ }
+ baseCopyInstance = endCopyInstance;
+ }
+
+ // Release the stashed atlas before creating new one(s). This allows us to recycle the same
+ // underlying texture with the upcoming rendered atlases.
+ stashedAtlasProxy = nullptr;
+
+ // Render the coverage count atlas(es).
+ for (GrCCAtlasStack::Iter atlas(fRenderedAtlasStack); atlas.next();) {
if (auto rtc = atlas->makeRenderTargetContext(onFlushRP)) {
auto op = RenderAtlasOp::Make(rtc->surfPriv().getContext(), sk_ref_sp(this),
atlas->getUserBatchID(), atlas->drawBounds());
@@ -180,3 +292,22 @@ bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
return true;
}
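finalize() now takes ownership of last flush's stashed atlas proxy: the CopyAtlasOps that read it are recorded first, the local sk_sp is dropped, and only then are the new coverage count atlases created so they can recycle the same texture. A hedged sketch of the call site; the surrounding onFlush-side names are assumptions:

    // Illustrative only: hand the stashed atlas to finalize() during onFlush.
    SkTArray<sk_sp<GrRenderTargetContext>> atlasDraws;
    if (!resources->finalize(onFlushRP, std::move(stashedAtlasProxy), &atlasDraws)) {
        return; // instance/index/vertex buffers failed to allocate
    }
    // atlasDraws now holds one render target context per atlas: copy atlases
    // first, then coverage count atlases, in dependency order.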
+
+void GrCCPerFlushResourceSpecs::convertCopiesToRenders() {
+ fNumRenderedPaths += fNumCopiedPaths;
+ fNumCopiedPaths = 0;
+
+ fRenderedAtlasSpecs.fApproxNumPixels += fCopyAtlasSpecs.fApproxNumPixels;
+ fRenderedAtlasSpecs.fMinWidth =
+ SkTMax(fRenderedAtlasSpecs.fMinWidth, fCopyAtlasSpecs.fMinWidth);
+ fRenderedAtlasSpecs.fMinHeight =
+ SkTMax(fRenderedAtlasSpecs.fMinHeight, fCopyAtlasSpecs.fMinHeight);
+ fCopyAtlasSpecs = GrCCAtlas::Specs();
+
+ fRenderedPathStats.fMaxPointsPerPath =
+ SkTMax(fRenderedPathStats.fMaxPointsPerPath, fCopyPathStats.fMaxPointsPerPath);
+ fRenderedPathStats.fNumTotalSkPoints += fCopyPathStats.fNumTotalSkPoints;
+ fRenderedPathStats.fNumTotalSkVerbs += fCopyPathStats.fNumTotalSkVerbs;
+ fRenderedPathStats.fNumTotalConicWeights += fCopyPathStats.fNumTotalConicWeights;
+ fCopyPathStats = GrCCPathParser::PathStats();
+}
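convertCopiesToRenders() is the fallback for when the copy pass cannot run (for instance, if no stashed atlas survived the previous flush; the exact trigger lives in the caller): it folds all pending copy work back into the render pass. Continuing the hypothetical counts from earlier:

    // Illustrative only: before the call, fNumCopiedPaths = 4, fNumRenderedPaths = 20.
    specs.convertCopiesToRenders();
    // After: fNumCopiedPaths = 0, fNumRenderedPaths = 24. fCopyAtlasSpecs and
    // fCopyPathStats are zeroed, with their pixel counts, minimum atlas
    // dimensions, and path stats merged into the rendered equivalents.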