author    Chris Dalton <csmartdalton@google.com>  2018-05-09 01:08:38 -0600
committer Skia Commit-Bot <skia-commit-bot@chromium.org>  2018-05-09 18:14:05 +0000
commit    5ba36ba9f5b414ffbe6f1a60598f47c5da57941f
tree      6c614d2517b778b3666cefbfce3c6e24fcdfe4f9
parent    969a738e7f7fe03832acb86362d133db40623c01
ccpr: Clean up GrCoverageCountingPathRenderer
Extracts all the nested classes to their own files and detangles their interactions. Encapsulates the per-flush resources in their own separate class.

Bug: skia:
Change-Id: Ic134b627f6b66cb2ce1e5d6f896ac6b2f75f6fa2
Reviewed-on: https://skia-review.googlesource.com/126845
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
-rw-r--r--  gn/gpu.gni                                       |   6
-rw-r--r--  src/gpu/ccpr/GrCCClipPath.cpp                    |  58
-rw-r--r--  src/gpu/ccpr/GrCCClipPath.h                      |  79
-rw-r--r--  src/gpu/ccpr/GrCCClipProcessor.cpp               |   7
-rw-r--r--  src/gpu/ccpr/GrCCClipProcessor.h                 |   9
-rw-r--r--  src/gpu/ccpr/GrCCDrawPathsOp.cpp                 | 161
-rw-r--r--  src/gpu/ccpr/GrCCDrawPathsOp.h                   |  97
-rw-r--r--  src/gpu/ccpr/GrCCPerFlushResources.cpp           | 126
-rw-r--r--  src/gpu/ccpr/GrCCPerFlushResources.h             |  71
-rw-r--r--  src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp  | 441
-rw-r--r--  src/gpu/ccpr/GrCoverageCountingPathRenderer.h    | 243
11 files changed, 741 insertions(+), 557 deletions(-)
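
For orientation before the file-by-file diff, here is a rough standalone sketch (hypothetical simplified types, not the Skia API) of how the extracted pieces cooperate across a flush: ops accumulate per render target when recorded, preFlush builds one per-flush resources object for everything, each op writes its instances into it, and onExecute reads the shared buffers back out.

#include <cstdint>
#include <cstdio>
#include <map>
#include <memory>
#include <vector>

// Hypothetical stand-ins for GrCCRTPendingPaths / GrCCPerFlushResources / GrCCDrawPathsOp.
struct PerFlushResources {
    std::vector<int> instances;  // stand-in for the shared path instance buffer
};

struct DrawPathsOp {
    int fBaseInstance = -1;
    void setupResources(PerFlushResources* resources) {
        fBaseInstance = (int)resources->instances.size();
        resources->instances.push_back(42);  // one path instance for this op
    }
    void onExecute(const PerFlushResources* resources) {
        std::printf("op draws instance %d of %zu\n", fBaseInstance,
                    resources->instances.size());
    }
};

int main() {
    // wasRecorded(): ops accumulate per render target, keyed by opList ID.
    std::map<std::uint32_t, std::vector<DrawPathsOp*>> rtPendingPaths;
    DrawPathsOp op;
    rtPendingPaths[7].push_back(&op);

    // preFlush(): one resources object serves the whole flush.
    auto resources = std::make_unique<PerFlushResources>();
    for (auto& entry : rtPendingPaths) {
        for (DrawPathsOp* o : entry.second) {
            o->setupResources(resources.get());
        }
    }

    op.onExecute(resources.get());  // flush: ops read the shared buffers
    resources.reset();              // postFlush(): resources dropped
}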
diff --git a/gn/gpu.gni b/gn/gpu.gni
index ac84e45b3b..8b79484fc5 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -299,6 +299,8 @@ skia_gpu_sources = [
# coverage counting path renderer
"$_src/gpu/ccpr/GrCCAtlas.cpp",
"$_src/gpu/ccpr/GrCCAtlas.h",
+ "$_src/gpu/ccpr/GrCCClipPath.cpp",
+ "$_src/gpu/ccpr/GrCCClipPath.h",
"$_src/gpu/ccpr/GrCCClipProcessor.cpp",
"$_src/gpu/ccpr/GrCCClipProcessor.h",
"$_src/gpu/ccpr/GrCCConicShader.cpp",
@@ -309,12 +311,16 @@ skia_gpu_sources = [
"$_src/gpu/ccpr/GrCCCoverageProcessor.h",
"$_src/gpu/ccpr/GrCCCubicShader.cpp",
"$_src/gpu/ccpr/GrCCCubicShader.h",
+ "$_src/gpu/ccpr/GrCCDrawPathsOp.cpp",
+ "$_src/gpu/ccpr/GrCCDrawPathsOp.h",
"$_src/gpu/ccpr/GrCCGeometry.cpp",
"$_src/gpu/ccpr/GrCCGeometry.h",
"$_src/gpu/ccpr/GrCCPathParser.cpp",
"$_src/gpu/ccpr/GrCCPathParser.h",
"$_src/gpu/ccpr/GrCCPathProcessor.cpp",
"$_src/gpu/ccpr/GrCCPathProcessor.h",
+ "$_src/gpu/ccpr/GrCCPerFlushResources.cpp",
+ "$_src/gpu/ccpr/GrCCPerFlushResources.h",
"$_src/gpu/ccpr/GrCCQuadraticShader.cpp",
"$_src/gpu/ccpr/GrCCQuadraticShader.h",
"$_src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp",
diff --git a/src/gpu/ccpr/GrCCClipPath.cpp b/src/gpu/ccpr/GrCCClipPath.cpp
new file mode 100644
index 0000000000..6b8a96cad3
--- /dev/null
+++ b/src/gpu/ccpr/GrCCClipPath.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrCCClipPath.h"
+
+#include "GrOnFlushResourceProvider.h"
+#include "GrProxyProvider.h"
+#include "GrTexture.h"
+#include "ccpr/GrCCPerFlushResources.h"
+
+void GrCCClipPath::init(GrProxyProvider* proxyProvider,
+ const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ int rtWidth, int rtHeight) {
+ SkASSERT(!this->isInitialized());
+
+ fAtlasLazyProxy = proxyProvider->createFullyLazyProxy(
+ [this](GrResourceProvider* resourceProvider) {
+ if (!resourceProvider) {
+ return sk_sp<GrTexture>();
+ }
+ SkASSERT(fHasAtlas);
+ SkASSERT(!fHasAtlasTransform);
+
+ GrTextureProxy* textureProxy = fAtlas ? fAtlas->textureProxy() : nullptr;
+ if (!textureProxy || !textureProxy->instantiate(resourceProvider)) {
+ fAtlasScale = fAtlasTranslate = {0, 0};
+ SkDEBUGCODE(fHasAtlasTransform = true);
+ return sk_sp<GrTexture>();
+ }
+
+ SkASSERT(kTopLeft_GrSurfaceOrigin == textureProxy->origin());
+
+ fAtlasScale = {1.f / textureProxy->width(), 1.f / textureProxy->height()};
+ fAtlasTranslate = {fAtlasOffsetX * fAtlasScale.x(),
+ fAtlasOffsetY * fAtlasScale.y()};
+ SkDEBUGCODE(fHasAtlasTransform = true);
+
+ return sk_ref_sp(textureProxy->priv().peekTexture());
+ },
+ GrProxyProvider::Renderable::kYes, kTopLeft_GrSurfaceOrigin, kAlpha_half_GrPixelConfig);
+
+ fDeviceSpacePath = deviceSpacePath;
+ fDeviceSpacePath.getBounds().roundOut(&fPathDevIBounds);
+ fAccessRect = accessRect;
+}
+
+void GrCCClipPath::placePathInAtlas(GrCCPerFlushResources* resources,
+ GrOnFlushResourceProvider* onFlushRP) {
+ SkASSERT(this->isInitialized());
+ SkASSERT(!fHasAtlas);
+ fAtlas = resources->addDeviceSpacePathToAtlas(*onFlushRP->caps(), fAccessRect, fDeviceSpacePath,
+ fPathDevIBounds, &fAtlasOffsetX, &fAtlasOffsetY);
+ SkDEBUGCODE(fHasAtlas = true);
+}
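
The lambda passed to createFullyLazyProxy above runs only when the proxy is finally instantiated at flush, after the atlas has been laid out. A minimal standalone sketch of that deferred-instantiation pattern follows (hypothetical types; the real GrProxyProvider API carries more state): the atlas transform can only be computed inside the callback, once the backing texture's final size is known.

#include <cstdio>
#include <functional>
#include <memory>

struct Texture { int width, height; };

// A "fully lazy" proxy: no dimensions are known until instantiation.
struct LazyProxy {
    std::function<std::shared_ptr<Texture>()> fInstantiate;
};

int main() {
    float atlasScaleX = 0, atlasScaleY = 0;  // analogous to fAtlasScale
    float translateX = 0, translateY = 0;    // analogous to fAtlasTranslate
    int atlasOffsetX = 128, atlasOffsetY = 64;

    LazyProxy proxy{[&] {
        auto tex = std::make_shared<Texture>(Texture{1024, 512});
        // The normalized atlas transform is computed here, at instantiation,
        // because only now does the texture exist with its final dimensions.
        atlasScaleX = 1.f / tex->width;
        atlasScaleY = 1.f / tex->height;
        translateX = atlasOffsetX * atlasScaleX;
        translateY = atlasOffsetY * atlasScaleY;
        return tex;
    }};

    proxy.fInstantiate();  // "flush time"
    std::printf("scale=(%g, %g) translate=(%g, %g)\n",
                atlasScaleX, atlasScaleY, translateX, translateY);
}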
diff --git a/src/gpu/ccpr/GrCCClipPath.h b/src/gpu/ccpr/GrCCClipPath.h
new file mode 100644
index 0000000000..4b4ad75fc5
--- /dev/null
+++ b/src/gpu/ccpr/GrCCClipPath.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCClipPath_DEFINED
+#define GrCCClipPath_DEFINED
+
+#include "GrTextureProxy.h"
+#include "SkPath.h"
+
+class GrCCAtlas;
+class GrCCPerFlushResources;
+class GrOnFlushResourceProvider;
+class GrProxyProvider;
+
+/**
+ * These are keyed by SkPath generation ID, and store which device-space paths are accessed and
+ * where by clip FPs in a given opList. A single GrCCClipPath can be referenced by multiple FPs. At
+ * flush time their coverage count masks are packed into atlas(es) alongside normal DrawPathOps.
+ */
+class GrCCClipPath {
+public:
+ GrCCClipPath() = default;
+ GrCCClipPath(const GrCCClipPath&) = delete;
+
+ ~GrCCClipPath() {
+ // Ensure no clip FPs exist with a dangling pointer back into this class.
+ SkASSERT(!fAtlasLazyProxy || fAtlasLazyProxy->isUnique_debugOnly());
+ // Ensure no lazy proxy callbacks exist with a dangling pointer back into this class.
+ SkASSERT(fHasAtlasTransform);
+ }
+
+ bool isInitialized() const { return fAtlasLazyProxy; }
+ void init(GrProxyProvider* proxyProvider,
+ const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ int rtWidth, int rtHeight);
+
+ void addAccess(const SkIRect& accessRect) {
+ SkASSERT(this->isInitialized());
+ fAccessRect.join(accessRect);
+ }
+ GrTextureProxy* atlasLazyProxy() const {
+ SkASSERT(this->isInitialized());
+ return fAtlasLazyProxy.get();
+ }
+ const SkPath& deviceSpacePath() const {
+ SkASSERT(this->isInitialized());
+ return fDeviceSpacePath;
+ }
+ const SkIRect& pathDevIBounds() const {
+ SkASSERT(this->isInitialized());
+ return fPathDevIBounds;
+ }
+
+ void placePathInAtlas(GrCCPerFlushResources*, GrOnFlushResourceProvider*);
+
+ const SkVector& atlasScale() const { SkASSERT(fHasAtlasTransform); return fAtlasScale; }
+ const SkVector& atlasTranslate() const { SkASSERT(fHasAtlasTransform); return fAtlasTranslate; }
+
+private:
+ sk_sp<GrTextureProxy> fAtlasLazyProxy;
+ SkPath fDeviceSpacePath;
+ SkIRect fPathDevIBounds;
+ SkIRect fAccessRect;
+
+ const GrCCAtlas* fAtlas = nullptr;
+ int16_t fAtlasOffsetX;
+ int16_t fAtlasOffsetY;
+ SkDEBUGCODE(bool fHasAtlas = false);
+
+ SkVector fAtlasScale;
+ SkVector fAtlasTranslate;
+ SkDEBUGCODE(bool fHasAtlasTransform = false);
+};
+
+#endif
diff --git a/src/gpu/ccpr/GrCCClipProcessor.cpp b/src/gpu/ccpr/GrCCClipProcessor.cpp
index 4a3b7a5216..d4da596039 100644
--- a/src/gpu/ccpr/GrCCClipProcessor.cpp
+++ b/src/gpu/ccpr/GrCCClipProcessor.cpp
@@ -10,11 +10,12 @@
#include "GrTexture.h"
#include "GrTextureProxy.h"
#include "SkMakeUnique.h"
+#include "ccpr/GrCCClipPath.h"
#include "glsl/GrGLSLFragmentProcessor.h"
#include "glsl/GrGLSLFragmentShaderBuilder.h"
-GrCCClipProcessor::GrCCClipProcessor(const ClipPath* clipPath, MustCheckBounds mustCheckBounds,
- SkPath::FillType overrideFillType)
+GrCCClipProcessor::GrCCClipProcessor(const GrCCClipPath* clipPath, MustCheckBounds mustCheckBounds,
+ SkPath::FillType overrideFillType)
: INHERITED(kGrCCClipProcessor_ClassID, kCompatibleWithCoverageAsAlpha_OptimizationFlag)
, fClipPath(clipPath)
, fMustCheckBounds((bool)mustCheckBounds)
@@ -26,7 +27,7 @@ GrCCClipProcessor::GrCCClipProcessor(const ClipPath* clipPath, MustCheckBounds m
std::unique_ptr<GrFragmentProcessor> GrCCClipProcessor::clone() const {
return skstd::make_unique<GrCCClipProcessor>(fClipPath, MustCheckBounds(fMustCheckBounds),
- fOverrideFillType);
+ fOverrideFillType);
}
void GrCCClipProcessor::onGetGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const {
diff --git a/src/gpu/ccpr/GrCCClipProcessor.h b/src/gpu/ccpr/GrCCClipProcessor.h
index 374a2dfe95..8c670e171d 100644
--- a/src/gpu/ccpr/GrCCClipProcessor.h
+++ b/src/gpu/ccpr/GrCCClipProcessor.h
@@ -9,18 +9,17 @@
#define GrCCClipProcessor_DEFINED
#include "GrFragmentProcessor.h"
-#include "ccpr/GrCoverageCountingPathRenderer.h"
+
+class GrCCClipPath;
class GrCCClipProcessor : public GrFragmentProcessor {
public:
- using ClipPath = GrCoverageCountingPathRenderer::ClipPath;
-
enum class MustCheckBounds : bool {
kNo = false,
kYes = true
};
- GrCCClipProcessor(const ClipPath*, MustCheckBounds, SkPath::FillType overrideFillType);
+ GrCCClipProcessor(const GrCCClipPath*, MustCheckBounds, SkPath::FillType overrideFillType);
const char* name() const override { return "GrCCClipProcessor"; }
std::unique_ptr<GrFragmentProcessor> clone() const override;
@@ -29,7 +28,7 @@ public:
GrGLSLFragmentProcessor* onCreateGLSLInstance() const override;
private:
- const ClipPath* const fClipPath;
+ const GrCCClipPath* const fClipPath;
const bool fMustCheckBounds;
const SkPath::FillType fOverrideFillType;
const TextureSampler fAtlasAccess;
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
new file mode 100644
index 0000000000..68a3d9c296
--- /dev/null
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrCCDrawPathsOp.h"
+
+#include "GrGpuCommandBuffer.h"
+#include "GrOpFlushState.h"
+#include "ccpr/GrCCPerFlushResources.h"
+#include "ccpr/GrCoverageCountingPathRenderer.h"
+
+GrCCDrawPathsOp::GrCCDrawPathsOp(GrCoverageCountingPathRenderer* ccpr, GrPaint&& paint,
+ const SkIRect& clipIBounds, const SkMatrix& viewMatrix,
+ const SkPath& path, const SkRect& devBounds)
+ : INHERITED(ClassID())
+ , fCCPR(ccpr)
+ , fHeadDraw{clipIBounds, viewMatrix, path, paint.getColor(), nullptr}
+ , fSRGBFlags(GrPipeline::SRGBFlagsFromPaint(paint))
+ , fProcessors(std::move(paint)) {
+ SkDEBUGCODE(fCCPR->incrDrawOpCount_debugOnly());
+ SkDEBUGCODE(fBaseInstance = -1);
+ SkDEBUGCODE(fInstanceCount = 1);
+ SkDEBUGCODE(fNumSkippedInstances = 0);
+ // FIXME: intersect with clip bounds to (hopefully) improve batching.
+ // (This is nontrivial due to assumptions in generating the octagon cover geometry.)
+ this->setBounds(devBounds, GrOp::HasAABloat::kYes, GrOp::IsZeroArea::kNo);
+}
+
+GrCCDrawPathsOp::~GrCCDrawPathsOp() {
+ if (fOwningRTPendingPaths) {
+ // Remove CCPR's dangling pointer to this Op before deleting it.
+ fOwningRTPendingPaths->fDrawOps.remove(this);
+ }
+ SkDEBUGCODE(fCCPR->decrDrawOpCount_debugOnly());
+}
+
+GrDrawOp::RequiresDstTexture GrCCDrawPathsOp::finalize(const GrCaps& caps,
+ const GrAppliedClip* clip,
+ GrPixelConfigIsClamped dstIsClamped) {
+ SkASSERT(!fCCPR->isFlushing_debugOnly());
+ // There should only be one single path draw in this Op right now.
+ SkASSERT(1 == fInstanceCount);
+ SkASSERT(&fHeadDraw == fTailDraw);
+ GrProcessorSet::Analysis analysis =
+ fProcessors.finalize(fHeadDraw.fColor, GrProcessorAnalysisCoverage::kSingleChannel,
+ clip, false, caps, dstIsClamped, &fHeadDraw.fColor);
+ return analysis.requiresDstTexture() ? RequiresDstTexture::kYes : RequiresDstTexture::kNo;
+}
+
+bool GrCCDrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps& caps) {
+ GrCCDrawPathsOp* that = op->cast<GrCCDrawPathsOp>();
+ SkASSERT(fCCPR == that->fCCPR);
+ SkASSERT(!fCCPR->isFlushing_debugOnly());
+ SkASSERT(fOwningRTPendingPaths);
+ SkASSERT(fInstanceCount);
+ SkASSERT(!that->fOwningRTPendingPaths || that->fOwningRTPendingPaths == fOwningRTPendingPaths);
+ SkASSERT(that->fInstanceCount);
+
+ if (this->getFillType() != that->getFillType() || fSRGBFlags != that->fSRGBFlags ||
+ fProcessors != that->fProcessors) {
+ return false;
+ }
+
+ fTailDraw->fNext = &fOwningRTPendingPaths->fDrawsAllocator.push_back(that->fHeadDraw);
+ fTailDraw = (that->fTailDraw == &that->fHeadDraw) ? fTailDraw->fNext : that->fTailDraw;
+
+ this->joinBounds(*that);
+
+ SkDEBUGCODE(fInstanceCount += that->fInstanceCount);
+ SkDEBUGCODE(that->fInstanceCount = 0);
+ return true;
+}
+
+void GrCCDrawPathsOp::wasRecorded(GrRenderTargetOpList* opList) {
+ SkASSERT(!fOwningRTPendingPaths);
+ fOwningRTPendingPaths = fCCPR->lookupRTPendingPaths(opList);
+ fOwningRTPendingPaths->fDrawOps.addToTail(this);
+}
+
+void GrCCDrawPathsOp::setupResources(GrCCPerFlushResources* resources,
+ GrOnFlushResourceProvider* onFlushRP) {
+ const GrCCAtlas* currentAtlas = nullptr;
+ SkASSERT(fInstanceCount > 0);
+ SkASSERT(-1 == fBaseInstance);
+ fBaseInstance = resources->pathInstanceCount();
+
+ for (const SingleDraw* draw = this->head(); draw; draw = draw->fNext) {
+ // addPathToAtlas gives us two tight bounding boxes: one in device space, as well as a
+ // second one rotated an additional 45 degrees. The path vertex shader uses these two
+ // bounding boxes to generate an octagon that circumscribes the path.
+ SkRect devBounds, devBounds45;
+ int16_t atlasOffsetX, atlasOffsetY;
+ GrCCAtlas* atlas = resources->addPathToAtlas(*onFlushRP->caps(), draw->fClipIBounds,
+ draw->fMatrix, draw->fPath, &devBounds,
+ &devBounds45, &atlasOffsetX, &atlasOffsetY);
+ if (!atlas) {
+ SkDEBUGCODE(++fNumSkippedInstances);
+ continue;
+ }
+ if (currentAtlas != atlas) {
+ if (currentAtlas) {
+ this->addAtlasBatch(currentAtlas, resources->pathInstanceCount());
+ }
+ currentAtlas = atlas;
+ }
+
+ const SkMatrix& m = draw->fMatrix;
+ resources->appendDrawPathInstance(
+ devBounds,
+ devBounds45,
+ {{m.getScaleX(), m.getSkewY(), m.getSkewX(), m.getScaleY()}},
+ {{m.getTranslateX(), m.getTranslateY()}},
+ {{atlasOffsetX, atlasOffsetY}},
+ draw->fColor);
+ }
+
+ SkASSERT(resources->pathInstanceCount() ==
+ fBaseInstance + fInstanceCount - fNumSkippedInstances);
+ if (currentAtlas) {
+ this->addAtlasBatch(currentAtlas, resources->pathInstanceCount());
+ }
+}
+
+void GrCCDrawPathsOp::onExecute(GrOpFlushState* flushState) {
+ const GrCCPerFlushResources* resources = fCCPR->getPerFlushResources();
+ if (!resources) {
+ return; // Setup failed.
+ }
+
+ SkASSERT(fBaseInstance >= 0); // Make sure setupResources has been called.
+
+ GrPipeline::InitArgs initArgs;
+ initArgs.fFlags = fSRGBFlags;
+ initArgs.fProxy = flushState->drawOpArgs().fProxy;
+ initArgs.fCaps = &flushState->caps();
+ initArgs.fResourceProvider = flushState->resourceProvider();
+ initArgs.fDstProxy = flushState->drawOpArgs().fDstProxy;
+ GrPipeline pipeline(initArgs, std::move(fProcessors), flushState->detachAppliedClip());
+
+ int baseInstance = fBaseInstance;
+
+ for (int i = 0; i < fAtlasBatches.count(); baseInstance = fAtlasBatches[i++].fEndInstanceIdx) {
+ const AtlasBatch& batch = fAtlasBatches[i];
+ SkASSERT(batch.fEndInstanceIdx > baseInstance);
+
+ if (!batch.fAtlas->textureProxy()) {
+ continue; // Atlas failed to allocate.
+ }
+
+ GrCCPathProcessor pathProc(flushState->resourceProvider(),
+ sk_ref_sp(batch.fAtlas->textureProxy()), this->getFillType());
+ pathProc.drawPaths(flushState, pipeline, resources->indexBuffer(),
+ resources->vertexBuffer(), resources->instanceBuffer(),
+ baseInstance, batch.fEndInstanceIdx, this->bounds());
+ }
+
+ SkASSERT(baseInstance == fBaseInstance + fInstanceCount - fNumSkippedInstances);
+}
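
The baseInstance loop in onExecute above is easy to misread: each AtlasBatch records only an end index, and the previous batch's end becomes the next batch's start. A standalone sketch of that partitioning (hypothetical types, same loop shape):

#include <cstdio>
#include <vector>

struct AtlasBatch {
    int fAtlasID;          // stand-in for the GrCCAtlas pointer
    int fEndInstanceIdx;   // one past the last instance sourced from this atlas
};

int main() {
    int baseInstance = 3;  // fBaseInstance: this op's first instance in the shared buffer
    std::vector<AtlasBatch> batches = {{0, 5}, {1, 9}, {2, 12}};

    for (size_t i = 0; i < batches.size(); baseInstance = batches[i++].fEndInstanceIdx) {
        const AtlasBatch& batch = batches[i];
        std::printf("atlas %d draws instances [%d, %d)\n",
                    batch.fAtlasID, baseInstance, batch.fEndInstanceIdx);
    }
    // Prints [3, 5), [5, 9), [9, 12): contiguous, non-overlapping ranges.
}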
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.h b/src/gpu/ccpr/GrCCDrawPathsOp.h
new file mode 100644
index 0000000000..aad8508fb0
--- /dev/null
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCDrawPathsOp_DEFINED
+#define GrCCDrawPathsOp_DEFINED
+
+#include "SkTInternalLList.h"
+#include "ccpr/GrCCPathProcessor.h"
+#include "ops/GrDrawOp.h"
+
+class GrCCAtlas;
+class GrCCPerFlushResources;
+struct GrCCRTPendingPaths;
+class GrCoverageCountingPathRenderer;
+
+/**
+ * This is the Op that draws paths to the actual canvas, using atlases generated by CCPR.
+ */
+class GrCCDrawPathsOp : public GrDrawOp {
+public:
+ DEFINE_OP_CLASS_ID
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCDrawPathsOp);
+
+ GrCCDrawPathsOp(GrCoverageCountingPathRenderer*, GrPaint&&, const SkIRect& clipIBounds,
+ const SkMatrix&, const SkPath&, const SkRect& devBounds);
+ ~GrCCDrawPathsOp() override;
+
+ struct SingleDraw {
+ SkIRect fClipIBounds;
+ SkMatrix fMatrix;
+ SkPath fPath;
+ GrColor fColor;
+ SingleDraw* fNext;
+ };
+
+ const SingleDraw* head() const {
+ SkASSERT(fInstanceCount >= 1);
+ return &fHeadDraw;
+ }
+
+ SkDEBUGCODE(int numSkippedInstances_debugOnly() const { return fNumSkippedInstances; })
+
+ const char* name() const override { return "GrCCDrawOp"; }
+ FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
+ RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*,
+ GrPixelConfigIsClamped) override;
+ void wasRecorded(GrRenderTargetOpList*) override;
+ bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override;
+ void visitProxies(const VisitProxyFunc& func) const override {
+ fProcessors.visitProxies(func);
+ }
+ void onPrepare(GrOpFlushState*) override {}
+
+ void setupResources(GrCCPerFlushResources*, GrOnFlushResourceProvider*);
+
+ void onExecute(GrOpFlushState*) override;
+
+private:
+ SkPath::FillType getFillType() const {
+ SkASSERT(fInstanceCount >= 1);
+ return fHeadDraw.fPath.getFillType();
+ }
+
+ struct AtlasBatch {
+ const GrCCAtlas* fAtlas;
+ int fEndInstanceIdx;
+ };
+
+ void addAtlasBatch(const GrCCAtlas* atlas, int endInstanceIdx) {
+ SkASSERT(endInstanceIdx > fBaseInstance);
+ SkASSERT(fAtlasBatches.empty() ||
+ endInstanceIdx > fAtlasBatches.back().fEndInstanceIdx);
+ fAtlasBatches.push_back() = {atlas, endInstanceIdx};
+ }
+
+ GrCoverageCountingPathRenderer* const fCCPR;
+ GrCCRTPendingPaths* fOwningRTPendingPaths = nullptr;
+
+ SingleDraw fHeadDraw;
+ SingleDraw* fTailDraw = &fHeadDraw;
+
+ const uint32_t fSRGBFlags;
+ GrProcessorSet fProcessors;
+
+ int fBaseInstance;
+ SkDEBUGCODE(int fInstanceCount);
+ SkDEBUGCODE(int fNumSkippedInstances);
+ SkSTArray<1, AtlasBatch, true> fAtlasBatches;
+
+ typedef GrDrawOp INHERITED;
+};
+
+#endif
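
onCombineIfPossible splices the other op's SingleDraw list onto this one. Because the head draw lives inline in each op, the merged head must be copied into storage with a stable address before the merged op is deleted. A standalone sketch of that splice (hypothetical types, with std::deque standing in for fDrawsAllocator since neither reallocates existing nodes):

#include <cstdio>
#include <deque>

struct SingleDraw {
    int fID;
    SingleDraw* fNext = nullptr;
};

struct Op {
    explicit Op(int id) : fHeadDraw{id} {}
    SingleDraw fHeadDraw;               // stored inline; dies with the op
    SingleDraw* fTailDraw = &fHeadDraw;
};

int main() {
    std::deque<SingleDraw> allocator;   // stable addresses, like GrSTAllocator
    Op a(1), b(2);

    // Splice b's draws onto a (mirrors GrCCDrawPathsOp::onCombineIfPossible).
    a.fTailDraw->fNext = &allocator.emplace_back(b.fHeadDraw);
    a.fTailDraw = (b.fTailDraw == &b.fHeadDraw) ? a.fTailDraw->fNext : b.fTailDraw;

    for (const SingleDraw* d = &a.fHeadDraw; d; d = d->fNext) {
        std::printf("draw %d\n", d->fID);  // prints: draw 1, draw 2
    }
}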
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.cpp b/src/gpu/ccpr/GrCCPerFlushResources.cpp
new file mode 100644
index 0000000000..a658bf7438
--- /dev/null
+++ b/src/gpu/ccpr/GrCCPerFlushResources.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrCCPerFlushResources.h"
+
+#include "GrOnFlushResourceProvider.h"
+#include "GrRenderTargetContext.h"
+#include "SkIPoint16.h"
+
+using PathInstance = GrCCPathProcessor::Instance;
+
+GrCCPerFlushResources::GrCCPerFlushResources(GrOnFlushResourceProvider* onFlushRP,
+ int numPathDraws, int numClipPaths,
+ const GrCCPathParser::PathStats& pathStats)
+ : fPathParser(sk_make_sp<GrCCPathParser>(numPathDraws + numClipPaths, pathStats))
+ , fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
+ , fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
+ , fInstanceBuffer(onFlushRP->makeBuffer(kVertex_GrBufferType,
+ numPathDraws * sizeof(PathInstance))) {
+ if (!fIndexBuffer) {
+ SkDebugf("WARNING: failed to allocate CCPR index buffer. No paths will be drawn.\n");
+ return;
+ }
+ if (!fVertexBuffer) {
+ SkDebugf("WARNING: failed to allocate CCPR vertex buffer. No paths will be drawn.\n");
+ return;
+ }
+ if (!fInstanceBuffer) {
+ SkDebugf("WARNING: failed to allocate CCPR instance buffer. No paths will be drawn.\n");
+ return;
+ }
+ fPathInstanceData = static_cast<PathInstance*>(fInstanceBuffer->map());
+ SkASSERT(fPathInstanceData);
+ SkDEBUGCODE(fPathInstanceBufferCount = numPathDraws);
+}
+
+GrCCAtlas* GrCCPerFlushResources::addPathToAtlas(const GrCaps& caps, const SkIRect& clipIBounds,
+ const SkMatrix& m, const SkPath& path,
+ SkRect* devBounds, SkRect* devBounds45,
+ int16_t* atlasOffsetX, int16_t* atlasOffsetY) {
+ SkASSERT(this->isMapped());
+ SkIRect devIBounds;
+ fPathParser->parsePath(m, path, devBounds, devBounds45);
+ devBounds->roundOut(&devIBounds);
+ return this->placeParsedPathInAtlas(caps, clipIBounds, devIBounds, atlasOffsetX, atlasOffsetY);
+}
+
+GrCCAtlas* GrCCPerFlushResources::addDeviceSpacePathToAtlas(const GrCaps& caps,
+ const SkIRect& clipIBounds,
+ const SkPath& devPath,
+ const SkIRect& devPathIBounds,
+ int16_t* atlasOffsetX,
+ int16_t* atlasOffsetY) {
+ SkASSERT(this->isMapped());
+ fPathParser->parseDeviceSpacePath(devPath);
+ return this->placeParsedPathInAtlas(caps, clipIBounds, devPathIBounds, atlasOffsetX,
+ atlasOffsetY);
+}
+
+GrCCAtlas* GrCCPerFlushResources::placeParsedPathInAtlas(const GrCaps& caps,
+ const SkIRect& clipIBounds,
+ const SkIRect& pathIBounds,
+ int16_t* atlasOffsetX,
+ int16_t* atlasOffsetY) {
+ using ScissorMode = GrCCPathParser::ScissorMode;
+ ScissorMode scissorMode;
+ SkIRect clippedPathIBounds;
+ if (clipIBounds.contains(pathIBounds)) {
+ clippedPathIBounds = pathIBounds;
+ scissorMode = ScissorMode::kNonScissored;
+ } else if (clippedPathIBounds.intersect(clipIBounds, pathIBounds)) {
+ scissorMode = ScissorMode::kScissored;
+ } else {
+ fPathParser->discardParsedPath();
+ return nullptr;
+ }
+
+ SkIPoint16 atlasLocation;
+ int h = clippedPathIBounds.height(), w = clippedPathIBounds.width();
+ if (fAtlases.empty() || !fAtlases.back().addRect(w, h, &atlasLocation)) {
+ if (!fAtlases.empty()) {
+ // The atlas is out of room and can't grow any bigger.
+ auto coverageCountBatchID = fPathParser->closeCurrentBatch();
+ fAtlases.back().setCoverageCountBatchID(coverageCountBatchID);
+ }
+ fAtlases.emplace_back(caps, SkTMax(w, h));
+ SkAssertResult(fAtlases.back().addRect(w, h, &atlasLocation));
+ }
+
+ *atlasOffsetX = atlasLocation.x() - static_cast<int16_t>(clippedPathIBounds.left());
+ *atlasOffsetY = atlasLocation.y() - static_cast<int16_t>(clippedPathIBounds.top());
+ fPathParser->saveParsedPath(scissorMode, clippedPathIBounds, *atlasOffsetX, *atlasOffsetY);
+
+ return &fAtlases.back();
+}
+
+bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
+ SkTArray<sk_sp<GrRenderTargetContext>>* atlasDraws) {
+ SkASSERT(this->isMapped());
+ fInstanceBuffer->unmap();
+ fPathInstanceData = nullptr;
+
+ if (!fAtlases.empty()) {
+ auto coverageCountBatchID = fPathParser->closeCurrentBatch();
+ fAtlases.back().setCoverageCountBatchID(coverageCountBatchID);
+ }
+
+ if (!fPathParser->finalize(onFlushRP)) {
+ SkDebugf("WARNING: failed to allocate GPU buffers for CCPR. No paths will be drawn.\n");
+ return false;
+ }
+
+ // Draw the atlas(es).
+ GrTAllocator<GrCCAtlas>::Iter atlasIter(&fAtlases);
+ while (atlasIter.next()) {
+ if (auto rtc = atlasIter.get()->finalize(onFlushRP, fPathParser)) {
+ atlasDraws->push_back(std::move(rtc));
+ }
+ }
+
+ return true;
+}
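
placePathInAtlas makes a three-way decision before packing: the path is fully inside the clip (no scissor needed), partially inside (scissored to the intersection), or fully clipped out (the parsed path is discarded). A standalone sketch of just that decision (hypothetical rect type approximating SkIRect's semantics):

#include <algorithm>
#include <cstdio>

struct IRect {
    int fLeft, fTop, fRight, fBottom;
    bool contains(const IRect& o) const {
        return fLeft <= o.fLeft && fTop <= o.fTop &&
               fRight >= o.fRight && fBottom >= o.fBottom;
    }
    bool intersect(const IRect& a, const IRect& b) {
        fLeft = std::max(a.fLeft, b.fLeft);
        fTop = std::max(a.fTop, b.fTop);
        fRight = std::min(a.fRight, b.fRight);
        fBottom = std::min(a.fBottom, b.fBottom);
        return fLeft < fRight && fTop < fBottom;  // non-empty intersection
    }
};

enum class ScissorMode { kNonScissored, kScissored, kDiscarded };

ScissorMode classify(const IRect& clip, const IRect& path, IRect* clipped) {
    if (clip.contains(path)) {
        *clipped = path;
        return ScissorMode::kNonScissored;
    }
    if (clipped->intersect(clip, path)) {
        return ScissorMode::kScissored;
    }
    return ScissorMode::kDiscarded;  // nothing visible; the parsed path is dropped
}

int main() {
    IRect clip{0, 0, 100, 100}, clipped{};
    std::printf("%d\n", (int)classify(clip, {10, 10, 20, 20}, &clipped));     // 0: kNonScissored
    std::printf("%d\n", (int)classify(clip, {90, 90, 120, 120}, &clipped));   // 1: kScissored
    std::printf("%d\n", (int)classify(clip, {200, 200, 210, 210}, &clipped)); // 2: kDiscarded
}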
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.h b/src/gpu/ccpr/GrCCPerFlushResources.h
new file mode 100644
index 0000000000..89ad6ae708
--- /dev/null
+++ b/src/gpu/ccpr/GrCCPerFlushResources.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCPerFlushResources_DEFINED
+#define GrCCPerFlushResources_DEFINED
+
+#include "GrAllocator.h"
+#include "ccpr/GrCCAtlas.h"
+#include "ccpr/GrCCPathParser.h"
+#include "ccpr/GrCCPathProcessor.h"
+
+/**
+ * This class wraps all the GPU resources that CCPR builds at flush time.
+ */
+class GrCCPerFlushResources {
+public:
+ GrCCPerFlushResources(GrOnFlushResourceProvider*, int numPathDraws, int numClipPaths,
+ const GrCCPathParser::PathStats&);
+
+ bool isMapped() const { return SkToBool(fPathInstanceData); }
+
+ GrCCAtlas* addPathToAtlas(const GrCaps&, const SkIRect& clipIBounds, const SkMatrix&,
+ const SkPath&, SkRect* devBounds, SkRect* devBounds45,
+ int16_t* offsetX, int16_t* offsetY);
+ GrCCAtlas* addDeviceSpacePathToAtlas(const GrCaps&, const SkIRect& clipIBounds,
+ const SkPath& devPath, const SkIRect& devPathIBounds,
+ int16_t* atlasOffsetX, int16_t* atlasOffsetY);
+
+ // See GrCCPathProcessor::Instance.
+ int appendDrawPathInstance(const SkRect& devBounds, const SkRect& devBounds45,
+ const std::array<float, 4>& viewMatrix,
+ const std::array<float, 2>& viewTranslate,
+ const std::array<int16_t, 2>& atlasOffset, uint32_t color) {
+ SkASSERT(this->isMapped());
+ SkASSERT(fPathInstanceCount < fPathInstanceBufferCount);
+ fPathInstanceData[fPathInstanceCount] = {devBounds, devBounds45, viewMatrix, viewTranslate,
+ atlasOffset, color};
+ return fPathInstanceCount++;
+ }
+ int pathInstanceCount() const { return fPathInstanceCount; }
+
+ bool finalize(GrOnFlushResourceProvider*,
+ SkTArray<sk_sp<GrRenderTargetContext>>* atlasDraws);
+
+ const GrBuffer* indexBuffer() const { SkASSERT(!this->isMapped()); return fIndexBuffer.get(); }
+ const GrBuffer* vertexBuffer() const { SkASSERT(!this->isMapped()); return fVertexBuffer.get();}
+ GrBuffer* instanceBuffer() const { SkASSERT(!this->isMapped()); return fInstanceBuffer.get(); }
+
+private:
+ GrCCAtlas* placeParsedPathInAtlas(const GrCaps&, const SkIRect& clipIBounds,
+ const SkIRect& pathIBounds, int16_t* atlasOffsetX,
+ int16_t* atlasOffsetY);
+
+ const sk_sp<GrCCPathParser> fPathParser;
+
+ sk_sp<const GrBuffer> fIndexBuffer;
+ sk_sp<const GrBuffer> fVertexBuffer;
+ sk_sp<GrBuffer> fInstanceBuffer;
+
+ GrCCPathProcessor::Instance* fPathInstanceData = nullptr;
+ int fPathInstanceCount = 0;
+ SkDEBUGCODE(int fPathInstanceBufferCount);
+
+ GrSTAllocator<4, GrCCAtlas> fAtlases;
+};
+
+#endif
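
GrCCPerFlushResources enforces a strict map/fill/unmap lifecycle: instances may only be appended while the instance buffer is mapped, and the buffers may only be handed to draws after finalize. A standalone sketch of that state machine (hypothetical types, asserts standing in for SkASSERT):

#include <cassert>
#include <cstdio>
#include <vector>

class PerFlushResources {
public:
    explicit PerFlushResources(int maxInstances) : fData(maxInstances), fMapped(true) {}

    bool isMapped() const { return fMapped; }

    int appendInstance(int v) {
        assert(this->isMapped());             // writes only while mapped
        assert(fCount < (int)fData.size());   // buffer was sized up front
        fData[fCount] = v;
        return fCount++;
    }

    void finalize() { fMapped = false; }      // unmap; contents are now fixed

    const std::vector<int>& instanceBuffer() const {
        assert(!this->isMapped());            // reads only after finalize()
        return fData;
    }

private:
    std::vector<int> fData;
    int fCount = 0;
    bool fMapped;
};

int main() {
    PerFlushResources resources(2);
    resources.appendInstance(7);
    resources.appendInstance(8);
    resources.finalize();
    std::printf("%zu instances ready\n", resources.instanceBuffer().size());
}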
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 6c2c00b91b..4358d21e24 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -9,32 +9,27 @@
#include "GrCaps.h"
#include "GrClip.h"
-#include "GrGpu.h"
-#include "GrGpuCommandBuffer.h"
-#include "GrOpFlushState.h"
#include "GrProxyProvider.h"
-#include "GrRenderTargetOpList.h"
-#include "GrStyle.h"
-#include "GrTexture.h"
#include "SkMakeUnique.h"
-#include "SkMatrix.h"
#include "SkPathOps.h"
#include "ccpr/GrCCClipProcessor.h"
+#include "ccpr/GrCCPathParser.h"
+#include "ccpr/GrCCPerFlushResources.h"
-// Shorthand for keeping line lengths under control with nested classes...
-using CCPR = GrCoverageCountingPathRenderer;
+using PathInstance = GrCCPathProcessor::Instance;
// If a path spans more pixels than this, we need to crop it or else analytic AA can run out of fp32
// precision.
static constexpr float kPathCropThreshold = 1 << 16;
static void crop_path(const SkPath& path, const SkIRect& cropbox, SkPath* out) {
- SkPath cropPath;
- cropPath.addRect(SkRect::Make(cropbox));
- if (!Op(cropPath, path, kIntersect_SkPathOp, out)) {
+ SkPath cropboxPath;
+ cropboxPath.addRect(SkRect::Make(cropbox));
+ if (!Op(cropboxPath, path, kIntersect_SkPathOp, out)) {
// This can fail if the PathOps encounter NaN or infinities.
out->reset();
}
+ out->setIsVolatile(true);
}
bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
@@ -52,6 +47,16 @@ sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSu
return sk_sp<GrCoverageCountingPathRenderer>(ccpr);
}
+GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(bool drawCachablePaths)
+ : fDrawCachablePaths(drawCachablePaths) {
+}
+
+GrCoverageCountingPathRenderer::~GrCoverageCountingPathRenderer() {
+ // Ensure no Ops exist that could have a dangling pointer back into this class.
+ SkASSERT(fRTPendingPathsMap.empty());
+ SkASSERT(0 == fNumOutstandingDrawOps);
+}
+
GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
const CanDrawPathArgs& args) const {
if (args.fShape->hasUnstyledKey() && !fDrawCachablePaths) {
@@ -91,100 +96,34 @@ GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
bool GrCoverageCountingPathRenderer::onDrawPath(const DrawPathArgs& args) {
SkASSERT(!fFlushing);
- auto op = skstd::make_unique<DrawPathsOp>(this, args, args.fPaint.getColor());
- args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
- return true;
-}
-CCPR::DrawPathsOp::DrawPathsOp(GrCoverageCountingPathRenderer* ccpr, const DrawPathArgs& args,
- GrColor color)
- : INHERITED(ClassID())
- , fCCPR(ccpr)
- , fSRGBFlags(GrPipeline::SRGBFlagsFromPaint(args.fPaint))
- , fProcessors(std::move(args.fPaint))
- , fTailDraw(&fHeadDraw)
- , fOwningRTPendingPaths(nullptr) {
- SkDEBUGCODE(++fCCPR->fPendingDrawOpsCount);
- SkDEBUGCODE(fBaseInstance = -1);
- SkDEBUGCODE(fInstanceCount = 1);
- SkDEBUGCODE(fNumSkippedInstances = 0);
- GrRenderTargetContext* const rtc = args.fRenderTargetContext;
+ SkIRect clipIBounds;
+ GrRenderTargetContext* rtc = args.fRenderTargetContext;
+ args.fClip->getConservativeBounds(rtc->width(), rtc->height(), &clipIBounds, nullptr);
- SkRect devBounds;
- args.fViewMatrix->mapRect(&devBounds, args.fShape->bounds());
- args.fClip->getConservativeBounds(rtc->width(), rtc->height(), &fHeadDraw.fClipIBounds,
- nullptr);
- if (SkTMax(devBounds.height(), devBounds.width()) > kPathCropThreshold) {
- // The path is too large. We need to crop it or analytic AA can run out of fp32 precision.
- SkPath path;
- args.fShape->asPath(&path);
- path.transform(*args.fViewMatrix);
- fHeadDraw.fMatrix.setIdentity();
- crop_path(path, fHeadDraw.fClipIBounds, &fHeadDraw.fPath);
- devBounds = fHeadDraw.fPath.getBounds();
- } else {
- fHeadDraw.fMatrix = *args.fViewMatrix;
- args.fShape->asPath(&fHeadDraw.fPath);
- }
- fHeadDraw.fColor = color; // Can't call args.fPaint.getColor() because it has been std::move'd.
-
- // FIXME: intersect with clip bounds to (hopefully) improve batching.
- // (This is nontrivial due to assumptions in generating the octagon cover geometry.)
- this->setBounds(devBounds, GrOp::HasAABloat::kYes, GrOp::IsZeroArea::kNo);
-}
-
-CCPR::DrawPathsOp::~DrawPathsOp() {
- if (fOwningRTPendingPaths) {
- // Remove CCPR's dangling pointer to this Op before deleting it.
- fOwningRTPendingPaths->fDrawOps.remove(this);
- }
- SkDEBUGCODE(--fCCPR->fPendingDrawOpsCount);
-}
-
-GrDrawOp::RequiresDstTexture CCPR::DrawPathsOp::finalize(const GrCaps& caps,
- const GrAppliedClip* clip,
- GrPixelConfigIsClamped dstIsClamped) {
- SkASSERT(!fCCPR->fFlushing);
- // There should only be one single path draw in this Op right now.
- SkASSERT(1 == fInstanceCount);
- SkASSERT(&fHeadDraw == fTailDraw);
- GrProcessorSet::Analysis analysis =
- fProcessors.finalize(fHeadDraw.fColor, GrProcessorAnalysisCoverage::kSingleChannel,
- clip, false, caps, dstIsClamped, &fHeadDraw.fColor);
- return analysis.requiresDstTexture() ? RequiresDstTexture::kYes : RequiresDstTexture::kNo;
-}
-
-bool CCPR::DrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps& caps) {
- DrawPathsOp* that = op->cast<DrawPathsOp>();
- SkASSERT(fCCPR == that->fCCPR);
- SkASSERT(!fCCPR->fFlushing);
- SkASSERT(fOwningRTPendingPaths);
- SkASSERT(fInstanceCount);
- SkASSERT(!that->fOwningRTPendingPaths || that->fOwningRTPendingPaths == fOwningRTPendingPaths);
- SkASSERT(that->fInstanceCount);
-
- if (this->getFillType() != that->getFillType() || fSRGBFlags != that->fSRGBFlags ||
- fProcessors != that->fProcessors) {
- return false;
- }
-
- fTailDraw->fNext = &fOwningRTPendingPaths->fDrawsAllocator.push_back(that->fHeadDraw);
- fTailDraw = (that->fTailDraw == &that->fHeadDraw) ? fTailDraw->fNext : that->fTailDraw;
+ SkPath path;
+ args.fShape->asPath(&path);
- this->joinBounds(*that);
+ SkRect devBounds;
+ args.fViewMatrix->mapRect(&devBounds, path.getBounds());
- SkDEBUGCODE(fInstanceCount += that->fInstanceCount);
- SkDEBUGCODE(that->fInstanceCount = 0);
+ if (SkTMax(devBounds.height(), devBounds.width()) > kPathCropThreshold) {
+ // The path is too large. Crop it or analytic AA can run out of fp32 precision.
+ SkPath croppedPath;
+ path.transform(*args.fViewMatrix, &croppedPath);
+ crop_path(croppedPath, clipIBounds, &croppedPath);
+ auto op = skstd::make_unique<GrCCDrawPathsOp>(this, std::move(args.fPaint), clipIBounds,
+ SkMatrix::I(), croppedPath, path.getBounds());
+ rtc->addDrawOp(*args.fClip, std::move(op));
+ return true;
+ }
+
+ auto op = skstd::make_unique<GrCCDrawPathsOp>(this, std::move(args.fPaint), clipIBounds,
+ *args.fViewMatrix, path, devBounds);
+ rtc->addDrawOp(*args.fClip, std::move(op));
return true;
}
-void CCPR::DrawPathsOp::wasRecorded(GrRenderTargetOpList* opList) {
- SkASSERT(!fCCPR->fFlushing);
- SkASSERT(!fOwningRTPendingPaths);
- fOwningRTPendingPaths = &fCCPR->fRTPendingPathsMap[opList->uniqueID()];
- fOwningRTPendingPaths->fDrawOps.addToTail(this);
-}
-
std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
GrProxyProvider* proxyProvider,
uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
@@ -193,10 +132,20 @@ std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipPro
SkASSERT(!fFlushing);
- ClipPath& clipPath = fRTPendingPathsMap[opListID].fClipPaths[deviceSpacePath.getGenerationID()];
- if (clipPath.isUninitialized()) {
+ GrCCClipPath& clipPath =
+ fRTPendingPathsMap[opListID].fClipPaths[deviceSpacePath.getGenerationID()];
+ if (!clipPath.isInitialized()) {
// This ClipPath was just created during lookup. Initialize it.
- clipPath.init(proxyProvider, deviceSpacePath, accessRect, rtWidth, rtHeight);
+ const SkRect& pathDevBounds = deviceSpacePath.getBounds();
+ if (SkTMax(pathDevBounds.height(), pathDevBounds.width()) > kPathCropThreshold) {
+ // The path is too large. Crop it or analytic AA can run out of fp32 precision.
+ SkPath croppedPath;
+ int maxRTSize = proxyProvider->caps()->maxRenderTargetSize();
+ crop_path(deviceSpacePath, SkIRect::MakeWH(maxRTSize, maxRTSize), &croppedPath);
+ clipPath.init(proxyProvider, croppedPath, accessRect, rtWidth, rtHeight);
+ } else {
+ clipPath.init(proxyProvider, deviceSpacePath, accessRect, rtWidth, rtHeight);
+ }
} else {
clipPath.addAccess(accessRect);
}
@@ -206,68 +155,17 @@ std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipPro
deviceSpacePath.getFillType());
}
-void CCPR::ClipPath::init(GrProxyProvider* proxyProvider,
- const SkPath& deviceSpacePath, const SkIRect& accessRect,
- int rtWidth, int rtHeight) {
- SkASSERT(this->isUninitialized());
-
- fAtlasLazyProxy = proxyProvider->createFullyLazyProxy(
- [this](GrResourceProvider* resourceProvider) {
- if (!resourceProvider) {
- return sk_sp<GrTexture>();
- }
- SkASSERT(fHasAtlas);
- SkASSERT(!fHasAtlasTransform);
-
- GrTextureProxy* textureProxy = fAtlas ? fAtlas->textureProxy() : nullptr;
- if (!textureProxy || !textureProxy->instantiate(resourceProvider)) {
- fAtlasScale = fAtlasTranslate = {0, 0};
- SkDEBUGCODE(fHasAtlasTransform = true);
- return sk_sp<GrTexture>();
- }
-
- SkASSERT(kTopLeft_GrSurfaceOrigin == textureProxy->origin());
-
- fAtlasScale = {1.f / textureProxy->width(), 1.f / textureProxy->height()};
- fAtlasTranslate = {fAtlasOffsetX * fAtlasScale.x(),
- fAtlasOffsetY * fAtlasScale.y()};
- SkDEBUGCODE(fHasAtlasTransform = true);
-
- return sk_ref_sp(textureProxy->priv().peekTexture());
- },
- GrProxyProvider::Renderable::kYes, kTopLeft_GrSurfaceOrigin, kAlpha_half_GrPixelConfig);
-
- const SkRect& pathDevBounds = deviceSpacePath.getBounds();
- if (SkTMax(pathDevBounds.height(), pathDevBounds.width()) > kPathCropThreshold) {
- // The path is too large. We need to crop it or analytic AA can run out of fp32 precision.
- crop_path(deviceSpacePath, SkIRect::MakeWH(rtWidth, rtHeight), &fDeviceSpacePath);
- } else {
- fDeviceSpacePath = deviceSpacePath;
- }
- deviceSpacePath.getBounds().roundOut(&fPathDevIBounds);
- fAccessRect = accessRect;
-}
-
void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
const uint32_t* opListIDs, int numOpListIDs,
- SkTArray<sk_sp<GrRenderTargetContext>>* results) {
- using PathInstance = GrCCPathProcessor::Instance;
-
+ SkTArray<sk_sp<GrRenderTargetContext>>* atlasDraws) {
SkASSERT(!fFlushing);
- SkASSERT(fFlushingRTPathIters.empty());
- SkASSERT(!fPerFlushIndexBuffer);
- SkASSERT(!fPerFlushVertexBuffer);
- SkASSERT(!fPerFlushInstanceBuffer);
- SkASSERT(!fPerFlushPathParser);
- SkASSERT(fPerFlushAtlases.empty());
+ SkASSERT(!fPerFlushResources);
SkDEBUGCODE(fFlushing = true);
if (fRTPendingPathsMap.empty()) {
return; // Nothing to draw.
}
- fPerFlushResourcesAreValid = false;
-
// Count up the paths about to be flushed so we can preallocate buffers.
int numPathDraws = 0;
int numClipPaths = 0;
@@ -278,13 +176,13 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
if (fRTPendingPathsMap.end() == iter) {
continue;
}
- const RTPendingPaths& rtPendingPaths = iter->second;
+ const GrCCRTPendingPaths& rtPendingPaths = iter->second;
- SkTInternalLList<DrawPathsOp>::Iter drawOpsIter;
+ SkTInternalLList<GrCCDrawPathsOp>::Iter drawOpsIter;
drawOpsIter.init(rtPendingPaths.fDrawOps,
- SkTInternalLList<DrawPathsOp>::Iter::kHead_IterStart);
- while (DrawPathsOp* op = drawOpsIter.get()) {
- for (const DrawPathsOp::SingleDraw* draw = op->head(); draw; draw = draw->fNext) {
+ SkTInternalLList<GrCCDrawPathsOp>::Iter::kHead_IterStart);
+ while (GrCCDrawPathsOp* op = drawOpsIter.get()) {
+ for (const GrCCDrawPathsOp::SingleDraw* draw = op->head(); draw; draw = draw->fNext) {
++numPathDraws;
flushingPathStats.statPath(draw->fPath);
}
@@ -303,225 +201,48 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
return; // Nothing to draw.
}
- // Allocate GPU buffers.
- fPerFlushIndexBuffer = GrCCPathProcessor::FindIndexBuffer(onFlushRP);
- if (!fPerFlushIndexBuffer) {
- SkDebugf("WARNING: failed to allocate ccpr path index buffer.\n");
- return;
- }
-
- fPerFlushVertexBuffer = GrCCPathProcessor::FindVertexBuffer(onFlushRP);
- if (!fPerFlushVertexBuffer) {
- SkDebugf("WARNING: failed to allocate ccpr path vertex buffer.\n");
- return;
+ auto resources = skstd::make_unique<GrCCPerFlushResources>(onFlushRP, numPathDraws,
+ numClipPaths, flushingPathStats);
+ if (!resources->isMapped()) {
+ return; // Some allocation failed.
}
- fPerFlushInstanceBuffer =
- onFlushRP->makeBuffer(kVertex_GrBufferType, numPathDraws * sizeof(PathInstance));
- if (!fPerFlushInstanceBuffer) {
- SkDebugf("WARNING: failed to allocate path instance buffer. No paths will be drawn.\n");
- return;
- }
-
- PathInstance* pathInstanceData = static_cast<PathInstance*>(fPerFlushInstanceBuffer->map());
- SkASSERT(pathInstanceData);
- int pathInstanceIdx = 0;
-
- fPerFlushPathParser = sk_make_sp<GrCCPathParser>(numPathDraws + numClipPaths,
- flushingPathStats);
+ // Layout atlas(es) and parse paths.
SkDEBUGCODE(int numSkippedPaths = 0);
+ for (int i = 0; i < numOpListIDs; ++i) {
+ auto it = fRTPendingPathsMap.find(opListIDs[i]);
+ if (fRTPendingPathsMap.end() == it) {
+ continue;
+ }
+ GrCCRTPendingPaths& rtPendingPaths = it->second;
- // Allocate atlas(es) and fill out GPU instance buffers.
- for (const auto& iter : fFlushingRTPathIters) {
- RTPendingPaths* rtPendingPaths = &iter->second;
-
- SkTInternalLList<DrawPathsOp>::Iter drawOpsIter;
- drawOpsIter.init(rtPendingPaths->fDrawOps,
- SkTInternalLList<DrawPathsOp>::Iter::kHead_IterStart);
- while (DrawPathsOp* op = drawOpsIter.get()) {
- pathInstanceIdx = op->setupResources(onFlushRP, pathInstanceData, pathInstanceIdx);
+ SkTInternalLList<GrCCDrawPathsOp>::Iter drawOpsIter;
+ drawOpsIter.init(rtPendingPaths.fDrawOps,
+ SkTInternalLList<GrCCDrawPathsOp>::Iter::kHead_IterStart);
+ while (GrCCDrawPathsOp* op = drawOpsIter.get()) {
+ op->setupResources(resources.get(), onFlushRP);
drawOpsIter.next();
SkDEBUGCODE(numSkippedPaths += op->numSkippedInstances_debugOnly());
}
- for (auto& clipsIter : rtPendingPaths->fClipPaths) {
- clipsIter.second.placePathInAtlas(this, onFlushRP, fPerFlushPathParser.get());
+ for (auto& clipsIter : rtPendingPaths.fClipPaths) {
+ clipsIter.second.placePathInAtlas(resources.get(), onFlushRP);
}
}
+ SkASSERT(resources->pathInstanceCount() == numPathDraws - numSkippedPaths);
- fPerFlushInstanceBuffer->unmap();
-
- SkASSERT(pathInstanceIdx == numPathDraws - numSkippedPaths);
-
- if (!fPerFlushAtlases.empty()) {
- auto coverageCountBatchID = fPerFlushPathParser->closeCurrentBatch();
- fPerFlushAtlases.back().setCoverageCountBatchID(coverageCountBatchID);
- }
-
- if (!fPerFlushPathParser->finalize(onFlushRP)) {
- SkDebugf("WARNING: failed to allocate GPU buffers for CCPR. No paths will be drawn.\n");
+ // Allocate the atlases and create instance buffers to draw them.
+ if (!resources->finalize(onFlushRP, atlasDraws)) {
return;
}
- // Draw the atlas(es).
- GrTAllocator<GrCCAtlas>::Iter atlasIter(&fPerFlushAtlases);
- while (atlasIter.next()) {
- if (auto rtc = atlasIter.get()->finalize(onFlushRP, fPerFlushPathParser)) {
- results->push_back(std::move(rtc));
- }
- }
-
- fPerFlushResourcesAreValid = true;
-}
-
-int CCPR::DrawPathsOp::setupResources(GrOnFlushResourceProvider* onFlushRP,
- GrCCPathProcessor::Instance* pathInstanceData,
- int pathInstanceIdx) {
- GrCCPathParser* parser = fCCPR->fPerFlushPathParser.get();
- const GrCCAtlas* currentAtlas = nullptr;
- SkASSERT(fInstanceCount > 0);
- SkASSERT(-1 == fBaseInstance);
- fBaseInstance = pathInstanceIdx;
-
- for (const SingleDraw* draw = this->head(); draw; draw = draw->fNext) {
- // parsePath gives us two tight bounding boxes: one in device space, as well as a second
- // one rotated an additional 45 degrees. The path vertex shader uses these two bounding
- // boxes to generate an octagon that circumscribes the path.
- SkRect devBounds, devBounds45;
- parser->parsePath(draw->fMatrix, draw->fPath, &devBounds, &devBounds45);
-
- SkIRect devIBounds;
- devBounds.roundOut(&devIBounds);
-
- int16_t offsetX, offsetY;
- GrCCAtlas* atlas = fCCPR->placeParsedPathInAtlas(onFlushRP, draw->fClipIBounds, devIBounds,
- &offsetX, &offsetY);
- if (!atlas) {
- SkDEBUGCODE(++fNumSkippedInstances);
- continue;
- }
- if (currentAtlas != atlas) {
- if (currentAtlas) {
- this->addAtlasBatch(currentAtlas, pathInstanceIdx);
- }
- currentAtlas = atlas;
- }
-
- const SkMatrix& m = draw->fMatrix;
- pathInstanceData[pathInstanceIdx++] = {
- devBounds,
- devBounds45,
- {{m.getScaleX(), m.getSkewY(), m.getSkewX(), m.getScaleY()}},
- {{m.getTranslateX(), m.getTranslateY()}},
- {{offsetX, offsetY}},
- draw->fColor};
- }
-
- SkASSERT(pathInstanceIdx == fBaseInstance + fInstanceCount - fNumSkippedInstances);
- if (currentAtlas) {
- this->addAtlasBatch(currentAtlas, pathInstanceIdx);
- }
-
- return pathInstanceIdx;
-}
-
-void CCPR::ClipPath::placePathInAtlas(GrCoverageCountingPathRenderer* ccpr,
- GrOnFlushResourceProvider* onFlushRP,
- GrCCPathParser* parser) {
- SkASSERT(!this->isUninitialized());
- SkASSERT(!fHasAtlas);
- parser->parseDeviceSpacePath(fDeviceSpacePath);
- fAtlas = ccpr->placeParsedPathInAtlas(onFlushRP, fAccessRect, fPathDevIBounds, &fAtlasOffsetX,
- &fAtlasOffsetY);
- SkDEBUGCODE(fHasAtlas = true);
-}
-
-GrCCAtlas* GrCoverageCountingPathRenderer::placeParsedPathInAtlas(
- GrOnFlushResourceProvider* onFlushRP,
- const SkIRect& clipIBounds,
- const SkIRect& pathIBounds,
- int16_t* atlasOffsetX,
- int16_t* atlasOffsetY) {
- using ScissorMode = GrCCPathParser::ScissorMode;
-
- ScissorMode scissorMode;
- SkIRect clippedPathIBounds;
- if (clipIBounds.contains(pathIBounds)) {
- clippedPathIBounds = pathIBounds;
- scissorMode = ScissorMode::kNonScissored;
- } else if (clippedPathIBounds.intersect(clipIBounds, pathIBounds)) {
- scissorMode = ScissorMode::kScissored;
- } else {
- fPerFlushPathParser->discardParsedPath();
- return nullptr;
- }
-
- SkIPoint16 atlasLocation;
- int h = clippedPathIBounds.height(), w = clippedPathIBounds.width();
- if (fPerFlushAtlases.empty() || !fPerFlushAtlases.back().addRect(w, h, &atlasLocation)) {
- if (!fPerFlushAtlases.empty()) {
- // The atlas is out of room and can't grow any bigger.
- auto coverageCountBatchID = fPerFlushPathParser->closeCurrentBatch();
- fPerFlushAtlases.back().setCoverageCountBatchID(coverageCountBatchID);
- }
- fPerFlushAtlases.emplace_back(*onFlushRP->caps(), SkTMax(w, h));
- SkAssertResult(fPerFlushAtlases.back().addRect(w, h, &atlasLocation));
- }
-
- *atlasOffsetX = atlasLocation.x() - static_cast<int16_t>(clippedPathIBounds.left());
- *atlasOffsetY = atlasLocation.y() - static_cast<int16_t>(clippedPathIBounds.top());
- fPerFlushPathParser->saveParsedPath(scissorMode, clippedPathIBounds, *atlasOffsetX,
- *atlasOffsetY);
-
- return &fPerFlushAtlases.back();
-}
-
-void CCPR::DrawPathsOp::onExecute(GrOpFlushState* flushState) {
- SkASSERT(fCCPR->fFlushing);
- SkASSERT(flushState->rtCommandBuffer());
-
- if (!fCCPR->fPerFlushResourcesAreValid) {
- return; // Setup failed.
- }
-
- SkASSERT(fBaseInstance >= 0); // Make sure setupResources has been called.
-
- GrPipeline::InitArgs initArgs;
- initArgs.fFlags = fSRGBFlags;
- initArgs.fProxy = flushState->drawOpArgs().fProxy;
- initArgs.fCaps = &flushState->caps();
- initArgs.fResourceProvider = flushState->resourceProvider();
- initArgs.fDstProxy = flushState->drawOpArgs().fDstProxy;
- GrPipeline pipeline(initArgs, std::move(fProcessors), flushState->detachAppliedClip());
-
- int baseInstance = fBaseInstance;
-
- for (int i = 0; i < fAtlasBatches.count(); baseInstance = fAtlasBatches[i++].fEndInstanceIdx) {
- const AtlasBatch& batch = fAtlasBatches[i];
- SkASSERT(batch.fEndInstanceIdx > baseInstance);
-
- if (!batch.fAtlas->textureProxy()) {
- continue; // Atlas failed to allocate.
- }
-
- GrCCPathProcessor pathProc(flushState->resourceProvider(),
- sk_ref_sp(batch.fAtlas->textureProxy()), this->getFillType());
- pathProc.drawPaths(flushState, pipeline, fCCPR->fPerFlushIndexBuffer.get(),
- fCCPR->fPerFlushVertexBuffer.get(), fCCPR->fPerFlushInstanceBuffer.get(),
- baseInstance, batch.fEndInstanceIdx, this->bounds());
- }
-
- SkASSERT(baseInstance == fBaseInstance + fInstanceCount - fNumSkippedInstances);
+ fPerFlushResources = std::move(resources);
}
void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
int numOpListIDs) {
SkASSERT(fFlushing);
- fPerFlushAtlases.reset();
- fPerFlushPathParser.reset();
- fPerFlushInstanceBuffer.reset();
- fPerFlushVertexBuffer.reset();
- fPerFlushIndexBuffer.reset();
+ fPerFlushResources.reset();
// We wait to erase these until after flush, once Ops and FPs are done accessing their data.
for (const auto& iter : fFlushingRTPathIters) {
fRTPendingPathsMap.erase(iter);
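
preFlush now runs two passes over the pending paths: a counting pass sizes the per-flush buffers up front, then a second pass parses paths and writes instances into the preallocated storage. A trivial standalone sketch of that shape (illustrative only, not the Skia API):

#include <cstdio>
#include <vector>

int main() {
    std::vector<std::vector<int>> pendingPerRT = {{3, 1}, {4}};  // paths per render target

    // Pass 1: count everything about to be flushed so buffers are allocated once.
    int numPathDraws = 0;
    for (const auto& rt : pendingPerRT) numPathDraws += (int)rt.size();

    std::vector<int> instanceBuffer;
    instanceBuffer.reserve(numPathDraws);  // analogous to the per-flush instance buffer

    // Pass 2: fill the preallocated storage; no reallocation invalidates pointers.
    for (const auto& rt : pendingPerRT)
        for (int path : rt) instanceBuffer.push_back(path);

    std::printf("flushing %d path draws\n", numPathDraws);
}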
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 0e9ed7b8b1..2eac93369c 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -8,180 +8,55 @@
#ifndef GrCoverageCountingPathRenderer_DEFINED
#define GrCoverageCountingPathRenderer_DEFINED
-#include <map>
-#include "GrAllocator.h"
-#include "GrOnFlushResourceProvider.h"
#include "GrPathRenderer.h"
+#include "GrRenderTargetOpList.h"
#include "SkTInternalLList.h"
-#include "ccpr/GrCCAtlas.h"
-#include "ccpr/GrCCPathParser.h"
-#include "ccpr/GrCCPathProcessor.h"
-#include "ops/GrDrawOp.h"
+#include "ccpr/GrCCClipPath.h"
+#include "ccpr/GrCCDrawPathsOp.h"
+#include <map>
+
+class GrCCPerFlushResources;
+
+/**
+ * Tracks all the paths in a single render target that will be drawn at next flush.
+ */
+struct GrCCRTPendingPaths {
+ ~GrCCRTPendingPaths() {
+ // Ensure there are no surviving DrawPathsOps with a dangling pointer into this class.
+ if (!fDrawOps.isEmpty()) {
+ SK_ABORT("GrCCDrawPathsOp(s) not deleted during flush");
+ }
+ // Clip lazy proxies also reference this class from their callbacks, but those callbacks
+ // are only invoked at flush time while we are still alive. (Unlike DrawPathsOps, that
+ // unregister themselves upon destruction.) So it shouldn't matter if any clip proxies
+ // are still around.
+ }
+
+ SkTInternalLList<GrCCDrawPathsOp> fDrawOps;
+ std::map<uint32_t, GrCCClipPath> fClipPaths;
+ GrSTAllocator<256, GrCCDrawPathsOp::SingleDraw> fDrawsAllocator;
+};
/**
* This is a path renderer that draws antialiased paths by counting coverage in an offscreen
- * buffer. (See GrCCCoverageProcessor, GrCCPathProcessor)
+ * buffer. (See GrCCCoverageProcessor, GrCCPathProcessor.)
*
* It also serves as the per-render-target tracker for pending path draws, and at the start of
* flush, it compiles GPU buffers and renders a "coverage count atlas" for the upcoming paths.
*/
class GrCoverageCountingPathRenderer : public GrPathRenderer, public GrOnFlushCallbackObject {
- struct RTPendingPaths;
-
public:
static bool IsSupported(const GrCaps&);
static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&,
bool drawCachablePaths);
-
- ~GrCoverageCountingPathRenderer() override {
- // Ensure no Ops exist that could have a dangling pointer back into this class.
- SkASSERT(fRTPendingPathsMap.empty());
- SkASSERT(0 == fPendingDrawOpsCount);
- }
-
- // This is the Op that ultimately draws a path into its final destination, using the atlas we
- // generate at flush time.
- class DrawPathsOp : public GrDrawOp {
- public:
- DEFINE_OP_CLASS_ID
- SK_DECLARE_INTERNAL_LLIST_INTERFACE(DrawPathsOp);
-
- DrawPathsOp(GrCoverageCountingPathRenderer*, const DrawPathArgs&, GrColor);
- ~DrawPathsOp() override;
-
- struct SingleDraw {
- SkIRect fClipIBounds;
- SkMatrix fMatrix;
- SkPath fPath;
- GrColor fColor;
- SingleDraw* fNext = nullptr;
- };
-
- const SingleDraw* head() const {
- SkASSERT(fInstanceCount >= 1);
- return &fHeadDraw;
- }
-
- SkDEBUGCODE(int numSkippedInstances_debugOnly() const { return fNumSkippedInstances; })
-
- // GrDrawOp overrides.
- const char* name() const override { return "GrCoverageCountingPathRenderer::DrawPathsOp"; }
- FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
- RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*,
- GrPixelConfigIsClamped) override;
- void wasRecorded(GrRenderTargetOpList*) override;
- bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override;
- void visitProxies(const VisitProxyFunc& func) const override {
- fProcessors.visitProxies(func);
- }
- void onPrepare(GrOpFlushState*) override {}
- void onExecute(GrOpFlushState*) override;
-
- int setupResources(GrOnFlushResourceProvider*,
- GrCCPathProcessor::Instance* pathInstanceData, int pathInstanceIdx);
-
- private:
- SkPath::FillType getFillType() const {
- SkASSERT(fInstanceCount >= 1);
- return fHeadDraw.fPath.getFillType();
- }
-
- struct AtlasBatch {
- const GrCCAtlas* fAtlas;
- int fEndInstanceIdx;
- };
-
- void addAtlasBatch(const GrCCAtlas* atlas, int endInstanceIdx) {
- SkASSERT(endInstanceIdx > fBaseInstance);
- SkASSERT(fAtlasBatches.empty() ||
- endInstanceIdx > fAtlasBatches.back().fEndInstanceIdx);
- fAtlasBatches.push_back() = {atlas, endInstanceIdx};
- }
-
- GrCoverageCountingPathRenderer* const fCCPR;
- const uint32_t fSRGBFlags;
- GrProcessorSet fProcessors;
- SingleDraw fHeadDraw;
- SingleDraw* fTailDraw;
- RTPendingPaths* fOwningRTPendingPaths;
- int fBaseInstance;
- SkDEBUGCODE(int fInstanceCount);
- SkDEBUGCODE(int fNumSkippedInstances);
- SkSTArray<1, AtlasBatch, true> fAtlasBatches;
-
- typedef GrDrawOp INHERITED;
- };
+ ~GrCoverageCountingPathRenderer() override;
// GrPathRenderer overrides.
StencilSupport onGetStencilSupport(const GrShape&) const override {
return GrPathRenderer::kNoSupport_StencilSupport;
}
CanDrawPath onCanDrawPath(const CanDrawPathArgs& args) const override;
- bool onDrawPath(const DrawPathArgs&) final;
-
- // These are keyed by SkPath generation ID, and store which device-space paths are accessed and
- // where by clip FPs in a given opList. A single ClipPath can be referenced by multiple FPs. At
- // flush time their coverage count masks are packed into atlas(es) alongside normal DrawPathOps.
- class ClipPath {
- public:
- ClipPath() = default;
- ClipPath(const ClipPath&) = delete;
-
- ~ClipPath() {
- // Ensure no clip FPs exist with a dangling pointer back into this class.
- SkASSERT(!fAtlasLazyProxy || fAtlasLazyProxy->isUnique_debugOnly());
- // Ensure no lazy proxy callbacks exist with a dangling pointer back into this class.
- SkASSERT(fHasAtlasTransform);
- }
-
- bool isUninitialized() const { return !fAtlasLazyProxy; }
- void init(GrProxyProvider* proxyProvider,
- const SkPath& deviceSpacePath, const SkIRect& accessRect,
- int rtWidth, int rtHeight);
- void addAccess(const SkIRect& accessRect) {
- SkASSERT(!this->isUninitialized());
- fAccessRect.join(accessRect);
- }
-
- GrTextureProxy* atlasLazyProxy() const {
- SkASSERT(!this->isUninitialized());
- return fAtlasLazyProxy.get();
- }
- const SkPath& deviceSpacePath() const {
- SkASSERT(!this->isUninitialized());
- return fDeviceSpacePath;
- }
- const SkIRect& pathDevIBounds() const {
- SkASSERT(!this->isUninitialized());
- return fPathDevIBounds;
- }
- void placePathInAtlas(GrCoverageCountingPathRenderer*, GrOnFlushResourceProvider*,
- GrCCPathParser*);
-
- const SkVector& atlasScale() const {
- SkASSERT(fHasAtlasTransform);
- return fAtlasScale;
- }
- const SkVector& atlasTranslate() const {
- SkASSERT(fHasAtlasTransform);
- return fAtlasTranslate;
- }
-
- private:
- sk_sp<GrTextureProxy> fAtlasLazyProxy;
- SkPath fDeviceSpacePath;
- SkIRect fPathDevIBounds;
- SkIRect fAccessRect;
-
- const GrCCAtlas* fAtlas = nullptr;
- int16_t fAtlasOffsetX;
- int16_t fAtlasOffsetY;
- SkDEBUGCODE(bool fHasAtlas = false);
-
- SkVector fAtlasScale;
- SkVector fAtlasTranslate;
- SkDEBUGCODE(bool fHasAtlasTransform = false);
- };
+ bool onDrawPath(const DrawPathArgs&) override;
std::unique_ptr<GrFragmentProcessor> makeClipProcessor(GrProxyProvider*, uint32_t oplistID,
const SkPath& deviceSpacePath,
@@ -190,48 +65,38 @@ public:
// GrOnFlushCallbackObject overrides.
void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
- SkTArray<sk_sp<GrRenderTargetContext>>* results) override;
+ SkTArray<sk_sp<GrRenderTargetContext>>* atlasDraws) override;
void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override;
+#ifdef SK_DEBUG
+ bool isFlushing_debugOnly() const { return fFlushing; }
+ void incrDrawOpCount_debugOnly() { ++fNumOutstandingDrawOps; }
+ void decrDrawOpCount_debugOnly() { --fNumOutstandingDrawOps; }
+#endif
+
private:
- GrCoverageCountingPathRenderer(bool drawCachablePaths)
- : fDrawCachablePaths(drawCachablePaths) {}
-
- GrCCAtlas* placeParsedPathInAtlas(GrOnFlushResourceProvider*, const SkIRect& accessRect,
- const SkIRect& pathIBounds, int16_t* atlasOffsetX,
- int16_t* atlasOffsetY);
-
- struct RTPendingPaths {
- ~RTPendingPaths() {
- // Ensure there are no surviving DrawPathsOps with a dangling pointer into this class.
- if (!fDrawOps.isEmpty()) {
- SK_ABORT("CCPR DrawPathsOp(s) not deleted during flush");
- }
- // Clip lazy proxies also reference this class from their callbacks, but those callbacks
- // are only invoked at flush time while we are still alive. (Unlike DrawPathsOps, that
- // unregister themselves upon destruction.) So it shouldn't matter if any clip proxies
- // are still around.
- }
+ GrCoverageCountingPathRenderer(bool drawCachablePaths);
- SkTInternalLList<DrawPathsOp> fDrawOps;
- std::map<uint32_t, ClipPath> fClipPaths;
- GrSTAllocator<256, DrawPathsOp::SingleDraw> fDrawsAllocator;
- };
-
- // A map from render target ID to the individual render target's pending paths.
- std::map<uint32_t, RTPendingPaths> fRTPendingPathsMap;
- SkSTArray<4, std::map<uint32_t, RTPendingPaths>::iterator> fFlushingRTPathIters;
- SkDEBUGCODE(int fPendingDrawOpsCount = 0);
-
- sk_sp<const GrBuffer> fPerFlushIndexBuffer;
- sk_sp<const GrBuffer> fPerFlushVertexBuffer;
- sk_sp<GrBuffer> fPerFlushInstanceBuffer;
- sk_sp<GrCCPathParser> fPerFlushPathParser;
- GrSTAllocator<4, GrCCAtlas> fPerFlushAtlases;
- bool fPerFlushResourcesAreValid;
+ GrCCRTPendingPaths* lookupRTPendingPaths(GrRenderTargetOpList* opList) {
+ SkASSERT(!fFlushing);
+ return &fRTPendingPathsMap[opList->uniqueID()];
+ }
+
+ const GrCCPerFlushResources* getPerFlushResources() const {
+ SkASSERT(fFlushing);
+ return fPerFlushResources.get();
+ }
+
+ std::map<uint32_t, GrCCRTPendingPaths> fRTPendingPathsMap;
+ SkSTArray<4, std::map<uint32_t, GrCCRTPendingPaths>::iterator> fFlushingRTPathIters;
+ std::unique_ptr<GrCCPerFlushResources> fPerFlushResources;
SkDEBUGCODE(bool fFlushing = false);
+ SkDEBUGCODE(int fNumOutstandingDrawOps = 0);
const bool fDrawCachablePaths;
+
+ friend void GrCCDrawPathsOp::wasRecorded(GrRenderTargetOpList*); // For lookupRTPendingPaths.
+ friend void GrCCDrawPathsOp::onExecute(GrOpFlushState*); // For getPerFlushResources.
};
#endif