about summary refs log tree commit diff homepage
diff options
context:
space:
mode:
authorGravatar Chris Dalton <csmartdalton@google.com>2018-06-14 10:14:50 -0600
committerGravatar Skia Commit-Bot <skia-commit-bot@chromium.org>2018-06-14 17:21:47 +0000
commit9414c96781d7f95a066b95261f333f0c0b46b39f (patch)
tree2f1831cb94e22a1f151dd907536a96e3061381c9
parent653f34da95f3717d048b1961760f09dc28138321 (diff)
ccpr: Generalize GrCCAtlas to work for cached atlases as well
Converts atlas offsets to SkIVector, adds a GrCCAtlasStack class, moves the Op that renders the atlases into GrCCPerFlushResources, etc.

Bug: skia:
Change-Id: I5110be8e74da709f3ce84bb6798ead572142d0fa
Reviewed-on: https://skia-review.googlesource.com/134701
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
-rw-r--r--include/private/GrCCClipPath.h3
-rw-r--r--src/gpu/GrRenderTargetContext.h2
-rw-r--r--src/gpu/ccpr/GrCCAtlas.cpp96
-rw-r--r--src/gpu/ccpr/GrCCAtlas.h69
-rw-r--r--src/gpu/ccpr/GrCCClipPath.cpp6
-rw-r--r--src/gpu/ccpr/GrCCDrawPathsOp.cpp10
-rw-r--r--src/gpu/ccpr/GrCCDrawPathsOp.h2
-rw-r--r--src/gpu/ccpr/GrCCPathParser.cpp46
-rw-r--r--src/gpu/ccpr/GrCCPathParser.h16
-rw-r--r--src/gpu/ccpr/GrCCPathProcessor.cpp8
-rw-r--r--src/gpu/ccpr/GrCCPathProcessor.h20
-rw-r--r--src/gpu/ccpr/GrCCPerFlushResources.cpp158
-rw-r--r--src/gpu/ccpr/GrCCPerFlushResources.h47
-rw-r--r--src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp4
-rw-r--r--src/gpu/ccpr/GrCoverageCountingPathRenderer.h2
15 files changed, 274 insertions, 215 deletions
diff --git a/include/private/GrCCClipPath.h b/include/private/GrCCClipPath.h
index d845e3e6df..7c39c45d90 100644
--- a/include/private/GrCCClipPath.h
+++ b/include/private/GrCCClipPath.h
@@ -69,8 +69,7 @@ private:
SkIRect fAccessRect;
const GrCCAtlas* fAtlas = nullptr;
- int16_t fAtlasOffsetX;
- int16_t fAtlasOffsetY;
+ SkIVector fDevToAtlasOffset; // Translation from device space to location in atlas.
SkDEBUGCODE(bool fHasAtlas = false);
SkVector fAtlasScale;
diff --git a/src/gpu/GrRenderTargetContext.h b/src/gpu/GrRenderTargetContext.h
index d0b4a7d86f..783b351b06 100644
--- a/src/gpu/GrRenderTargetContext.h
+++ b/src/gpu/GrRenderTargetContext.h
@@ -404,7 +404,7 @@ private:
friend class GrDefaultPathRenderer; // for access to add[Mesh]DrawOp
friend class GrStencilAndCoverPathRenderer; // for access to add[Mesh]DrawOp
friend class GrTessellatingPathRenderer; // for access to add[Mesh]DrawOp
- friend class GrCCAtlas; // for access to addDrawOp
+ friend class GrCCPerFlushResources; // for access to addDrawOp
friend class GrCoverageCountingPathRenderer; // for access to addDrawOp
// for a unit test
friend void test_draw_op(GrContext*,
diff --git a/src/gpu/ccpr/GrCCAtlas.cpp b/src/gpu/ccpr/GrCCAtlas.cpp
index 551b2a048e..cbf6993f04 100644
--- a/src/gpu/ccpr/GrCCAtlas.cpp
+++ b/src/gpu/ccpr/GrCCAtlas.cpp
@@ -7,19 +7,13 @@
#include "GrCCAtlas.h"
-#include "GrClip.h"
-#include "GrMemoryPool.h"
+#include "GrCaps.h"
#include "GrOnFlushResourceProvider.h"
-#include "GrSurfaceContextPriv.h"
#include "GrRectanizer_skyline.h"
#include "GrRenderTargetContext.h"
-#include "GrSurfaceContextPriv.h"
#include "GrTextureProxy.h"
#include "SkMakeUnique.h"
#include "SkMathPriv.h"
-#include "ccpr/GrCCCoverageProcessor.h"
-#include "ccpr/GrCCPathParser.h"
-#include "ops/GrDrawOp.h"
class GrCCAtlas::Node {
public:
@@ -50,50 +44,6 @@ private:
GrRectanizerSkyline fRectanizer;
};
-class GrCCAtlas::DrawCoverageCountOp : public GrDrawOp {
-public:
- DEFINE_OP_CLASS_ID
-
- static std::unique_ptr<GrDrawOp> Make(GrContext* context,
- sk_sp<const GrCCPathParser> parser,
- CoverageCountBatchID batchID,
- const SkISize& drawBounds) {
- return std::unique_ptr<GrDrawOp>(new DrawCoverageCountOp(std::move(parser),
- batchID, drawBounds));
- }
-
- // GrDrawOp interface.
- const char* name() const override { return "GrCCAtlas::DrawCoverageCountOp"; }
- FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
- RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*,
- GrPixelConfigIsClamped) override { return RequiresDstTexture::kNo; }
- bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override { return false; }
- void onPrepare(GrOpFlushState*) override {}
- void onExecute(GrOpFlushState* flushState) override {
- fParser->drawCoverageCount(flushState, fBatchID,
- SkIRect::MakeWH(fDrawBounds.width(), fDrawBounds.height()));
- }
-
-private:
- friend class GrOpMemoryPool; // for ctor
-
- DrawCoverageCountOp(sk_sp<const GrCCPathParser> parser, CoverageCountBatchID batchID,
- const SkISize& drawBounds)
- : INHERITED(ClassID())
- , fParser(std::move(parser))
- , fBatchID(batchID)
- , fDrawBounds(drawBounds) {
- this->setBounds(SkRect::MakeIWH(fDrawBounds.width(), fDrawBounds.height()),
- GrOp::HasAABloat::kNo, GrOp::IsZeroArea::kNo);
- }
-
- const sk_sp<const GrCCPathParser> fParser;
- const CoverageCountBatchID fBatchID;
- const SkISize fDrawBounds;
-
- typedef GrDrawOp INHERITED;
-};
-
GrCCAtlas::GrCCAtlas(const Specs& specs)
: fMaxTextureSize(SkTMax(SkTMax(specs.fMinHeight, specs.fMinWidth),
specs.fMaxPreferredTextureSize)) {
@@ -121,17 +71,18 @@ GrCCAtlas::GrCCAtlas(const Specs& specs)
GrCCAtlas::~GrCCAtlas() {
}
-bool GrCCAtlas::addRect(int w, int h, SkIPoint16* loc) {
- // This can't be called anymore once setCoverageCountBatchID() has been called.
- SkASSERT(!fCoverageCountBatchID);
+bool GrCCAtlas::addRect(const SkIRect& devIBounds, SkIVector* offset) {
+ // This can't be called anymore once makeClearedTextureProxy() has been called.
SkASSERT(!fTextureProxy);
- if (!this->internalPlaceRect(w, h, loc)) {
+ SkIPoint16 location;
+ if (!this->internalPlaceRect(devIBounds.width(), devIBounds.height(), &location)) {
return false;
}
+ offset->set(location.x() - devIBounds.left(), location.y() - devIBounds.top());
- fDrawBounds.fWidth = SkTMax(fDrawBounds.width(), loc->x() + w);
- fDrawBounds.fHeight = SkTMax(fDrawBounds.height(), loc->y() + h);
+ fDrawBounds.fWidth = SkTMax(fDrawBounds.width(), location.x() + devIBounds.width());
+ fDrawBounds.fHeight = SkTMax(fDrawBounds.height(), location.y() + devIBounds.height());
return true;
}
@@ -161,21 +112,21 @@ bool GrCCAtlas::internalPlaceRect(int w, int h, SkIPoint16* loc) {
return true;
}
-sk_sp<GrRenderTargetContext> GrCCAtlas::finalize(GrOnFlushResourceProvider* onFlushRP,
- sk_sp<const GrCCPathParser> parser) {
- SkASSERT(fCoverageCountBatchID);
+sk_sp<GrRenderTargetContext> GrCCAtlas::initInternalTextureProxy(
+ GrOnFlushResourceProvider* onFlushRP, GrPixelConfig config) {
SkASSERT(!fTextureProxy);
// Caller should have cropped any paths to the destination render target instead of asking for
// an atlas larger than maxRenderTargetSize.
+ SkASSERT(SkTMax(fHeight, fWidth) <= fMaxTextureSize);
SkASSERT(fMaxTextureSize <= onFlushRP->caps()->maxRenderTargetSize());
GrSurfaceDesc desc;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = fWidth;
desc.fHeight = fHeight;
- desc.fConfig = kAlpha_half_GrPixelConfig;
+ desc.fConfig = config;
sk_sp<GrRenderTargetContext> rtc =
- onFlushRP->makeRenderTargetContext(desc, kTopLeft_GrSurfaceOrigin, nullptr, nullptr);
+ onFlushRP->makeRenderTargetContext(desc, kTextureOrigin, nullptr, nullptr);
if (!rtc) {
SkDebugf("WARNING: failed to allocate a %ix%i atlas. Some paths will not be drawn.\n",
fWidth, fHeight);
@@ -185,14 +136,19 @@ sk_sp<GrRenderTargetContext> GrCCAtlas::finalize(GrOnFlushResourceProvider* onFl
SkIRect clearRect = SkIRect::MakeSize(fDrawBounds);
rtc->clear(&clearRect, 0, GrRenderTargetContext::CanClearFullscreen::kYes);
- GrContext* context = rtc->surfPriv().getContext();
-
- std::unique_ptr<GrDrawOp> op = DrawCoverageCountOp::Make(context,
- std::move(parser),
- fCoverageCountBatchID,
- fDrawBounds);
- rtc->addDrawOp(GrNoClip(), std::move(op));
-
fTextureProxy = sk_ref_sp(rtc->asTextureProxy());
return rtc;
}
+
+GrCCAtlas* GrCCAtlasStack::addRect(const SkIRect& devIBounds, SkIVector* offset) {
+ GrCCAtlas* retiredAtlas = nullptr;
+ if (fAtlases.empty() || !fAtlases.back().addRect(devIBounds, offset)) {
+ // The retired atlas is out of room and can't grow any bigger.
+ retiredAtlas = !fAtlases.empty() ? &fAtlases.back() : nullptr;
+ fAtlases.emplace_back(fSpecs);
+ SkASSERT(devIBounds.width() <= fSpecs.fMinWidth);
+ SkASSERT(devIBounds.height() <= fSpecs.fMinHeight);
+ SkAssertResult(fAtlases.back().addRect(devIBounds, offset));
+ }
+ return retiredAtlas;
+}
diff --git a/src/gpu/ccpr/GrCCAtlas.h b/src/gpu/ccpr/GrCCAtlas.h
index b30bdbec6f..d4a07f54a7 100644
--- a/src/gpu/ccpr/GrCCAtlas.h
+++ b/src/gpu/ccpr/GrCCAtlas.h
@@ -8,12 +8,12 @@
#ifndef GrCCAtlas_DEFINED
#define GrCCAtlas_DEFINED
+#include "GrAllocator.h"
+#include "GrTypes.h"
+#include "GrTypesPriv.h"
#include "SkRefCnt.h"
#include "SkSize.h"
-class GrCaps;
-class GrCCPathParser;
-class GrDrawOp;
class GrOnFlushResourceProvider;
class GrRenderTargetContext;
class GrTextureProxy;
@@ -26,7 +26,8 @@ struct SkIPoint16;
*/
class GrCCAtlas {
public:
- using CoverageCountBatchID = int;
+ // As long as GrSurfaceOrigin exists, we just have to decide on one for the atlas texture.
+ static constexpr GrSurfaceOrigin kTextureOrigin = kTopLeft_GrSurfaceOrigin;
static constexpr int kPadding = 1; // Amount of padding below and to the right of each path.
// This struct encapsulates the minimum and desired requirements for an atlas, as well as an
@@ -45,23 +46,25 @@ public:
GrCCAtlas(const Specs&);
~GrCCAtlas();
- bool addRect(int devWidth, int devHeight, SkIPoint16* loc);
+ // Attempts to add a rect to the atlas. If successful, returns the integer offset from
+ // device-space pixels where the path will be drawn, to atlas pixels where its mask resides.
+ bool addRect(const SkIRect& devIBounds, SkIVector* atlasOffset);
const SkISize& drawBounds() { return fDrawBounds; }
- void setCoverageCountBatchID(CoverageCountBatchID batchID) {
- SkASSERT(!fCoverageCountBatchID);
- SkASSERT(!fTextureProxy);
- fCoverageCountBatchID = batchID;
- }
-
- sk_sp<GrRenderTargetContext> SK_WARN_UNUSED_RESULT finalize(GrOnFlushResourceProvider*,
- sk_sp<const GrCCPathParser>);
+ // This is an optional space for the caller to jot down which user-defined batch to use when
+ // they render the content of this atlas.
+ void setUserBatchID(int id) { SkASSERT(!fTextureProxy); fUserBatchID = id; }
+ int getUserBatchID() const { return fUserBatchID; }
+ // Creates our texture proxy for the atlas and returns a pre-cleared GrRenderTargetContext that
+ // the caller may use to render the content. After this call, it is no longer valid to call
+ // addRect() or setUserBatchID().
+ sk_sp<GrRenderTargetContext> initInternalTextureProxy(GrOnFlushResourceProvider*,
+ GrPixelConfig);
GrTextureProxy* textureProxy() const { return fTextureProxy.get(); }
private:
class Node;
- class DrawCoverageCountOp;
bool internalPlaceRect(int w, int h, SkIPoint16* loc);
@@ -70,10 +73,46 @@ private:
std::unique_ptr<Node> fTopNode;
SkISize fDrawBounds = {0, 0};
- CoverageCountBatchID fCoverageCountBatchID SkDEBUGCODE(= 0);
+ int fUserBatchID;
sk_sp<GrTextureProxy> fTextureProxy;
};
+/**
+ * This class implements an unbounded stack of atlases. When the current atlas reaches the
+ * implementation-dependent max texture size, a new one is pushed to the back and we continue on.
+ */
+class GrCCAtlasStack {
+public:
+ GrCCAtlasStack(const GrCCAtlas::Specs& specs) : fSpecs(specs) {}
+
+ bool empty() const { return fAtlases.empty(); }
+ const GrCCAtlas& front() const { SkASSERT(!this->empty()); return fAtlases.front(); }
+ GrCCAtlas& front() { SkASSERT(!this->empty()); return fAtlases.front(); }
+ GrCCAtlas& current() { SkASSERT(!this->empty()); return fAtlases.back(); }
+
+ class Iter {
+ public:
+ Iter(GrCCAtlasStack& stack) : fImpl(&stack.fAtlases) {}
+ bool next() { return fImpl.next(); }
+ GrCCAtlas* operator->() const { return fImpl.get(); }
+ private:
+ typename GrTAllocator<GrCCAtlas>::Iter fImpl;
+ };
+
+ // Adds a rect to the current atlas and returns the offset from device space to atlas space.
+ // Call current() to get the atlas it was added to.
+ //
+ // If the return value is non-null, it means the given rect did not fit in the then-current
+ // atlas, so it was retired and a new one was added to the stack. The return value is the
+ // newly-retired atlas. The caller should call setUserBatchID() on the retired atlas before
+ // moving on.
+ GrCCAtlas* addRect(const SkIRect& devIBounds, SkIVector* offset);
+
+private:
+ const GrCCAtlas::Specs fSpecs;
+ GrSTAllocator<4, GrCCAtlas> fAtlases;
+};
+
inline void GrCCAtlas::Specs::accountForSpace(int width, int height) {
fMinWidth = SkTMax(width, fMinWidth);
fMinHeight = SkTMax(height, fMinHeight);
diff --git a/src/gpu/ccpr/GrCCClipPath.cpp b/src/gpu/ccpr/GrCCClipPath.cpp
index c98865765e..77674e4539 100644
--- a/src/gpu/ccpr/GrCCClipPath.cpp
+++ b/src/gpu/ccpr/GrCCClipPath.cpp
@@ -35,8 +35,8 @@ void GrCCClipPath::init(GrProxyProvider* proxyProvider,
SkASSERT(kTopLeft_GrSurfaceOrigin == textureProxy->origin());
fAtlasScale = {1.f / textureProxy->width(), 1.f / textureProxy->height()};
- fAtlasTranslate = {fAtlasOffsetX * fAtlasScale.x(),
- fAtlasOffsetY * fAtlasScale.y()};
+ fAtlasTranslate.set(fDevToAtlasOffset.fX * fAtlasScale.x(),
+ fDevToAtlasOffset.fY * fAtlasScale.y());
SkDEBUGCODE(fHasAtlasTransform = true);
return sk_ref_sp(textureProxy->priv().peekTexture());
@@ -65,6 +65,6 @@ void GrCCClipPath::renderPathInAtlas(GrCCPerFlushResources* resources,
SkASSERT(this->isInitialized());
SkASSERT(!fHasAtlas);
fAtlas = resources->renderDeviceSpacePathInAtlas(fAccessRect, fDeviceSpacePath, fPathDevIBounds,
- &fAtlasOffsetX, &fAtlasOffsetY);
+ &fDevToAtlasOffset);
SkDEBUGCODE(fHasAtlas = true);
}
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index f29fca2f04..16a2c663a5 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -114,10 +114,10 @@ void GrCCDrawPathsOp::setupResources(GrCCPerFlushResources* resources,
// second one rotated an additional 45 degrees. The path vertex shader uses these two
// bounding boxes to generate an octagon that circumscribes the path.
SkRect devBounds, devBounds45;
- int16_t atlasOffsetX, atlasOffsetY;
- GrCCAtlas* atlas = resources->renderPathInAtlas(draw.fLooseClippedIBounds, draw.fMatrix,
- draw.fPath, &devBounds, &devBounds45,
- &atlasOffsetX, &atlasOffsetY);
+ SkIVector devToAtlasOffset;
+ const GrCCAtlas* atlas = resources->renderPathInAtlas(draw.fLooseClippedIBounds,
+ draw.fMatrix, draw.fPath, &devBounds,
+ &devBounds45, &devToAtlasOffset);
if (!atlas) {
SkDEBUGCODE(++fNumSkippedInstances);
continue;
@@ -130,7 +130,7 @@ void GrCCDrawPathsOp::setupResources(GrCCPerFlushResources* resources,
}
resources->appendDrawPathInstance().set(draw.fPath.getFillType(), devBounds, devBounds45,
- atlasOffsetX, atlasOffsetY, draw.fColor);
+ devToAtlasOffset, draw.fColor);
}
SkASSERT(resources->nextPathInstanceIdx() == fBaseInstance + fNumDraws - fNumSkippedInstances);
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.h b/src/gpu/ccpr/GrCCDrawPathsOp.h
index b46039d3c8..87cd50e1e0 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.h
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.h
@@ -51,7 +51,7 @@ public:
private:
friend class GrOpMemoryPool;
- GrCCDrawPathsOp(const SkIRect& clippedDevIBounds, const SkMatrix&, const SkPath&,
+ GrCCDrawPathsOp(const SkIRect& looseClippedIBounds, const SkMatrix&, const SkPath&,
const SkRect& devBounds, GrPaint&&);
struct AtlasBatch {
diff --git a/src/gpu/ccpr/GrCCPathParser.cpp b/src/gpu/ccpr/GrCCPathParser.cpp
index b29bec7cb1..6f9b2240d7 100644
--- a/src/gpu/ccpr/GrCCPathParser.cpp
+++ b/src/gpu/ccpr/GrCCPathParser.cpp
@@ -165,10 +165,10 @@ void GrCCPathParser::endContourIfNeeded(bool insideContour) {
}
void GrCCPathParser::saveParsedPath(ScissorMode scissorMode, const SkIRect& clippedDevIBounds,
- int16_t atlasOffsetX, int16_t atlasOffsetY) {
+ const SkIVector& devToAtlasOffset) {
SkASSERT(fParsingPath);
- fPathsInfo.emplace_back(scissorMode, atlasOffsetX, atlasOffsetY);
+ fPathsInfo.emplace_back(scissorMode, devToAtlasOffset);
// Tessellate fans from very large and/or simple paths, in order to reduce overdraw.
int numVerbs = fGeometry.verbs().count() - fCurrPathVerbsIdx - 1;
@@ -254,7 +254,8 @@ void GrCCPathParser::saveParsedPath(ScissorMode scissorMode, const SkIRect& clip
if (ScissorMode::kScissored == scissorMode) {
fScissorSubBatches.push_back() = {fTotalPrimitiveCounts[(int)ScissorMode::kScissored],
- clippedDevIBounds.makeOffset(atlasOffsetX, atlasOffsetY)};
+ clippedDevIBounds.makeOffset(devToAtlasOffset.fX,
+ devToAtlasOffset.fY)};
}
SkDEBUGCODE(fParsingPath = false);
@@ -304,7 +305,7 @@ GrCCPathParser::CoverageCountBatchID GrCCPathParser::closeCurrentBatch() {
// Returns the next triangle instance after the final one emitted.
static TriPointInstance* emit_recursive_fan(const SkTArray<SkPoint, true>& pts,
SkTArray<int32_t, true>& indices, int firstIndex,
- int indexCount, const Sk2f& atlasOffset,
+ int indexCount, const Sk2f& devToAtlasOffset,
TriPointInstance out[]) {
if (indexCount < 3) {
return out;
@@ -313,33 +314,34 @@ static TriPointInstance* emit_recursive_fan(const SkTArray<SkPoint, true>& pts,
int32_t oneThirdCount = indexCount / 3;
int32_t twoThirdsCount = (2 * indexCount) / 3;
out++->set(pts[indices[firstIndex]], pts[indices[firstIndex + oneThirdCount]],
- pts[indices[firstIndex + twoThirdsCount]], atlasOffset);
+ pts[indices[firstIndex + twoThirdsCount]], devToAtlasOffset);
- out = emit_recursive_fan(pts, indices, firstIndex, oneThirdCount + 1, atlasOffset, out);
+ out = emit_recursive_fan(pts, indices, firstIndex, oneThirdCount + 1, devToAtlasOffset, out);
out = emit_recursive_fan(pts, indices, firstIndex + oneThirdCount,
- twoThirdsCount - oneThirdCount + 1, atlasOffset, out);
+ twoThirdsCount - oneThirdCount + 1, devToAtlasOffset, out);
int endIndex = firstIndex + indexCount;
int32_t oldValue = indices[endIndex];
indices[endIndex] = indices[firstIndex];
out = emit_recursive_fan(pts, indices, firstIndex + twoThirdsCount,
- indexCount - twoThirdsCount + 1, atlasOffset, out);
+ indexCount - twoThirdsCount + 1, devToAtlasOffset, out);
indices[endIndex] = oldValue;
return out;
}
static void emit_tessellated_fan(const GrTessellator::WindingVertex* vertices, int numVertices,
- const Sk2f& atlasOffset, TriPointInstance* triPointInstanceData,
+ const Sk2f& devToAtlasOffset,
+ TriPointInstance* triPointInstanceData,
QuadPointInstance* quadPointInstanceData,
GrCCGeometry::PrimitiveTallies* indices) {
for (int i = 0; i < numVertices; i += 3) {
if (1 == abs(vertices[i].fWinding)) {
triPointInstanceData[indices->fTriangles++].set(vertices[i].fPos, vertices[i + 1].fPos,
- vertices[i + 2].fPos, atlasOffset);
+ vertices[i + 2].fPos, devToAtlasOffset);
} else {
quadPointInstanceData[indices->fWeightedTriangles++].setW(
- vertices[i].fPos, vertices[i+1].fPos, vertices[i + 2].fPos, atlasOffset,
+ vertices[i].fPos, vertices[i+1].fPos, vertices[i + 2].fPos, devToAtlasOffset,
static_cast<float>(abs(vertices[i].fWinding)));
}
}
@@ -400,8 +402,7 @@ bool GrCCPathParser::finalize(GrOnFlushResourceProvider* onFlushRP) {
SkASSERT(quadPointInstanceData);
PathInfo* nextPathInfo = fPathsInfo.begin();
- float atlasOffsetX = 0.0, atlasOffsetY = 0.0;
- Sk2f atlasOffset;
+ Sk2f devToAtlasOffset;
PrimitiveTallies instanceIndices[2] = {fBaseInstances[0], fBaseInstances[1]};
PrimitiveTallies* currIndices = nullptr;
SkSTArray<256, int32_t, true> currFan;
@@ -417,13 +418,12 @@ bool GrCCPathParser::finalize(GrOnFlushResourceProvider* onFlushRP) {
case GrCCGeometry::Verb::kBeginPath:
SkASSERT(currFan.empty());
currIndices = &instanceIndices[(int)nextPathInfo->scissorMode()];
- atlasOffsetX = static_cast<float>(nextPathInfo->atlasOffsetX());
- atlasOffsetY = static_cast<float>(nextPathInfo->atlasOffsetY());
- atlasOffset = {atlasOffsetX, atlasOffsetY};
+ devToAtlasOffset = Sk2f(static_cast<float>(nextPathInfo->devToAtlasOffset().fX),
+ static_cast<float>(nextPathInfo->devToAtlasOffset().fY));
currFanIsTessellated = nextPathInfo->hasFanTessellation();
if (currFanIsTessellated) {
emit_tessellated_fan(nextPathInfo->fanTessellation(),
- nextPathInfo->fanTessellationCount(), atlasOffset,
+ nextPathInfo->fanTessellationCount(), devToAtlasOffset,
triPointInstanceData, quadPointInstanceData, currIndices);
}
++nextPathInfo;
@@ -446,7 +446,8 @@ bool GrCCPathParser::finalize(GrOnFlushResourceProvider* onFlushRP) {
continue;
case GrCCGeometry::Verb::kMonotonicQuadraticTo:
- triPointInstanceData[currIndices->fQuadratics++].set(&pts[ptsIdx], atlasOffset);
+ triPointInstanceData[currIndices->fQuadratics++].set(&pts[ptsIdx],
+ devToAtlasOffset);
ptsIdx += 2;
if (!currFanIsTessellated) {
SkASSERT(!currFan.empty());
@@ -455,8 +456,8 @@ bool GrCCPathParser::finalize(GrOnFlushResourceProvider* onFlushRP) {
continue;
case GrCCGeometry::Verb::kMonotonicCubicTo:
- quadPointInstanceData[currIndices->fCubics++].set(&pts[ptsIdx], atlasOffsetX,
- atlasOffsetY);
+ quadPointInstanceData[currIndices->fCubics++].set(&pts[ptsIdx], devToAtlasOffset[0],
+ devToAtlasOffset[1]);
ptsIdx += 3;
if (!currFanIsTessellated) {
SkASSERT(!currFan.empty());
@@ -466,7 +467,8 @@ bool GrCCPathParser::finalize(GrOnFlushResourceProvider* onFlushRP) {
case GrCCGeometry::Verb::kMonotonicConicTo:
quadPointInstanceData[currIndices->fConics++].setW(
- &pts[ptsIdx], atlasOffset, fGeometry.getConicWeight(nextConicWeightIdx));
+ &pts[ptsIdx], devToAtlasOffset,
+ fGeometry.getConicWeight(nextConicWeightIdx));
ptsIdx += 2;
++nextConicWeightIdx;
if (!currFanIsTessellated) {
@@ -489,7 +491,7 @@ bool GrCCPathParser::finalize(GrOnFlushResourceProvider* onFlushRP) {
// fanSize + log3(fanSize), but we approximate with log2.
currFan.push_back_n(SkNextLog2(fanSize));
SkDEBUGCODE(TriPointInstance* end =)
- emit_recursive_fan(pts, currFan, 0, fanSize, atlasOffset,
+ emit_recursive_fan(pts, currFan, 0, fanSize, devToAtlasOffset,
triPointInstanceData + currIndices->fTriangles);
currIndices->fTriangles += fanSize - 2;
SkASSERT(triPointInstanceData + currIndices->fTriangles == end);
diff --git a/src/gpu/ccpr/GrCCPathParser.h b/src/gpu/ccpr/GrCCPathParser.h
index b48a0b8adf..1c09b98024 100644
--- a/src/gpu/ccpr/GrCCPathParser.h
+++ b/src/gpu/ccpr/GrCCPathParser.h
@@ -9,7 +9,6 @@
#define GrCCPathParser_DEFINED
#include "GrMesh.h"
-#include "GrNonAtomicRef.h"
#include "SkPath.h"
#include "SkPathPriv.h"
#include "SkRect.h"
@@ -27,7 +26,7 @@ class SkPath;
* This class parses SkPaths into CCPR primitives in GPU buffers, then issues calls to draw their
* coverage counts.
*/
-class GrCCPathParser : public GrNonAtomicRef<GrCCPathParser> {
+class GrCCPathParser {
public:
// Indicates whether a path should enforce a scissor clip when rendering its mask. (Specified
// as an int because these values get used directly as indices into arrays.)
@@ -68,8 +67,8 @@ public:
// Commits the currently-parsed path from staging to the current batch, and specifies whether
// the mask should be rendered with a scissor in effect. Accepts an optional post-device-space
// translate for placement in an atlas.
- void saveParsedPath(ScissorMode, const SkIRect& clippedDevIBounds, int16_t atlasOffsetX,
- int16_t atlasOffsetY);
+ void saveParsedPath(ScissorMode, const SkIRect& clippedDevIBounds,
+ const SkIVector& devToAtlasOffset);
void discardParsedPath();
// Compiles the outstanding saved paths into a batch, and returns an ID that can be used to draw
@@ -89,12 +88,11 @@ private:
// Every kBeginPath verb has a corresponding PathInfo entry.
class PathInfo {
public:
- PathInfo(ScissorMode scissorMode, int16_t offsetX, int16_t offsetY)
- : fScissorMode(scissorMode), fAtlasOffsetX(offsetX), fAtlasOffsetY(offsetY) {}
+ PathInfo(ScissorMode scissorMode, const SkIVector& devToAtlasOffset)
+ : fScissorMode(scissorMode), fDevToAtlasOffset(devToAtlasOffset) {}
ScissorMode scissorMode() const { return fScissorMode; }
- int16_t atlasOffsetX() const { return fAtlasOffsetX; }
- int16_t atlasOffsetY() const { return fAtlasOffsetY; }
+ const SkIVector& devToAtlasOffset() const { return fDevToAtlasOffset; }
// An empty tessellation fan is also valid; we use negative count to denote not tessellated.
bool hasFanTessellation() const { return fFanTessellationCount >= 0; }
@@ -115,7 +113,7 @@ private:
private:
ScissorMode fScissorMode;
- int16_t fAtlasOffsetX, fAtlasOffsetY;
+ SkIVector fDevToAtlasOffset; // Translation from device space to location in atlas.
int fFanTessellationCount = -1;
std::unique_ptr<const GrTessellator::WindingVertex[]> fFanTessellation;
};
diff --git a/src/gpu/ccpr/GrCCPathProcessor.cpp b/src/gpu/ccpr/GrCCPathProcessor.cpp
index 5edc45ff69..39d5ef2819 100644
--- a/src/gpu/ccpr/GrCCPathProcessor.cpp
+++ b/src/gpu/ccpr/GrCCPathProcessor.cpp
@@ -83,15 +83,15 @@ GrCCPathProcessor::GrCCPathProcessor(GrResourceProvider* resourceProvider,
GrSamplerState::WrapMode::kClamp, kFragment_GrShaderFlag) {
this->addInstanceAttrib("devbounds", kFloat4_GrVertexAttribType);
this->addInstanceAttrib("devbounds45", kFloat4_GrVertexAttribType);
- this->addInstanceAttrib("atlas_offset", kShort2_GrVertexAttribType);
+ this->addInstanceAttrib("dev_to_atlas_offset", kInt2_GrVertexAttribType);
this->addInstanceAttrib("color", kUByte4_norm_GrVertexAttribType);
SkASSERT(offsetof(Instance, fDevBounds) ==
this->getInstanceAttrib(InstanceAttribs::kDevBounds).offsetInRecord());
SkASSERT(offsetof(Instance, fDevBounds45) ==
this->getInstanceAttrib(InstanceAttribs::kDevBounds45).offsetInRecord());
- SkASSERT(offsetof(Instance, fAtlasOffset) ==
- this->getInstanceAttrib(InstanceAttribs::kAtlasOffset).offsetInRecord());
+ SkASSERT(offsetof(Instance, fDevToAtlasOffset) ==
+ this->getInstanceAttrib(InstanceAttribs::kDevToAtlasOffset).offsetInRecord());
SkASSERT(offsetof(Instance, fColor) ==
this->getInstanceAttrib(InstanceAttribs::kColor).offsetInRecord());
SkASSERT(sizeof(Instance) == this->getInstanceStride());
@@ -208,7 +208,7 @@ void GLSLPathProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
// Convert to atlas coordinates in order to do our texture lookup.
v->codeAppendf("float2 atlascoord = octocoord + float2(%s);",
- proc.getInstanceAttrib(InstanceAttribs::kAtlasOffset).name());
+ proc.getInstanceAttrib(InstanceAttribs::kDevToAtlasOffset).name());
if (kTopLeft_GrSurfaceOrigin == proc.atlasProxy()->origin()) {
v->codeAppendf("%s.xy = atlascoord * %s;", texcoord.vsOut(), atlasAdjust);
} else {
diff --git a/src/gpu/ccpr/GrCCPathProcessor.h b/src/gpu/ccpr/GrCCPathProcessor.h
index 9d97bad91a..9ea0a164a0 100644
--- a/src/gpu/ccpr/GrCCPathProcessor.h
+++ b/src/gpu/ccpr/GrCCPathProcessor.h
@@ -32,23 +32,23 @@ public:
enum class InstanceAttribs {
kDevBounds,
kDevBounds45,
- kAtlasOffset,
+ kDevToAtlasOffset,
kColor
};
static constexpr int kNumInstanceAttribs = 1 + (int)InstanceAttribs::kColor;
struct Instance {
- SkRect fDevBounds; // "right < left" indicates even-odd fill type.
- SkRect fDevBounds45; // Bounding box in "| 1 -1 | * devCoords" space.
- // | 1 1 |
- std::array<int16_t, 2> fAtlasOffset;
+ SkRect fDevBounds; // "right < left" indicates even-odd fill type.
+ SkRect fDevBounds45; // Bounding box in "| 1 -1 | * devCoords" space.
+ // | 1 1 |
+ SkIVector fDevToAtlasOffset; // Translation from device space to location in atlas.
uint32_t fColor;
void set(SkPath::FillType, const SkRect& devBounds, const SkRect& devBounds45,
- int16_t atlasOffsetX, int16_t atlasOffsetY, uint32_t color);
+ const SkIVector& devToAtlasOffset, uint32_t color);
};
- GR_STATIC_ASSERT(4 * 10 == sizeof(Instance));
+ GR_STATIC_ASSERT(4 * 11 == sizeof(Instance));
static sk_sp<const GrBuffer> FindVertexBuffer(GrOnFlushResourceProvider*);
static sk_sp<const GrBuffer> FindIndexBuffer(GrOnFlushResourceProvider*);
@@ -87,8 +87,8 @@ private:
};
inline void GrCCPathProcessor::Instance::set(SkPath::FillType fillType, const SkRect& devBounds,
- const SkRect& devBounds45, int16_t atlasOffsetX,
- int16_t atlasOffsetY, uint32_t color) {
+ const SkRect& devBounds45,
+ const SkIVector& devToAtlasOffset, uint32_t color) {
if (SkPath::kEvenOdd_FillType == fillType) {
// "right < left" indicates even-odd fill type.
fDevBounds.setLTRB(devBounds.fRight, devBounds.fTop, devBounds.fLeft, devBounds.fBottom);
@@ -97,7 +97,7 @@ inline void GrCCPathProcessor::Instance::set(SkPath::FillType fillType, const Sk
fDevBounds = devBounds;
}
fDevBounds45 = devBounds45;
- fAtlasOffset = {{atlasOffsetX, atlasOffsetY}};
+ fDevToAtlasOffset = devToAtlasOffset;
fColor = color;
}
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.cpp b/src/gpu/ccpr/GrCCPerFlushResources.cpp
index a80d789f3f..5b3dec0622 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.cpp
+++ b/src/gpu/ccpr/GrCCPerFlushResources.cpp
@@ -7,17 +7,69 @@
#include "GrCCPerFlushResources.h"
+#include "GrClip.h"
+#include "GrMemoryPool.h"
#include "GrOnFlushResourceProvider.h"
+#include "GrSurfaceContextPriv.h"
#include "GrRenderTargetContext.h"
-#include "SkIPoint16.h"
+#include "SkMakeUnique.h"
+using CoverageCountBatchID = GrCCPathParser::CoverageCountBatchID;
using PathInstance = GrCCPathProcessor::Instance;
+namespace {
+
+// Renders coverage counts to a CCPR atlas using the resources' pre-filled GrCCPathParser.
+class RenderAtlasOp : public GrDrawOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ static std::unique_ptr<GrDrawOp> Make(GrContext* context,
+ sk_sp<const GrCCPerFlushResources> resources,
+ CoverageCountBatchID batchID, const SkISize& drawBounds) {
+ return std::unique_ptr<GrDrawOp>(new RenderAtlasOp(std::move(resources), batchID,
+ drawBounds));
+ }
+
+ // GrDrawOp interface.
+ const char* name() const override { return "RenderAtlasOp (CCPR)"; }
+ FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
+ RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*,
+ GrPixelConfigIsClamped) override { return RequiresDstTexture::kNo; }
+ bool onCombineIfPossible(GrOp* other, const GrCaps&) override {
+ SK_ABORT("Only expected one Op per CCPR atlas.");
+ return true;
+ }
+ void onPrepare(GrOpFlushState*) override {}
+
+ void onExecute(GrOpFlushState* flushState) override {
+ fResources->pathParser().drawCoverageCount(flushState, fBatchID, fDrawBounds);
+ }
+
+private:
+ friend class ::GrOpMemoryPool; // for ctor
+
+ RenderAtlasOp(sk_sp<const GrCCPerFlushResources> resources, CoverageCountBatchID batchID,
+ const SkISize& drawBounds)
+ : GrDrawOp(ClassID())
+ , fResources(std::move(resources))
+ , fBatchID(batchID)
+ , fDrawBounds(SkIRect::MakeWH(drawBounds.width(), drawBounds.height())) {
+ this->setBounds(SkRect::MakeIWH(fDrawBounds.width(), fDrawBounds.height()),
+ GrOp::HasAABloat::kNo, GrOp::IsZeroArea::kNo);
+ }
+
+ const sk_sp<const GrCCPerFlushResources> fResources;
+ const CoverageCountBatchID fBatchID;
+ const SkIRect fDrawBounds;
+};
+
+}
+
GrCCPerFlushResources::GrCCPerFlushResources(GrOnFlushResourceProvider* onFlushRP,
const GrCCPerFlushResourceSpecs& specs)
- : fPathParser(sk_make_sp<GrCCPathParser>(specs.fNumRenderedPaths + specs.fNumClipPaths,
- specs.fParsingPathStats))
- , fAtlasSpecs(specs.fAtlasSpecs)
+ : fPathParser(specs.fNumRenderedPaths + specs.fNumClipPaths, specs.fParsingPathStats)
+ , fAtlasStack(specs.fAtlasSpecs)
, fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
, fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
, fInstanceBuffer(onFlushRP->makeBuffer(kVertex_GrBufferType,
@@ -36,34 +88,42 @@ GrCCPerFlushResources::GrCCPerFlushResources(GrOnFlushResourceProvider* onFlushR
}
fPathInstanceData = static_cast<PathInstance*>(fInstanceBuffer->map());
SkASSERT(fPathInstanceData);
- SkDEBUGCODE(fPathInstanceBufferCount = specs.fNumRenderedPaths);
+ SkDEBUGCODE(fEndPathInstance = specs.fNumRenderedPaths);
}
-GrCCAtlas* GrCCPerFlushResources::renderPathInAtlas(const SkIRect& clipIBounds, const SkMatrix& m,
- const SkPath& path, SkRect* devBounds,
- SkRect* devBounds45, int16_t* atlasOffsetX,
- int16_t* atlasOffsetY) {
+const GrCCAtlas* GrCCPerFlushResources::renderPathInAtlas(const SkIRect& clipIBounds,
+ const SkMatrix& m, const SkPath& path,
+ SkRect* devBounds, SkRect* devBounds45,
+ SkIVector* devToAtlasOffset) {
SkASSERT(this->isMapped());
+ SkASSERT(fNextPathInstanceIdx < fEndPathInstance);
+
+ fPathParser.parsePath(m, path, devBounds, devBounds45);
+
SkIRect devIBounds;
- fPathParser->parsePath(m, path, devBounds, devBounds45);
devBounds->roundOut(&devIBounds);
- return this->placeParsedPathInAtlas(clipIBounds, devIBounds, atlasOffsetX, atlasOffsetY);
+
+ if (!this->placeParsedPathInAtlas(clipIBounds, devIBounds, devToAtlasOffset)) {
+ SkDEBUGCODE(--fEndPathInstance);
+ return nullptr; // Path was degenerate or clipped away.
+ }
+ return &fAtlasStack.current();
}
-GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(const SkIRect& clipIBounds,
- const SkPath& devPath,
- const SkIRect& devPathIBounds,
- int16_t* atlasOffsetX,
- int16_t* atlasOffsetY) {
+const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
+ const SkIRect& clipIBounds, const SkPath& devPath, const SkIRect& devPathIBounds,
+ SkIVector* devToAtlasOffset) {
SkASSERT(this->isMapped());
- fPathParser->parseDeviceSpacePath(devPath);
- return this->placeParsedPathInAtlas(clipIBounds, devPathIBounds, atlasOffsetX, atlasOffsetY);
+ fPathParser.parseDeviceSpacePath(devPath);
+ if (!this->placeParsedPathInAtlas(clipIBounds, devPathIBounds, devToAtlasOffset)) {
+ return nullptr;
+ }
+ return &fAtlasStack.current();
}
-GrCCAtlas* GrCCPerFlushResources::placeParsedPathInAtlas(const SkIRect& clipIBounds,
- const SkIRect& pathIBounds,
- int16_t* atlasOffsetX,
- int16_t* atlasOffsetY) {
+bool GrCCPerFlushResources::placeParsedPathInAtlas(const SkIRect& clipIBounds,
+ const SkIRect& pathIBounds,
+ SkIVector* devToAtlasOffset) {
using ScissorMode = GrCCPathParser::ScissorMode;
ScissorMode scissorMode;
SkIRect clippedPathIBounds;
@@ -73,50 +133,48 @@ GrCCAtlas* GrCCPerFlushResources::placeParsedPathInAtlas(const SkIRect& clipIBou
} else if (clippedPathIBounds.intersect(clipIBounds, pathIBounds)) {
scissorMode = ScissorMode::kScissored;
} else {
- fPathParser->discardParsedPath();
- return nullptr;
+ fPathParser.discardParsedPath();
+ return false;
}
- SkIPoint16 atlasLocation;
- int h = clippedPathIBounds.height(), w = clippedPathIBounds.width();
- if (fAtlases.empty() || !fAtlases.back().addRect(w, h, &atlasLocation)) {
- if (!fAtlases.empty()) {
- // The atlas is out of room and can't grow any bigger.
- auto coverageCountBatchID = fPathParser->closeCurrentBatch();
- fAtlases.back().setCoverageCountBatchID(coverageCountBatchID);
- }
- fAtlases.emplace_back(fAtlasSpecs);
- SkAssertResult(fAtlases.back().addRect(w, h, &atlasLocation));
+ if (GrCCAtlas* retiredAtlas = fAtlasStack.addRect(clippedPathIBounds, devToAtlasOffset)) {
+ // We did not fit in the previous coverage count atlas and it was retired. Close the path
+ // parser's current batch (which does not yet include the path we just parsed). We will
+ // render this batch into the retired atlas during finalize().
+ CoverageCountBatchID batchID = fPathParser.closeCurrentBatch();
+ retiredAtlas->setUserBatchID(batchID);
}
-
- *atlasOffsetX = atlasLocation.x() - static_cast<int16_t>(clippedPathIBounds.left());
- *atlasOffsetY = atlasLocation.y() - static_cast<int16_t>(clippedPathIBounds.top());
- fPathParser->saveParsedPath(scissorMode, clippedPathIBounds, *atlasOffsetX, *atlasOffsetY);
-
- return &fAtlases.back();
+ fPathParser.saveParsedPath(scissorMode, clippedPathIBounds, *devToAtlasOffset);
+ return true;
}
bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
- SkTArray<sk_sp<GrRenderTargetContext>>* atlasDraws) {
+ SkTArray<sk_sp<GrRenderTargetContext>>* out) {
SkASSERT(this->isMapped());
+ SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
+
fInstanceBuffer->unmap();
fPathInstanceData = nullptr;
- if (!fAtlases.empty()) {
- auto coverageCountBatchID = fPathParser->closeCurrentBatch();
- fAtlases.back().setCoverageCountBatchID(coverageCountBatchID);
+ if (!fAtlasStack.empty()) {
+ CoverageCountBatchID batchID = fPathParser.closeCurrentBatch();
+ fAtlasStack.current().setUserBatchID(batchID);
}
- if (!fPathParser->finalize(onFlushRP)) {
+ // Build the GPU buffers to render path coverage counts. (This must not happen until after the
+ // final call to fPathParser.closeCurrentBatch().)
+ if (!fPathParser.finalize(onFlushRP)) {
SkDebugf("WARNING: failed to allocate GPU buffers for CCPR. No paths will be drawn.\n");
return false;
}
- // Draw the atlas(es).
- GrTAllocator<GrCCAtlas>::Iter atlasIter(&fAtlases);
- while (atlasIter.next()) {
- if (auto rtc = atlasIter.get()->finalize(onFlushRP, fPathParser)) {
- atlasDraws->push_back(std::move(rtc));
+ // Render the atlas(es).
+ for (GrCCAtlasStack::Iter atlas(fAtlasStack); atlas.next();) {
+ if (auto rtc = atlas->initInternalTextureProxy(onFlushRP, kAlpha_half_GrPixelConfig)) {
+ auto op = RenderAtlasOp::Make(rtc->surfPriv().getContext(), sk_ref_sp(this),
+ atlas->getUserBatchID(), atlas->drawBounds());
+ rtc->addDrawOp(GrNoClip(), std::move(op));
+ out->push_back(std::move(rtc));
}
}
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.h b/src/gpu/ccpr/GrCCPerFlushResources.h
index 14e3a1c667..e920c06acc 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.h
+++ b/src/gpu/ccpr/GrCCPerFlushResources.h
@@ -8,7 +8,6 @@
#ifndef GrCCPerFlushResources_DEFINED
#define GrCCPerFlushResources_DEFINED
-#include "GrAllocator.h"
#include "GrNonAtomicRef.h"
#include "ccpr/GrCCAtlas.h"
#include "ccpr/GrCCPathParser.h"
@@ -38,42 +37,50 @@ public:
bool isMapped() const { return SkToBool(fPathInstanceData); }
- GrCCAtlas* renderPathInAtlas(const SkIRect& clipIBounds, const SkMatrix&, const SkPath&,
- SkRect* devBounds, SkRect* devBounds45, int16_t* offsetX,
- int16_t* offsetY);
- GrCCAtlas* renderDeviceSpacePathInAtlas(const SkIRect& clipIBounds, const SkPath& devPath,
- const SkIRect& devPathIBounds, int16_t* atlasOffsetX,
- int16_t* atlasOffsetY);
+ // Renders a path into a temporary atlas. See GrCCPathParser for a description of the arguments.
+ const GrCCAtlas* renderPathInAtlas(const SkIRect& clipIBounds, const SkMatrix&, const SkPath&,
+ SkRect* devBounds, SkRect* devBounds45,
+ SkIVector* devToAtlasOffset);
+ const GrCCAtlas* renderDeviceSpacePathInAtlas(const SkIRect& clipIBounds, const SkPath& devPath,
+ const SkIRect& devPathIBounds,
+ SkIVector* devToAtlasOffset);
+ // Returns the index in instanceBuffer() of the next instance that will be added by
+ // appendDrawPathInstance().
+ int nextPathInstanceIdx() const { return fNextPathInstanceIdx; }
+
+ // Appends an instance to instanceBuffer() that will draw a path to the destination render
+ // target. The caller is responsible to call set() on the returned instance, to keep track of
+ // its atlas and index (see nextPathInstanceIdx()), and to issue the actual draw call.
GrCCPathProcessor::Instance& appendDrawPathInstance() {
SkASSERT(this->isMapped());
- SkASSERT(fNextPathInstanceIdx < fPathInstanceBufferCount);
+ SkASSERT(fNextPathInstanceIdx < fEndPathInstance);
return fPathInstanceData[fNextPathInstanceIdx++];
}
- int nextPathInstanceIdx() const { return fNextPathInstanceIdx; }
- bool finalize(GrOnFlushResourceProvider*, SkTArray<sk_sp<GrRenderTargetContext>>* atlasDraws);
+ // Finishes off the GPU buffers and renders the atlas(es).
+ bool finalize(GrOnFlushResourceProvider*, SkTArray<sk_sp<GrRenderTargetContext>>* out);
+ // Accessors used by draw calls, once the resources have been finalized.
+ const GrCCPathParser& pathParser() const { SkASSERT(!this->isMapped()); return fPathParser; }
const GrBuffer* indexBuffer() const { SkASSERT(!this->isMapped()); return fIndexBuffer.get(); }
const GrBuffer* vertexBuffer() const { SkASSERT(!this->isMapped()); return fVertexBuffer.get();}
GrBuffer* instanceBuffer() const { SkASSERT(!this->isMapped()); return fInstanceBuffer.get(); }
private:
- GrCCAtlas* placeParsedPathInAtlas(const SkIRect& clipIBounds, const SkIRect& pathIBounds,
- int16_t* atlasOffsetX, int16_t* atlasOffsetY);
+ bool placeParsedPathInAtlas(const SkIRect& clipIBounds, const SkIRect& pathIBounds,
+ SkIVector* devToAtlasOffset);
- const sk_sp<GrCCPathParser> fPathParser;
- const GrCCAtlas::Specs fAtlasSpecs;
+ GrCCPathParser fPathParser;
+ GrCCAtlasStack fAtlasStack;
- sk_sp<const GrBuffer> fIndexBuffer;
- sk_sp<const GrBuffer> fVertexBuffer;
- sk_sp<GrBuffer> fInstanceBuffer;
+ const sk_sp<const GrBuffer> fIndexBuffer;
+ const sk_sp<const GrBuffer> fVertexBuffer;
+ const sk_sp<GrBuffer> fInstanceBuffer;
GrCCPathProcessor::Instance* fPathInstanceData = nullptr;
int fNextPathInstanceIdx = 0;
- SkDEBUGCODE(int fPathInstanceBufferCount);
-
- GrSTAllocator<4, GrCCAtlas> fAtlases;
+ SkDEBUGCODE(int fEndPathInstance);
};
#endif
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 9ad945cad7..727649bdd9 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -179,7 +179,7 @@ std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipPro
void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
const uint32_t* opListIDs, int numOpListIDs,
- SkTArray<sk_sp<GrRenderTargetContext>>* atlasDraws) {
+ SkTArray<sk_sp<GrRenderTargetContext>>* out) {
SkASSERT(!fFlushing);
SkASSERT(fFlushingPaths.empty());
SkDEBUGCODE(fFlushing = true);
@@ -236,7 +236,7 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
SkASSERT(resources->nextPathInstanceIdx() == resourceSpecs.fNumRenderedPaths - numSkippedPaths);
// Allocate the atlases and create instance buffers to draw them.
- if (!resources->finalize(onFlushRP, atlasDraws)) {
+ if (!resources->finalize(onFlushRP, out)) {
return;
}
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 63e52a972e..abc824b552 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -67,7 +67,7 @@ public:
// GrOnFlushCallbackObject overrides.
void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
- SkTArray<sk_sp<GrRenderTargetContext>>* atlasDraws) override;
+ SkTArray<sk_sp<GrRenderTargetContext>>* out) override;
void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override;
private: