Diffstat (limited to 'src/gpu/batches')
 src/gpu/batches/GrAAConvexPathRenderer.cpp            | 19
 src/gpu/batches/GrAADistanceFieldPathRenderer.cpp     | 32
 src/gpu/batches/GrAAHairLinePathRenderer.cpp          |  4
 src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp | 14
 src/gpu/batches/GrAAStrokeRectBatch.cpp               |  4
 src/gpu/batches/GrAtlasTextBatch.cpp                  | 10
 src/gpu/batches/GrAtlasTextBatch.h                    |  8
 src/gpu/batches/GrDefaultPathRenderer.cpp             |  8
 src/gpu/batches/GrDrawAtlasBatch.cpp                  |  2
 src/gpu/batches/GrDrawAtlasBatch.h                    |  2
 src/gpu/batches/GrDrawVerticesBatch.cpp               |  2
 src/gpu/batches/GrDrawVerticesBatch.h                 |  2
 src/gpu/batches/GrNinePatch.cpp                       |  4
 src/gpu/batches/GrNonAAStrokeRectBatch.cpp            |  9
 src/gpu/batches/GrTInstanceBatch.h                    |  2
 src/gpu/batches/GrTessellatingPathRenderer.cpp        |  6
 src/gpu/batches/GrTestBatch.h                         |  4
 src/gpu/batches/GrVertexBatch.h                       |  2
18 files changed, 70 insertions, 64 deletions
diff --git a/src/gpu/batches/GrAAConvexPathRenderer.cpp b/src/gpu/batches/GrAAConvexPathRenderer.cpp
index 79296e3c3e..c5b7c579b7 100644
--- a/src/gpu/batches/GrAAConvexPathRenderer.cpp
+++ b/src/gpu/batches/GrAAConvexPathRenderer.cpp
@@ -778,7 +778,7 @@ private:
fBatch.fCanTweakAlphaForCoverage = overrides.canTweakAlphaForCoverage();
}
- void prepareLinesOnlyDraws(Target* target) {
+ void prepareLinesOnlyDraws(Target* target) const {
bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
// Setup GrGeometryProcessor
@@ -806,7 +806,7 @@ private:
for (int i = 0; i < instanceCount; i++) {
tess.rewind();
- Geometry& args = fGeoData[i];
+ const Geometry& args = fGeoData[i];
if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
continue;
@@ -842,7 +842,7 @@ private:
}
}
- void onPrepareDraws(Target* target) override {
+ void onPrepareDraws(Target* target) const override {
#ifndef SK_IGNORE_LINEONLY_AA_CONVEX_PATH_OPTS
if (this->linesOnly()) {
this->prepareLinesOnlyDraws(target);
@@ -866,15 +866,22 @@ private:
// TODO generate all segments for all paths and use one vertex buffer
for (int i = 0; i < instanceCount; i++) {
- Geometry& args = fGeoData[i];
+ const Geometry& args = fGeoData[i];
// We use the fact that SkPath::transform path does subdivision based on
// perspective. Otherwise, we apply the view matrix when copying to the
// segment representation.
const SkMatrix* viewMatrix = &args.fViewMatrix;
+
+ // We avoid initializing the path unless we have to
+ const SkPath* pathPtr = &args.fPath;
+ SkTLazy<SkPath> tmpPath;
if (viewMatrix->hasPerspective()) {
- args.fPath.transform(*viewMatrix);
+ SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
+ tmpPathPtr->setIsVolatile(true);
+ tmpPathPtr->transform(*viewMatrix);
viewMatrix = &SkMatrix::I();
+ pathPtr = tmpPathPtr;
}
int vertexCount;
@@ -886,7 +893,7 @@ private:
SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
SkPoint fanPt;
- if (!get_segments(args.fPath, *viewMatrix, &segments, &fanPt, &vertexCount,
+ if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
&indexCount)) {
continue;
}
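
[Editor's note] The hunk above replaces an in-place SkPath::transform() with a lazily-initialized copy so the recorded geometry can stay const. A minimal sketch of that pattern, assuming the Skia SkPath/SkMatrix/SkTLazy headers; the helper name resolve_path is illustrative, not part of the Skia API:

    #include "SkMatrix.h"
    #include "SkPath.h"
    #include "SkTLazy.h"

    // Picks the path/matrix pair to hand to the tessellation step without
    // mutating the caller's geometry: perspective is baked into a temporary
    // copy instead of into the stored path.
    static void resolve_path(const SkPath& srcPath, const SkMatrix& srcViewMatrix,
                             SkTLazy<SkPath>* tmpPath,
                             const SkPath** outPath, const SkMatrix** outViewMatrix) {
        *outPath = &srcPath;
        *outViewMatrix = &srcViewMatrix;
        if (srcViewMatrix.hasPerspective()) {
            SkPath* copy = tmpPath->init(srcPath);   // copy only when needed
            copy->setIsVolatile(true);               // the copy is never reused
            copy->transform(srcViewMatrix);          // bake perspective into the copy
            *outPath = copy;
            *outViewMatrix = &SkMatrix::I();         // identity from here on
        }
    }
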
diff --git a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
index 80fd54212c..0b14e4df4e 100644
--- a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
+++ b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
@@ -143,7 +143,6 @@ public:
uint32_t fGenID;
SkStrokeRec fStroke;
bool fAntiAlias;
- PathData* fPathData;
};
static GrDrawBatch* Create(const Geometry& geometry, GrColor color, const SkMatrix& viewMatrix,
@@ -183,7 +182,7 @@ private:
int fInstancesToFlush;
};
- void onPrepareDraws(Target* target) override {
+ void onPrepareDraws(Target* target) const override {
int instanceCount = fGeoData.count();
SkMatrix invert;
@@ -229,7 +228,7 @@ private:
flushInfo.fInstancesToFlush = 0;
for (int i = 0; i < instanceCount; i++) {
- Geometry& args = fGeoData[i];
+ const Geometry& args = fGeoData[i];
// get mip level
SkScalar maxScale = this->viewMatrix().getMaxScale();
@@ -247,22 +246,22 @@ private:
// check to see if path is cached
PathData::Key key(args.fGenID, desiredDimension, args.fStroke);
- args.fPathData = fPathCache->find(key);
- if (nullptr == args.fPathData || !atlas->hasID(args.fPathData->fID)) {
+ PathData* pathData = fPathCache->find(key);
+ if (nullptr == pathData || !atlas->hasID(pathData->fID)) {
// Remove the stale cache entry
- if (args.fPathData) {
- fPathCache->remove(args.fPathData->fKey);
- fPathList->remove(args.fPathData);
- delete args.fPathData;
+ if (pathData) {
+ fPathCache->remove(pathData->fKey);
+ fPathList->remove(pathData);
+ delete pathData;
}
SkScalar scale = desiredDimension/maxDim;
- args.fPathData = new PathData;
+ pathData = new PathData;
if (!this->addPathToAtlas(target,
dfProcessor,
this->pipeline(),
&flushInfo,
atlas,
- args.fPathData,
+ pathData,
args.fPath,
args.fGenID,
args.fStroke,
@@ -274,7 +273,7 @@ private:
}
}
- atlas->setLastUseToken(args.fPathData->fID, target->currentToken());
+ atlas->setLastUseToken(pathData->fID, target->currentToken());
// Now set vertices
intptr_t offset = reinterpret_cast<intptr_t>(vertices);
@@ -288,7 +287,7 @@ private:
vertexStride,
this->viewMatrix(),
args.fPath,
- args.fPathData);
+ pathData);
flushInfo.fInstancesToFlush++;
}
@@ -304,7 +303,6 @@ private:
fBatch.fColor = color;
fBatch.fViewMatrix = viewMatrix;
fGeoData.push_back(geometry);
- fGeoData.back().fPathData = nullptr;
fAtlas = atlas;
fPathCache = pathCache;
@@ -326,7 +324,7 @@ private:
const SkStrokeRec& stroke,
bool antiAlias,
uint32_t dimension,
- SkScalar scale) {
+ SkScalar scale) const {
const SkRect& bounds = path.getBounds();
// generate bounding rect for bitmap draw
@@ -443,7 +441,7 @@ private:
size_t vertexStride,
const SkMatrix& viewMatrix,
const SkPath& path,
- const PathData* pathData) {
+ const PathData* pathData) const {
GrTexture* texture = atlas->getTexture();
SkScalar dx = pathData->fBounds.fLeft;
@@ -476,7 +474,7 @@ private:
vertexStride);
}
- void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) {
+ void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) const {
GrVertices vertices;
int maxInstancesPerDraw = flushInfo->fIndexBuffer->maxQuads();
vertices.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
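
[Editor's note] The larger change in this file drops the PathData* that used to be written back into Geometry::fPathData; the atlas entry is now a local that is looked up, and rebuilt if the atlas evicted it, each time onPrepareDraws() runs. A generic, self-contained sketch of that lookup-or-rebuild shape; the Cache/Entry types here are stand-ins, not the Skia classes:

    #include <cstdint>
    #include <unordered_map>

    struct Entry { int fAtlasID = 0; };

    struct Cache {
        std::unordered_map<uint32_t, Entry*> fMap;
        Entry* find(uint32_t key) const {
            auto it = fMap.find(key);
            return it == fMap.end() ? nullptr : it->second;
        }
        void remove(uint32_t key) { fMap.erase(key); }
    };

    // Returns a usable entry without writing anything back into the caller's
    // (const) geometry; stale entries are evicted and rebuilt on the spot.
    Entry* find_or_rebuild(Cache* cache, uint32_t key, bool (*stillInAtlas)(int)) {
        Entry* entry = cache->find(key);
        if (!entry || !stillInAtlas(entry->fAtlasID)) {
            if (entry) {               // stale: drop it before rebuilding
                cache->remove(key);
                delete entry;
            }
            entry = new Entry;
            // ... re-rasterize the path into the atlas, fill entry->fAtlasID ...
            cache->fMap[key] = entry;
        }
        return entry;
    }
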
diff --git a/src/gpu/batches/GrAAHairLinePathRenderer.cpp b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
index fd579f2e6f..94abe1c1ec 100644
--- a/src/gpu/batches/GrAAHairLinePathRenderer.cpp
+++ b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
@@ -713,7 +713,7 @@ private:
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
- void onPrepareDraws(Target*) override;
+ void onPrepareDraws(Target*) const override;
typedef SkTArray<SkPoint, true> PtArray;
typedef SkTArray<int, true> IntArray;
@@ -791,7 +791,7 @@ private:
typedef GrVertexBatch INHERITED;
};
-void AAHairlineBatch::onPrepareDraws(Target* target) {
+void AAHairlineBatch::onPrepareDraws(Target* target) const {
// Setup the viewmatrix and localmatrix for the GrGeometryProcessor.
SkMatrix invert;
if (!this->viewMatrix().invert(&invert)) {
diff --git a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
index 295bcb1f0c..4b03c5a3f6 100644
--- a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
+++ b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
@@ -162,7 +162,7 @@ private:
}
void draw(GrVertexBatch::Target* target, const GrPipeline* pipeline, int vertexCount,
- size_t vertexStride, void* vertices, int indexCount, uint16_t* indices) {
+ size_t vertexStride, void* vertices, int indexCount, uint16_t* indices) const {
if (vertexCount == 0 || indexCount == 0) {
return;
}
@@ -190,7 +190,7 @@ private:
target->draw(info);
}
- void onPrepareDraws(Target* target) override {
+ void onPrepareDraws(Target* target) const override {
bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
// Setup GrGeometryProcessor
@@ -220,7 +220,7 @@ private:
uint8_t* vertices = (uint8_t*) sk_malloc_throw(maxVertices * vertexStride);
uint16_t* indices = (uint16_t*) sk_malloc_throw(maxIndices * sizeof(uint16_t));
for (int i = 0; i < instanceCount; i++) {
- Geometry& args = fGeoData[i];
+ const Geometry& args = fGeoData[i];
GrAAConvexTessellator tess(args.fStrokeWidth, args.fJoin, args.fMiterLimit);
if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
@@ -232,8 +232,8 @@ private:
if (indexCount + currentIndices > UINT16_MAX) {
// if we added the current instance, we would overflow the indices we can store in a
// uint16_t. Draw what we've got so far and reset.
- draw(target, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
- indices);
+ this->draw(target, this->pipeline(), vertexCount, vertexStride, vertices,
+ indexCount, indices);
vertexCount = 0;
indexCount = 0;
}
@@ -252,8 +252,8 @@ private:
vertexCount += currentVertices;
indexCount += currentIndices;
}
- draw(target, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
- indices);
+ this->draw(target, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
+ indices);
sk_free(vertices);
sk_free(indices);
}
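
[Editor's note] Aside from the const changes, the context above shows this file's flush-on-overflow loop: because indices are uint16_t, a single draw cannot reference more than UINT16_MAX entries, so the accumulated geometry is drawn and the counters reset before an instance that would overflow is appended. A minimal sketch of that control flow, with illustrative names:

    #include <cstdint>
    #include <functional>

    // flush() stands in for the batch's draw(target, pipeline, ...) call.
    void append_instance(int currentVertices, int currentIndices,
                         int* vertexCount, int* indexCount,
                         const std::function<void(int, int)>& flush) {
        if (*indexCount + currentIndices > UINT16_MAX) {
            flush(*vertexCount, *indexCount);   // draw what has accumulated so far
            *vertexCount = 0;
            *indexCount = 0;
        }
        // ... copy the instance's vertices/indices into the staging buffers ...
        *vertexCount += currentVertices;
        *indexCount += currentIndices;
    }
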
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.cpp b/src/gpu/batches/GrAAStrokeRectBatch.cpp
index 419964fb75..21c55f6a54 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrAAStrokeRectBatch.cpp
@@ -107,7 +107,7 @@ private:
bounds->join(geo.fDevOutsideAssist);
}
- void onPrepareDraws(Target*) override;
+ void onPrepareDraws(Target*) const override;
void initBatchTracker(const GrXPOverridesForBatch&) override;
AAStrokeRectBatch(const SkMatrix& viewMatrix,bool miterStroke)
@@ -182,7 +182,7 @@ void AAStrokeRectBatch::initBatchTracker(const GrXPOverridesForBatch& overrides)
fBatch.fCanTweakAlphaForCoverage = overrides.canTweakAlphaForCoverage();
}
-void AAStrokeRectBatch::onPrepareDraws(Target* target) {
+void AAStrokeRectBatch::onPrepareDraws(Target* target) const {
bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
SkAutoTUnref<const GrGeometryProcessor> gp(create_stroke_rect_gp(canTweakAlphaForCoverage,
diff --git a/src/gpu/batches/GrAtlasTextBatch.cpp b/src/gpu/batches/GrAtlasTextBatch.cpp
index ac5ec118c3..fa34036af6 100644
--- a/src/gpu/batches/GrAtlasTextBatch.cpp
+++ b/src/gpu/batches/GrAtlasTextBatch.cpp
@@ -131,7 +131,7 @@ inline void GrAtlasTextBatch::regenBlob(Target* target, FlushInfo* flushInfo, Bl
SkTypeface** typeface, GrFontScaler** scaler,
const SkDescriptor** desc, const GrGeometryProcessor* gp,
int glyphCount, size_t vertexStride,
- GrColor color, SkScalar transX, SkScalar transY) {
+ GrColor color, SkScalar transX, SkScalar transY) const {
static_assert(!regenGlyphs || regenTexCoords, "must regenTexCoords along regenGlyphs");
GrBatchTextStrike* strike = nullptr;
if (regenTexCoords) {
@@ -297,7 +297,7 @@ enum RegenMask {
#define REGEN_ARGS target, &flushInfo, blob, &run, &info, &cache, &typeface, &scaler, &desc, gp, \
glyphCount, vertexStride, args.fColor, args.fTransX, args.fTransY
-void GrAtlasTextBatch::onPrepareDraws(Target* target) {
+void GrAtlasTextBatch::onPrepareDraws(Target* target) const {
// if we have RGB, then we won't have any SkShaders so no need to use a localmatrix.
// TODO actually only invert if we don't have RGBA
SkMatrix localMatrix;
@@ -363,7 +363,7 @@ void GrAtlasTextBatch::onPrepareDraws(Target* target) {
SkTypeface* typeface = nullptr;
for (int i = 0; i < fGeoCount; i++) {
- Geometry& args = fGeoData[i];
+ const Geometry& args = fGeoData[i];
Blob* blob = args.fBlob;
Run& run = blob->fRuns[args.fRun];
TextInfo& info = run.fSubRunInfo[args.fSubRun];
@@ -434,7 +434,7 @@ void GrAtlasTextBatch::onPrepareDraws(Target* target) {
this->flush(target, &flushInfo);
}
-void GrAtlasTextBatch::flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) {
+void GrAtlasTextBatch::flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) const {
GrVertices vertices;
int maxGlyphsPerDraw = flushInfo->fIndexBuffer->maxQuads();
vertices.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
@@ -518,7 +518,7 @@ bool GrAtlasTextBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
// TODO trying to figure out why lcd is so whack
GrGeometryProcessor* GrAtlasTextBatch::setupDfProcessor(const SkMatrix& viewMatrix,
SkColor filteredColor,
- GrColor color, GrTexture* texture) {
+ GrColor color, GrTexture* texture) const {
GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kBilerp_FilterMode);
bool isLCD = this->isLCD();
// set up any flags
diff --git a/src/gpu/batches/GrAtlasTextBatch.h b/src/gpu/batches/GrAtlasTextBatch.h
index 1e6d953f7d..65fd07d7bb 100644
--- a/src/gpu/batches/GrAtlasTextBatch.h
+++ b/src/gpu/batches/GrAtlasTextBatch.h
@@ -138,7 +138,7 @@ private:
int fVertexOffset;
};
- void onPrepareDraws(Target* target) override;
+ void onPrepareDraws(Target* target) const override;
GrAtlasTextBatch() : INHERITED(ClassID()) {} // initialized in factory functions.
@@ -177,9 +177,9 @@ private:
TextInfo* info, SkGlyphCache** cache,
SkTypeface** typeface, GrFontScaler** scaler, const SkDescriptor** desc,
const GrGeometryProcessor* gp, int glyphCount, size_t vertexStride,
- GrColor color, SkScalar transX, SkScalar transY);
+ GrColor color, SkScalar transX, SkScalar transY) const;
- inline void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo);
+ inline void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) const;
GrColor color() const { return fBatch.fColor; }
const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
@@ -191,7 +191,7 @@ private:
// TODO just use class params
// TODO trying to figure out why lcd is so whack
GrGeometryProcessor* setupDfProcessor(const SkMatrix& viewMatrix, SkColor filteredColor,
- GrColor color, GrTexture* texture);
+ GrColor color, GrTexture* texture) const;
struct BatchTracker {
GrColor fColor;
diff --git a/src/gpu/batches/GrDefaultPathRenderer.cpp b/src/gpu/batches/GrDefaultPathRenderer.cpp
index 23f9fc8178..c6ca1cafc7 100644
--- a/src/gpu/batches/GrDefaultPathRenderer.cpp
+++ b/src/gpu/batches/GrDefaultPathRenderer.cpp
@@ -251,7 +251,7 @@ private:
fBatch.fCoverageIgnored = !overrides.readsCoverage();
}
- void onPrepareDraws(Target* target) override {
+ void onPrepareDraws(Target* target) const override {
SkAutoTUnref<const GrGeometryProcessor> gp;
{
using namespace GrDefaultGeoProcFactory;
@@ -279,7 +279,7 @@ private:
// We will use index buffers if we have multiple paths or one path with multiple contours
bool isIndexed = instanceCount > 1;
for (int i = 0; i < instanceCount; i++) {
- Geometry& args = fGeoData[i];
+ const Geometry& args = fGeoData[i];
int contourCount;
maxVertices += GrPathUtils::worstCasePointCount(args.fPath, &contourCount,
@@ -341,7 +341,7 @@ private:
int vertexOffset = 0;
int indexOffset = 0;
for (int i = 0; i < instanceCount; i++) {
- Geometry& args = fGeoData[i];
+ const Geometry& args = fGeoData[i];
int vertexCnt = 0;
int indexCnt = 0;
@@ -431,7 +431,7 @@ private:
int* indexCnt,
const SkPath& path,
SkScalar srcSpaceTol,
- bool isIndexed) {
+ bool isIndexed) const {
{
SkScalar srcSpaceTolSqd = SkScalarMul(srcSpaceTol, srcSpaceTol);
diff --git a/src/gpu/batches/GrDrawAtlasBatch.cpp b/src/gpu/batches/GrDrawAtlasBatch.cpp
index 3ee8f88e89..dab5b4f7c2 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.cpp
+++ b/src/gpu/batches/GrDrawAtlasBatch.cpp
@@ -51,7 +51,7 @@ static const GrGeometryProcessor* set_vertex_attributes(bool hasColors,
return GrDefaultGeoProcFactory::Create(gpColor, coverage, localCoords, viewMatrix);
}
-void GrDrawAtlasBatch::onPrepareDraws(Target* target) {
+void GrDrawAtlasBatch::onPrepareDraws(Target* target) const {
// Setup geometry processor
SkAutoTUnref<const GrGeometryProcessor> gp(set_vertex_attributes(this->hasColors(),
this->color(),
diff --git a/src/gpu/batches/GrDrawAtlasBatch.h b/src/gpu/batches/GrDrawAtlasBatch.h
index 3c0c34834f..4e89523dcb 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.h
+++ b/src/gpu/batches/GrDrawAtlasBatch.h
@@ -45,7 +45,7 @@ public:
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
private:
- void onPrepareDraws(Target*) override;
+ void onPrepareDraws(Target*) const override;
void initBatchTracker(const GrXPOverridesForBatch&) override;
diff --git a/src/gpu/batches/GrDrawVerticesBatch.cpp b/src/gpu/batches/GrDrawVerticesBatch.cpp
index cc5ccbd001..cfbd24c9c5 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.cpp
+++ b/src/gpu/batches/GrDrawVerticesBatch.cpp
@@ -92,7 +92,7 @@ void GrDrawVerticesBatch::initBatchTracker(const GrXPOverridesForBatch& override
}
}
-void GrDrawVerticesBatch::onPrepareDraws(Target* target) {
+void GrDrawVerticesBatch::onPrepareDraws(Target* target) const {
bool hasLocalCoords = !fGeoData[0].fLocalCoords.isEmpty();
int colorOffset = -1, texOffset = -1;
SkAutoTUnref<const GrGeometryProcessor> gp(
diff --git a/src/gpu/batches/GrDrawVerticesBatch.h b/src/gpu/batches/GrDrawVerticesBatch.h
index 33647df861..9b2faf3d3d 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.h
+++ b/src/gpu/batches/GrDrawVerticesBatch.h
@@ -49,7 +49,7 @@ public:
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
private:
- void onPrepareDraws(Target*) override;
+ void onPrepareDraws(Target*) const override;
void initBatchTracker(const GrXPOverridesForBatch&) override;
GrDrawVerticesBatch(const Geometry& geometry, GrPrimitiveType primitiveType,
diff --git a/src/gpu/batches/GrNinePatch.cpp b/src/gpu/batches/GrNinePatch.cpp
index b438919a87..59040cbbe9 100644
--- a/src/gpu/batches/GrNinePatch.cpp
+++ b/src/gpu/batches/GrNinePatch.cpp
@@ -85,7 +85,7 @@ public:
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
private:
- void onPrepareDraws(Target* target) override {
+ void onPrepareDraws(Target* target) const override {
SkAutoTUnref<const GrGeometryProcessor> gp(create_gp(fOverrides.readsCoverage()));
if (!gp) {
SkDebugf("Couldn't create GrGeometryProcessor\n");
@@ -112,7 +112,7 @@ private:
intptr_t verts = reinterpret_cast<intptr_t>(vertices) +
i * kRectsPerInstance * kVertsPerRect * vertexStride;
- Geometry& geo = fGeoData[i];
+ const Geometry& geo = fGeoData[i];
SkNinePatchIter iter(fImageWidth, fImageHeight, geo.fCenter, geo.fDst);
SkRect srcR, dstR;
diff --git a/src/gpu/batches/GrNonAAStrokeRectBatch.cpp b/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
index 2f80884b61..d036b90c27 100644
--- a/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
@@ -70,6 +70,9 @@ public:
geometry.fRect = rect;
geometry.fStrokeWidth = strokeWidth;
geometry.fColor = color;
+
+ // Sort the rect for hairlines
+ geometry.fRect.sort();
}
void appendAndUpdateBounds(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
@@ -102,7 +105,7 @@ private:
}
}
- void onPrepareDraws(Target* target) override {
+ void onPrepareDraws(Target* target) const override {
SkAutoTUnref<const GrGeometryProcessor> gp;
{
using namespace GrDefaultGeoProcFactory;
@@ -121,7 +124,7 @@ private:
SkASSERT(vertexStride == sizeof(GrDefaultGeoProcFactory::PositionAttr));
- Geometry& args = fGeoData[0];
+ const Geometry& args = fGeoData[0];
int vertexCount = kVertsPerHairlineRect;
if (args.fStrokeWidth > 0) {
@@ -142,10 +145,8 @@ private:
SkPoint* vertex = reinterpret_cast<SkPoint*>(verts);
GrPrimitiveType primType;
-
if (args.fStrokeWidth > 0) {;
primType = kTriangleStrip_GrPrimitiveType;
- args.fRect.sort();
init_stroke_rect_strip(vertex, args.fRect, args.fStrokeWidth);
} else {
// hairline
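
[Editor's note] The behavioral piece of this file's change moves fRect.sort() from onPrepareDraws() to the point where the geometry is appended, so the draw path never mutates recorded state. A sketch of that invariant, mirroring the batch's private Geometry fields; illustrative, not the exact Skia code:

    #include "GrColor.h"
    #include "SkMatrix.h"
    #include "SkRect.h"
    #include "SkScalar.h"
    #include "SkTArray.h"

    struct Geometry {
        GrColor  fColor;
        SkMatrix fViewMatrix;
        SkRect   fRect;
        SkScalar fStrokeWidth;
    };

    void append_geometry(SkTArray<Geometry>* geoData, GrColor color,
                         const SkMatrix& viewMatrix, const SkRect& rect,
                         SkScalar strokeWidth) {
        Geometry& geometry = geoData->push_back();
        geometry.fViewMatrix = viewMatrix;
        geometry.fRect = rect;
        geometry.fStrokeWidth = strokeWidth;
        geometry.fColor = color;
        geometry.fRect.sort();   // normalize once here; onPrepareDraws() stays read-only
    }
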
diff --git a/src/gpu/batches/GrTInstanceBatch.h b/src/gpu/batches/GrTInstanceBatch.h
index d2d4d6c0a1..bf371689bd 100644
--- a/src/gpu/batches/GrTInstanceBatch.h
+++ b/src/gpu/batches/GrTInstanceBatch.h
@@ -89,7 +89,7 @@ public:
private:
GrTInstanceBatch() : INHERITED(ClassID()) {}
- void onPrepareDraws(Target* target) override {
+ void onPrepareDraws(Target* target) const override {
SkAutoTUnref<const GrGeometryProcessor> gp(Impl::CreateGP(this->seedGeometry(),
fOverrides));
if (!gp) {
diff --git a/src/gpu/batches/GrTessellatingPathRenderer.cpp b/src/gpu/batches/GrTessellatingPathRenderer.cpp
index 8cd411a49f..082a8dbdcf 100644
--- a/src/gpu/batches/GrTessellatingPathRenderer.cpp
+++ b/src/gpu/batches/GrTessellatingPathRenderer.cpp
@@ -1418,7 +1418,7 @@ private:
int tessellate(GrUniqueKey* key,
GrResourceProvider* resourceProvider,
SkAutoTUnref<GrVertexBuffer>& vertexBuffer,
- bool canMapVB) {
+ bool canMapVB) const {
SkPath path;
GrStrokeInfo stroke(fStroke);
if (stroke.isDashed()) {
@@ -1521,7 +1521,7 @@ private:
return actualCount;
}
- void onPrepareDraws(Target* target) override {
+ void onPrepareDraws(Target* target) const override {
// construct a cache key from the path's genID and the view matrix
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey key;
@@ -1545,7 +1545,7 @@ private:
screenSpaceTol, fViewMatrix, fPath.getBounds());
if (!cache_match(vertexBuffer.get(), tol, &actualCount)) {
bool canMapVB = GrCaps::kNone_MapFlags != target->caps().mapBufferFlags();
- actualCount = tessellate(&key, rp, vertexBuffer, canMapVB);
+ actualCount = this->tessellate(&key, rp, vertexBuffer, canMapVB);
}
if (actualCount == 0) {
diff --git a/src/gpu/batches/GrTestBatch.h b/src/gpu/batches/GrTestBatch.h
index b070bbaf75..9427504d2e 100644
--- a/src/gpu/batches/GrTestBatch.h
+++ b/src/gpu/batches/GrTestBatch.h
@@ -60,7 +60,7 @@ protected:
const GrGeometryProcessor* geometryProcessor() const { return fGeometryProcessor; }
private:
- void onPrepareDraws(Target* target) override {
+ void onPrepareDraws(Target* target) const override {
target->initDraw(fGeometryProcessor, this->pipeline());
this->generateGeometry(target);
}
@@ -72,7 +72,7 @@ private:
return false;
}
- virtual void generateGeometry(Target*) = 0;
+ virtual void generateGeometry(Target*) const = 0;
struct BatchTracker {
GrColor fColor;
diff --git a/src/gpu/batches/GrVertexBatch.h b/src/gpu/batches/GrVertexBatch.h
index ba899cc3fc..d0daf59141 100644
--- a/src/gpu/batches/GrVertexBatch.h
+++ b/src/gpu/batches/GrVertexBatch.h
@@ -65,7 +65,7 @@ private:
void onPrepare(GrBatchFlushState* state) final;
void onDraw(GrBatchFlushState* state) final;
- virtual void onPrepareDraws(Target*) = 0;
+ virtual void onPrepareDraws(Target*) const = 0;
// A set of contiguous draws with no inline uploads between them that all use the same
// primitive processor. All the draws in a DrawArray share a primitive processor and use the
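
[Editor's note] Taken together, this header change is what drives everything above: GrVertexBatch's onPrepareDraws() hook is now a const pure virtual, so every subclass must treat its recorded geometry as read-only while generating vertices. A generic, self-contained sketch of that pattern; the names are illustrative, not the Skia classes:

    #include <vector>

    struct Target {};                         // stand-in for GrVertexBatch::Target
    struct Geometry { /* recorded draw parameters */ };

    class Batch {
    public:
        virtual ~Batch() {}
        void prepare(Target* t) { this->onPrepareDraws(t); }
    private:
        virtual void onPrepareDraws(Target*) const = 0;   // const: state is read-only here
    };

    class MyBatch : public Batch {
    private:
        void onPrepareDraws(Target*) const override {
            for (const Geometry& geo : fGeoData) {        // const ref; no write-back
                (void)geo;  // ... build vertices from geo into the target ...
            }
        }
        std::vector<Geometry> fGeoData;
    };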