aboutsummaryrefslogtreecommitdiffhomepage
path: root/src/gpu
diff options
context:
space:
mode:
Diffstat (limited to 'src/gpu')
-rw-r--r--src/gpu/GrAAConvexPathRenderer.cpp27
-rwxr-xr-xsrc/gpu/GrAADistanceFieldPathRenderer.cpp40
-rw-r--r--src/gpu/GrAAHairLinePathRenderer.cpp34
-rw-r--r--src/gpu/GrAALinearizingConvexPathRenderer.cpp22
-rw-r--r--src/gpu/GrAtlasTextContext.cpp37
-rw-r--r--src/gpu/GrBatchAtlas.cpp50
-rw-r--r--src/gpu/GrBatchAtlas.h11
-rw-r--r--src/gpu/GrBatchFlushState.cpp31
-rw-r--r--src/gpu/GrBatchFlushState.h189
-rw-r--r--src/gpu/GrBatchFontCache.cpp4
-rw-r--r--src/gpu/GrBatchFontCache.h13
-rw-r--r--src/gpu/GrBatchTarget.cpp64
-rw-r--r--src/gpu/GrBatchTarget.h168
-rw-r--r--src/gpu/GrBufferedDrawTarget.cpp2
-rw-r--r--src/gpu/GrCommandBuilder.cpp4
-rw-r--r--src/gpu/GrCommandBuilder.h8
-rwxr-xr-xsrc/gpu/GrContext.cpp2
-rw-r--r--src/gpu/GrDefaultPathRenderer.cpp18
-rw-r--r--src/gpu/GrImmediateDrawTarget.cpp10
-rw-r--r--src/gpu/GrImmediateDrawTarget.h3
-rw-r--r--src/gpu/GrInOrderCommandBuilder.cpp2
-rw-r--r--src/gpu/GrInOrderCommandBuilder.h2
-rw-r--r--src/gpu/GrOvalRenderer.cpp46
-rw-r--r--src/gpu/GrReorderCommandBuilder.cpp2
-rw-r--r--src/gpu/GrReorderCommandBuilder.h2
-rw-r--r--src/gpu/GrTargetCommands.cpp67
-rw-r--r--src/gpu/GrTargetCommands.h42
-rw-r--r--src/gpu/GrTessellatingPathRenderer.cpp10
-rw-r--r--src/gpu/batches/GrAAFillRectBatch.cpp12
-rw-r--r--src/gpu/batches/GrAAStrokeRectBatch.cpp11
-rw-r--r--src/gpu/batches/GrAAStrokeRectBatch.h6
-rw-r--r--src/gpu/batches/GrBWFillRectBatch.cpp12
-rw-r--r--src/gpu/batches/GrDrawAtlasBatch.cpp9
-rw-r--r--src/gpu/batches/GrDrawAtlasBatch.h3
-rw-r--r--src/gpu/batches/GrDrawBatch.h30
-rw-r--r--src/gpu/batches/GrDrawVerticesBatch.cpp14
-rw-r--r--src/gpu/batches/GrDrawVerticesBatch.h6
-rw-r--r--src/gpu/batches/GrStrokeRectBatch.cpp11
-rw-r--r--src/gpu/batches/GrStrokeRectBatch.h4
-rw-r--r--src/gpu/batches/GrTestBatch.h15
-rw-r--r--src/gpu/batches/GrVertexBatch.cpp59
-rw-r--r--src/gpu/batches/GrVertexBatch.h47
-rw-r--r--src/gpu/effects/GrDashingEffect.cpp10
43 files changed, 602 insertions, 557 deletions
diff --git a/src/gpu/GrAAConvexPathRenderer.cpp b/src/gpu/GrAAConvexPathRenderer.cpp
index 3a7d4d23d5..9ef506d88c 100644
--- a/src/gpu/GrAAConvexPathRenderer.cpp
+++ b/src/gpu/GrAAConvexPathRenderer.cpp
@@ -9,7 +9,7 @@
#include "GrAAConvexPathRenderer.h"
#include "GrAAConvexTessellator.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrCaps.h"
#include "GrContext.h"
@@ -777,7 +777,7 @@ public:
fBatch.fCanTweakAlphaForCoverage = opt.canTweakAlphaForCoverage();
}
- void generateGeometryLinesOnly(GrBatchTarget* batchTarget) {
+ void prepareLinesOnlyDraws(Target* target) {
bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
// Setup GrGeometryProcessor
@@ -790,7 +790,7 @@ public:
return;
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
size_t vertexStride = gp->getVertexStride();
@@ -814,8 +814,8 @@ public:
const GrVertexBuffer* vertexBuffer;
int firstVertex;
- void* verts = batchTarget->makeVertSpace(vertexStride, tess.numPts(),
- &vertexBuffer, &firstVertex);
+ void* verts = target->makeVertexSpace(vertexStride, tess.numPts(), &vertexBuffer,
+ &firstVertex);
if (!verts) {
SkDebugf("Could not allocate vertices\n");
return;
@@ -824,8 +824,7 @@ public:
const GrIndexBuffer* indexBuffer;
int firstIndex;
- uint16_t* idxs = batchTarget->makeIndexSpace(tess.numIndices(),
- &indexBuffer, &firstIndex);
+ uint16_t* idxs = target->makeIndexSpace(tess.numIndices(), &indexBuffer, &firstIndex);
if (!idxs) {
SkDebugf("Could not allocate indices\n");
return;
@@ -838,14 +837,14 @@ public:
vertexBuffer, indexBuffer,
firstVertex, firstIndex,
tess.numPts(), tess.numIndices());
- batchTarget->draw(info);
+ target->draw(info);
}
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
#ifndef SK_IGNORE_LINEONLY_AA_CONVEX_PATH_OPTS
if (this->linesOnly()) {
- this->generateGeometryLinesOnly(batchTarget);
+ this->prepareLinesOnlyDraws(target);
return;
}
#endif
@@ -862,7 +861,7 @@ public:
SkAutoTUnref<GrGeometryProcessor> quadProcessor(
QuadEdgeEffect::Create(this->color(), invert, this->usesLocalCoords()));
- batchTarget->initDraw(quadProcessor, this->pipeline());
+ target->initDraw(quadProcessor, this->pipeline());
// TODO generate all segments for all paths and use one vertex buffer
for (int i = 0; i < instanceCount; i++) {
@@ -895,7 +894,7 @@ public:
int firstVertex;
size_t vertexStride = quadProcessor->getVertexStride();
- QuadVertex* verts = reinterpret_cast<QuadVertex*>(batchTarget->makeVertSpace(
+ QuadVertex* verts = reinterpret_cast<QuadVertex*>(target->makeVertexSpace(
vertexStride, vertexCount, &vertexBuffer, &firstVertex));
if (!verts) {
@@ -906,7 +905,7 @@ public:
const GrIndexBuffer* indexBuffer;
int firstIndex;
- uint16_t *idxs = batchTarget->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
+ uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
if (!idxs) {
SkDebugf("Could not allocate indices\n");
return;
@@ -921,7 +920,7 @@ public:
const Draw& draw = draws[i];
vertices.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer,
firstVertex, firstIndex, draw.fVertexCnt, draw.fIndexCnt);
- batchTarget->draw(vertices);
+ target->draw(vertices);
firstVertex += draw.fVertexCnt;
firstIndex += draw.fIndexCnt;
}
diff --git a/src/gpu/GrAADistanceFieldPathRenderer.cpp b/src/gpu/GrAADistanceFieldPathRenderer.cpp
index 682ec55927..6bf217bd72 100755
--- a/src/gpu/GrAADistanceFieldPathRenderer.cpp
+++ b/src/gpu/GrAADistanceFieldPathRenderer.cpp
@@ -8,7 +8,7 @@
#include "GrAADistanceFieldPathRenderer.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrContext.h"
#include "GrPipelineBuilder.h"
@@ -159,7 +159,7 @@ public:
int fInstancesToFlush;
};
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
int instanceCount = fGeoData.count();
SkMatrix invert;
@@ -183,7 +183,7 @@ public:
flags,
this->usesLocalCoords()));
- batchTarget->initDraw(dfProcessor, this->pipeline());
+ target->initDraw(dfProcessor, this->pipeline());
FlushInfo flushInfo;
@@ -192,12 +192,12 @@ public:
SkASSERT(vertexStride == 2 * sizeof(SkPoint));
const GrVertexBuffer* vertexBuffer;
- void* vertices = batchTarget->makeVertSpace(vertexStride,
- kVerticesPerQuad * instanceCount,
- &vertexBuffer,
- &flushInfo.fVertexOffset);
+ void* vertices = target->makeVertexSpace(vertexStride,
+ kVerticesPerQuad * instanceCount,
+ &vertexBuffer,
+ &flushInfo.fVertexOffset);
flushInfo.fVertexBuffer.reset(SkRef(vertexBuffer));
- flushInfo.fIndexBuffer.reset(batchTarget->resourceProvider()->refQuadIndexBuffer());
+ flushInfo.fIndexBuffer.reset(target->resourceProvider()->refQuadIndexBuffer());
if (!vertices || !flushInfo.fIndexBuffer) {
SkDebugf("Could not allocate vertices\n");
return;
@@ -234,7 +234,7 @@ public:
}
SkScalar scale = desiredDimension/maxDim;
args.fPathData = SkNEW(PathData);
- if (!this->addPathToAtlas(batchTarget,
+ if (!this->addPathToAtlas(target,
dfProcessor,
this->pipeline(),
&flushInfo,
@@ -250,13 +250,13 @@ public:
}
}
- atlas->setLastUseToken(args.fPathData->fID, batchTarget->currentToken());
+ atlas->setLastUseToken(args.fPathData->fID, target->currentToken());
// Now set vertices
intptr_t offset = reinterpret_cast<intptr_t>(vertices);
offset += i * kVerticesPerQuad * vertexStride;
SkPoint* positions = reinterpret_cast<SkPoint*>(offset);
- this->writePathVertices(batchTarget,
+ this->writePathVertices(target,
atlas,
this->pipeline(),
dfProcessor,
@@ -268,7 +268,7 @@ public:
flushInfo.fInstancesToFlush++;
}
- this->flush(batchTarget, &flushInfo);
+ this->flush(target, &flushInfo);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
@@ -292,7 +292,7 @@ private:
viewMatrix.mapRect(&fBounds);
}
- bool addPathToAtlas(GrBatchTarget* batchTarget,
+ bool addPathToAtlas(GrVertexBatch::Target* target,
const GrGeometryProcessor* dfProcessor,
const GrPipeline* pipeline,
FlushInfo* flushInfo,
@@ -388,13 +388,13 @@ private:
// add to atlas
SkIPoint16 atlasLocation;
GrBatchAtlas::AtlasID id;
- bool success = atlas->addToAtlas(&id, batchTarget, width, height, dfStorage.get(),
+ bool success = atlas->addToAtlas(&id, target, width, height, dfStorage.get(),
&atlasLocation);
if (!success) {
- this->flush(batchTarget, flushInfo);
- batchTarget->initDraw(dfProcessor, pipeline);
+ this->flush(target, flushInfo);
+ target->initDraw(dfProcessor, pipeline);
- SkDEBUGCODE(success =) atlas->addToAtlas(&id, batchTarget, width, height,
+ SkDEBUGCODE(success =) atlas->addToAtlas(&id, target, width, height,
dfStorage.get(), &atlasLocation);
SkASSERT(success);
@@ -428,7 +428,7 @@ private:
return true;
}
- void writePathVertices(GrBatchTarget* target,
+ void writePathVertices(GrDrawBatch::Target* target,
GrBatchAtlas* atlas,
const GrPipeline* pipeline,
const GrGeometryProcessor* gp,
@@ -469,13 +469,13 @@ private:
vertexStride);
}
- void flush(GrBatchTarget* batchTarget, FlushInfo* flushInfo) {
+ void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) {
GrVertices vertices;
int maxInstancesPerDraw = flushInfo->fIndexBuffer->maxQuads();
vertices.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
flushInfo->fIndexBuffer, flushInfo->fVertexOffset, kVerticesPerQuad,
kIndicesPerQuad, flushInfo->fInstancesToFlush, maxInstancesPerDraw);
- batchTarget->draw(vertices);
+ target->draw(vertices);
flushInfo->fVertexOffset += kVerticesPerQuad * flushInfo->fInstancesToFlush;
flushInfo->fInstancesToFlush = 0;
}
diff --git a/src/gpu/GrAAHairLinePathRenderer.cpp b/src/gpu/GrAAHairLinePathRenderer.cpp
index b1be535172..c9ffb797ad 100644
--- a/src/gpu/GrAAHairLinePathRenderer.cpp
+++ b/src/gpu/GrAAHairLinePathRenderer.cpp
@@ -7,7 +7,7 @@
#include "GrAAHairLinePathRenderer.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrCaps.h"
#include "GrContext.h"
@@ -709,11 +709,11 @@ public:
fBatch.fCoverage = fGeoData[0].fCoverage;
}
- void generateGeometry(GrBatchTarget* batchTarget) override;
-
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
private:
+ void onPrepareDraws(Target*) override;
+
typedef SkTArray<SkPoint, true> PtArray;
typedef SkTArray<int, true> IntArray;
typedef SkTArray<float, true> FloatArray;
@@ -789,7 +789,7 @@ private:
SkSTArray<1, Geometry, true> fGeoData;
};
-void AAHairlineBatch::generateGeometry(GrBatchTarget* batchTarget) {
+void AAHairlineBatch::onPrepareDraws(Target* target) {
// Setup the viewmatrix and localmatrix for the GrGeometryProcessor.
SkMatrix invert;
if (!this->viewMatrix().invert(&invert)) {
@@ -826,7 +826,7 @@ void AAHairlineBatch::generateGeometry(GrBatchTarget* batchTarget) {
GrQuadEffect::Create(this->color(),
*geometryProcessorViewM,
kHairlineAA_GrProcessorEdgeType,
- batchTarget->caps(),
+ target->caps(),
*geometryProcessorLocalM,
this->usesLocalCoords(),
this->coverage()));
@@ -835,7 +835,7 @@ void AAHairlineBatch::generateGeometry(GrBatchTarget* batchTarget) {
GrConicEffect::Create(this->color(),
*geometryProcessorViewM,
kHairlineAA_GrProcessorEdgeType,
- batchTarget->caps(),
+ target->caps(),
*geometryProcessorLocalM,
this->usesLocalCoords(),
this->coverage()));
@@ -861,8 +861,8 @@ void AAHairlineBatch::generateGeometry(GrBatchTarget* batchTarget) {
// do lines first
if (lineCount) {
SkAutoTUnref<const GrIndexBuffer> linesIndexBuffer(
- ref_lines_index_buffer(batchTarget->resourceProvider()));
- batchTarget->initDraw(lineGP, this->pipeline());
+ ref_lines_index_buffer(target->resourceProvider()));
+ target->initDraw(lineGP, this->pipeline());
const GrVertexBuffer* vertexBuffer;
int firstVertex;
@@ -870,7 +870,7 @@ void AAHairlineBatch::generateGeometry(GrBatchTarget* batchTarget) {
size_t vertexStride = lineGP->getVertexStride();
int vertexCount = kLineSegNumVertices * lineCount;
LineVertex* verts = reinterpret_cast<LineVertex*>(
- batchTarget->makeVertSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex));
+ target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex));
if (!verts|| !linesIndexBuffer) {
SkDebugf("Could not allocate vertices\n");
@@ -888,7 +888,7 @@ void AAHairlineBatch::generateGeometry(GrBatchTarget* batchTarget) {
vertices.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, linesIndexBuffer,
firstVertex, kLineSegNumVertices, kIdxsPerLineSeg, lineCount,
kLineSegsNumInIdxBuffer);
- batchTarget->draw(vertices);
+ target->draw(vertices);
}
}
@@ -897,12 +897,12 @@ void AAHairlineBatch::generateGeometry(GrBatchTarget* batchTarget) {
int firstVertex;
SkAutoTUnref<const GrIndexBuffer> quadsIndexBuffer(
- ref_quads_index_buffer(batchTarget->resourceProvider()));
+ ref_quads_index_buffer(target->resourceProvider()));
size_t vertexStride = sizeof(BezierVertex);
int vertexCount = kQuadNumVertices * quadCount + kQuadNumVertices * conicCount;
- void *vertices = batchTarget->makeVertSpace(vertexStride, vertexCount,
- &vertexBuffer, &firstVertex);
+ void *vertices = target->makeVertexSpace(vertexStride, vertexCount,
+ &vertexBuffer, &firstVertex);
if (!vertices || !quadsIndexBuffer) {
SkDebugf("Could not allocate vertices\n");
@@ -924,27 +924,27 @@ void AAHairlineBatch::generateGeometry(GrBatchTarget* batchTarget) {
}
if (quadCount > 0) {
- batchTarget->initDraw(quadGP, this->pipeline());
+ target->initDraw(quadGP, this->pipeline());
{
GrVertices verts;
verts.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, quadsIndexBuffer,
firstVertex, kQuadNumVertices, kIdxsPerQuad, quadCount,
kQuadsNumInIdxBuffer);
- batchTarget->draw(verts);
+ target->draw(verts);
firstVertex += quadCount * kQuadNumVertices;
}
}
if (conicCount > 0) {
- batchTarget->initDraw(conicGP, this->pipeline());
+ target->initDraw(conicGP, this->pipeline());
{
GrVertices verts;
verts.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, quadsIndexBuffer,
firstVertex, kQuadNumVertices, kIdxsPerQuad, conicCount,
kQuadsNumInIdxBuffer);
- batchTarget->draw(verts);
+ target->draw(verts);
}
}
}
diff --git a/src/gpu/GrAALinearizingConvexPathRenderer.cpp b/src/gpu/GrAALinearizingConvexPathRenderer.cpp
index d1dc00dbed..c7b1da0627 100644
--- a/src/gpu/GrAALinearizingConvexPathRenderer.cpp
+++ b/src/gpu/GrAALinearizingConvexPathRenderer.cpp
@@ -9,7 +9,7 @@
#include "GrAALinearizingConvexPathRenderer.h"
#include "GrAAConvexTessellator.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrContext.h"
#include "GrDefaultGeoProcFactory.h"
@@ -155,7 +155,7 @@ public:
fBatch.fCanTweakAlphaForCoverage = opt.canTweakAlphaForCoverage();
}
- void draw(GrBatchTarget* batchTarget, const GrPipeline* pipeline, int vertexCount,
+ void draw(GrVertexBatch::Target* target, const GrPipeline* pipeline, int vertexCount,
size_t vertexStride, void* vertices, int indexCount, uint16_t* indices) {
if (vertexCount == 0 || indexCount == 0) {
return;
@@ -163,8 +163,8 @@ public:
const GrVertexBuffer* vertexBuffer;
GrVertices info;
int firstVertex;
- void* verts = batchTarget->makeVertSpace(vertexStride, vertexCount, &vertexBuffer,
- &firstVertex);
+ void* verts = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer,
+ &firstVertex);
if (!verts) {
SkDebugf("Could not allocate vertices\n");
return;
@@ -173,7 +173,7 @@ public:
const GrIndexBuffer* indexBuffer;
int firstIndex;
- uint16_t* idxs = batchTarget->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
+ uint16_t* idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
if (!idxs) {
SkDebugf("Could not allocate indices\n");
return;
@@ -181,10 +181,10 @@ public:
memcpy(idxs, indices, indexCount * sizeof(uint16_t));
info.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer, firstVertex,
firstIndex, vertexCount, indexCount);
- batchTarget->draw(info);
+ target->draw(info);
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
// Setup GrGeometryProcessor
@@ -197,7 +197,7 @@ public:
return;
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
size_t vertexStride = gp->getVertexStride();
@@ -226,8 +226,8 @@ public:
if (indexCount + currentIndices > UINT16_MAX) {
// if we added the current instance, we would overflow the indices we can store in a
// uint16_t. Draw what we've got so far and reset.
- draw(batchTarget, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
- indices);
+ draw(target, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
+ indices);
vertexCount = 0;
indexCount = 0;
}
@@ -246,7 +246,7 @@ public:
vertexCount += currentVertices;
indexCount += currentIndices;
}
- draw(batchTarget, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
+ draw(target, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
indices);
free(vertices);
free(indices);
diff --git a/src/gpu/GrAtlasTextContext.cpp b/src/gpu/GrAtlasTextContext.cpp
index b6e0acc58f..fbfb2e1c94 100644
--- a/src/gpu/GrAtlasTextContext.cpp
+++ b/src/gpu/GrAtlasTextContext.cpp
@@ -7,7 +7,7 @@
#include "GrAtlasTextContext.h"
#include "GrBatchFontCache.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrBlurUtils.h"
#include "GrDefaultGeoProcFactory.h"
@@ -1535,7 +1535,7 @@ public:
int fVertexOffset;
};
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
// if we have RGB, then we won't have any SkShaders so no need to use a localmatrix.
// TODO actually only invert if we don't have RGBA
SkMatrix localMatrix;
@@ -1575,17 +1575,17 @@ public:
get_vertex_stride_df(maskFormat, isLCD) :
get_vertex_stride(maskFormat)));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int glyphCount = this->numGlyphs();
const GrVertexBuffer* vertexBuffer;
- void* vertices = batchTarget->makeVertSpace(vertexStride,
- glyphCount * kVerticesPerGlyph,
- &vertexBuffer,
- &flushInfo.fVertexOffset);
+ void* vertices = target->makeVertexSpace(vertexStride,
+ glyphCount * kVerticesPerGlyph,
+ &vertexBuffer,
+ &flushInfo.fVertexOffset);
flushInfo.fVertexBuffer.reset(SkRef(vertexBuffer));
- flushInfo.fIndexBuffer.reset(batchTarget->resourceProvider()->refQuadIndexBuffer());
+ flushInfo.fIndexBuffer.reset(target->resourceProvider()->refQuadIndexBuffer());
if (!vertices || !flushInfo.fVertexBuffer) {
SkDebugf("Could not allocate vertices\n");
return;
@@ -1689,13 +1689,12 @@ public:
//SkASSERT(glyph->fMaskFormat == this->maskFormat());
if (!fFontCache->hasGlyph(glyph) &&
- !strike->addGlyphToAtlas(batchTarget, glyph, scaler, skGlyph,
- maskFormat)) {
- this->flush(batchTarget, &flushInfo);
- batchTarget->initDraw(gp, this->pipeline());
+ !strike->addGlyphToAtlas(target, glyph, scaler, skGlyph, maskFormat)) {
+ this->flush(target, &flushInfo);
+ target->initDraw(gp, this->pipeline());
brokenRun = glyphIdx > 0;
- SkDEBUGCODE(bool success =) strike->addGlyphToAtlas(batchTarget,
+ SkDEBUGCODE(bool success =) strike->addGlyphToAtlas(target,
glyph,
scaler,
skGlyph,
@@ -1703,7 +1702,7 @@ public:
SkASSERT(success);
}
fFontCache->addGlyphToBulkAndSetUseToken(&info.fBulkUseToken, glyph,
- batchTarget->currentToken());
+ target->currentToken());
// Texture coords are the last vertex attribute so we get a pointer to the
// first one and then map with stride in regenerateTextureCoords
@@ -1747,9 +1746,7 @@ public:
// set use tokens for all of the glyphs in our subrun. This is only valid if we
// have a valid atlas generation
- fFontCache->setUseTokenBulk(info.fBulkUseToken,
- batchTarget->currentToken(),
- maskFormat);
+ fFontCache->setUseTokenBulk(info.fBulkUseToken, target->currentToken(), maskFormat);
}
// now copy all vertices
@@ -1762,7 +1759,7 @@ public:
if (cache) {
SkGlyphCache::AttachCache(cache);
}
- this->flush(batchTarget, &flushInfo);
+ this->flush(target, &flushInfo);
}
// to avoid even the initial copy of the struct, we have a getter for the first item which
@@ -1875,14 +1872,14 @@ private:
}
}
- void flush(GrBatchTarget* batchTarget, FlushInfo* flushInfo) {
+ void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) {
GrVertices vertices;
int maxGlyphsPerDraw = flushInfo->fIndexBuffer->maxQuads();
vertices.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
flushInfo->fIndexBuffer, flushInfo->fVertexOffset,
kVerticesPerGlyph, kIndicesPerGlyph, flushInfo->fGlyphsToFlush,
maxGlyphsPerDraw);
- batchTarget->draw(vertices);
+ target->draw(vertices);
flushInfo->fVertexOffset += kVerticesPerGlyph * flushInfo->fGlyphsToFlush;
flushInfo->fGlyphsToFlush = 0;
}
diff --git a/src/gpu/GrBatchAtlas.cpp b/src/gpu/GrBatchAtlas.cpp
index 3ce157c710..f75df8bf12 100644
--- a/src/gpu/GrBatchAtlas.cpp
+++ b/src/gpu/GrBatchAtlas.cpp
@@ -6,8 +6,7 @@
*/
#include "GrBatchAtlas.h"
-#include "GrBatchTarget.h"
-#include "GrGpu.h"
+#include "GrBatchFlushState.h"
#include "GrRectanizer.h"
#include "GrTracing.h"
#include "GrVertexBuffer.h"
@@ -32,8 +31,6 @@ static GrBatchAtlas::AtlasID create_id(uint32_t index, uint64_t generation) {
class BatchPlot : public SkRefCnt {
public:
- typedef GrBatchAtlas::BatchToken BatchToken;
-
SK_DECLARE_INTERNAL_LLIST_INTERFACE(BatchPlot);
// index() refers to the index of the plot in the owning GrAtlas's plot array. genID() is a
@@ -82,18 +79,18 @@ public:
// to issue a new upload even if we update the cpu backing store. We use lastref to determine
// when we can evict a plot from the cache, ie if the last ref has already flushed through
// the gpu then we can reuse the plot
- BatchToken lastUploadToken() const { return fLastUpload; }
- BatchToken lastUseToken() const { return fLastUse; }
- void setLastUploadToken(BatchToken batchToken) {
+ GrBatchToken lastUploadToken() const { return fLastUpload; }
+ GrBatchToken lastUseToken() const { return fLastUse; }
+ void setLastUploadToken(GrBatchToken batchToken) {
SkASSERT(batchToken >= fLastUpload);
fLastUpload = batchToken;
}
- void setLastUseToken(BatchToken batchToken) {
+ void setLastUseToken(GrBatchToken batchToken) {
SkASSERT(batchToken >= fLastUse);
fLastUse = batchToken;
}
- void uploadToTexture(GrBatchTarget::TextureUploader uploader) {
+ void uploadToTexture(GrBatchUploader::TextureUploader* uploader) {
// We should only be issuing uploads if we are in fact dirty
SkASSERT(fDirty && fData && fTexture);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture");
@@ -101,10 +98,10 @@ public:
const unsigned char* dataPtr = fData;
dataPtr += rowBytes * fDirtyRect.fTop;
dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
- uploader.writeTexturePixels(fTexture,
- fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
- fDirtyRect.width(), fDirtyRect.height(),
- fTexture->config(), dataPtr, rowBytes);
+ uploader->writeTexturePixels(fTexture,
+ fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
+ fDirtyRect.width(), fDirtyRect.height(),
+ fTexture->config(), dataPtr, rowBytes);
fDirtyRect.setEmpty();
SkDEBUGCODE(fDirty = false;)
}
@@ -175,8 +172,8 @@ private:
fTexture = texture;
}
- BatchToken fLastUpload;
- BatchToken fLastUse;
+ GrBatchToken fLastUpload;
+ GrBatchToken fLastUse;
uint32_t fIndex;
uint64_t fGenID;
@@ -201,7 +198,7 @@ private:
////////////////////////////////////////////////////////////////////////////////
-class GrPlotUploader : public GrBatchTarget::Uploader {
+class GrPlotUploader : public GrBatchUploader {
public:
GrPlotUploader(BatchPlot* plot)
: INHERITED(plot->lastUploadToken())
@@ -209,14 +206,14 @@ public:
SkASSERT(plot);
}
- void upload(GrBatchTarget::TextureUploader uploader) override {
+ void upload(TextureUploader* uploader) override {
fPlot->uploadToTexture(uploader);
}
private:
SkAutoTUnref<BatchPlot> fPlot;
- typedef GrBatchTarget::Uploader INHERITED;
+ typedef GrBatchUploader INHERITED;
};
///////////////////////////////////////////////////////////////////////////////
@@ -273,21 +270,21 @@ void GrBatchAtlas::makeMRU(BatchPlot* plot) {
fPlotList.addToHead(plot);
}
-inline void GrBatchAtlas::updatePlot(GrBatchTarget* batchTarget, AtlasID* id, BatchPlot* plot) {
+inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, BatchPlot* plot) {
this->makeMRU(plot);
// If our most recent upload has already occurred then we have to insert a new
// upload. Otherwise, we already have a scheduled upload that hasn't yet ocurred.
// This new update will piggy back on that previously scheduled update.
- if (batchTarget->isIssued(plot->lastUploadToken())) {
- plot->setLastUploadToken(batchTarget->asapToken());
+ if (target->hasTokenBeenFlushed(plot->lastUploadToken())) {
+ plot->setLastUploadToken(target->asapToken());
SkAutoTUnref<GrPlotUploader> uploader(SkNEW_ARGS(GrPlotUploader, (plot)));
- batchTarget->upload(uploader);
+ target->upload(uploader);
}
*id = plot->id();
}
-bool GrBatchAtlas::addToAtlas(AtlasID* id, GrBatchTarget* batchTarget,
+bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* batchTarget,
int width, int height, const void* image, SkIPoint16* loc) {
// We should already have a texture, TODO clean this up
SkASSERT(fTexture &&
@@ -311,7 +308,7 @@ bool GrBatchAtlas::addToAtlas(AtlasID* id, GrBatchTarget* batchTarget,
plotIter.init(fPlotList, GrBatchPlotList::Iter::kTail_IterStart);
plot = plotIter.get();
SkASSERT(plot);
- if (batchTarget->isIssued(plot->lastUseToken())) {
+ if (batchTarget->hasTokenBeenFlushed(plot->lastUseToken())) {
this->processEviction(plot->id());
plot->resetRects();
SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc, fBPP * width);
@@ -362,7 +359,7 @@ bool GrBatchAtlas::hasID(AtlasID id) {
return fPlotArray[index]->genID() == GetGenerationFromID(id);
}
-void GrBatchAtlas::setLastUseToken(AtlasID id, BatchToken batchToken) {
+void GrBatchAtlas::setLastUseToken(AtlasID id, GrBatchToken batchToken) {
SkASSERT(this->hasID(id));
uint32_t index = GetIndexFromID(id);
SkASSERT(index < fNumPlotsX * fNumPlotsY);
@@ -370,7 +367,8 @@ void GrBatchAtlas::setLastUseToken(AtlasID id, BatchToken batchToken) {
fPlotArray[index]->setLastUseToken(batchToken);
}
-void GrBatchAtlas::setLastUseTokenBulk(const BulkUseTokenUpdater& updater, BatchToken batchToken) {
+void GrBatchAtlas::setLastUseTokenBulk(const BulkUseTokenUpdater& updater,
+ GrBatchToken batchToken) {
int count = updater.fPlotsToUpdate.count();
for (int i = 0; i < count; i++) {
BatchPlot* plot = fPlotArray[updater.fPlotsToUpdate[i]];
diff --git a/src/gpu/GrBatchAtlas.h b/src/gpu/GrBatchAtlas.h
index 96d229810b..4948953141 100644
--- a/src/gpu/GrBatchAtlas.h
+++ b/src/gpu/GrBatchAtlas.h
@@ -9,19 +9,18 @@
#define GrBatchAtlas_DEFINED
#include "GrTexture.h"
+#include "batches/GrDrawBatch.h"
#include "SkPoint.h"
#include "SkTDArray.h"
#include "SkTInternalLList.h"
class BatchPlot;
-class GrBatchTarget;
class GrRectanizer;
typedef SkTInternalLList<BatchPlot> GrBatchPlotList;
class GrBatchAtlas {
public:
- typedef uint64_t BatchToken;
// An AtlasID is an opaque handle which callers can use to determine if the atlas contains
// a specific piece of data
typedef uint64_t AtlasID;
@@ -43,7 +42,7 @@ public:
// NOTE: If the client intends to refer to the atlas, they should immediately call 'setUseToken'
// with the currentToken from the batch target, otherwise the next call to addToAtlas might
// cause an eviction
- bool addToAtlas(AtlasID*, GrBatchTarget*, int width, int height, const void* image,
+ bool addToAtlas(AtlasID*, GrDrawBatch::Target*, int width, int height, const void* image,
SkIPoint16* loc);
GrTexture* getTexture() const { return fTexture; }
@@ -52,7 +51,7 @@ public:
bool hasID(AtlasID id);
// To ensure the atlas does not evict a given entry, the client must set the last use token
- void setLastUseToken(AtlasID id, BatchToken batchToken);
+ void setLastUseToken(AtlasID id, GrBatchToken batchToken);
void registerEvictionCallback(EvictionFunc func, void* userData) {
EvictionData* data = fEvictionCallbacks.append();
data->fFunc = func;
@@ -104,7 +103,7 @@ public:
friend class GrBatchAtlas;
};
- void setLastUseTokenBulk(const BulkUseTokenUpdater& reffer, BatchToken);
+ void setLastUseTokenBulk(const BulkUseTokenUpdater& reffer, GrBatchToken);
static const int kGlyphMaxDim = 256;
static bool GlyphTooLargeForAtlas(int width, int height) {
@@ -121,7 +120,7 @@ private:
return (id >> 16) & 0xffffffffffff;
}
- inline void updatePlot(GrBatchTarget*, AtlasID*, BatchPlot*);
+ inline void updatePlot(GrDrawBatch::Target*, AtlasID*, BatchPlot*);
inline void makeMRU(BatchPlot* plot);
diff --git a/src/gpu/GrBatchFlushState.cpp b/src/gpu/GrBatchFlushState.cpp
new file mode 100644
index 0000000000..f12066609e
--- /dev/null
+++ b/src/gpu/GrBatchFlushState.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBatchFlushState.h"
+
+#include "GrBatchAtlas.h"
+#include "GrPipeline.h"
+
+GrBatchFlushState::GrBatchFlushState(GrGpu* gpu, GrResourceProvider* resourceProvider,
+ GrBatchToken lastFlushedToken)
+ : fGpu(gpu)
+ , fUploader(gpu)
+ , fResourceProvider(resourceProvider)
+ , fVertexPool(gpu)
+ , fIndexPool(gpu)
+ , fCurrentToken(lastFlushedToken)
+ , fLastFlushedToken(lastFlushedToken) {}
+
+void* GrBatchFlushState::makeVertexSpace(size_t vertexSize, int vertexCount,
+ const GrVertexBuffer** buffer, int* startVertex) {
+ return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
+}
+
+uint16_t* GrBatchFlushState::makeIndexSpace(int indexCount,
+ const GrIndexBuffer** buffer, int* startIndex) {
+ return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
+}
diff --git a/src/gpu/GrBatchFlushState.h b/src/gpu/GrBatchFlushState.h
new file mode 100644
index 0000000000..5e68e28285
--- /dev/null
+++ b/src/gpu/GrBatchFlushState.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatchBuffer_DEFINED
+#define GrBatchBuffer_DEFINED
+
+#include "GrBufferAllocPool.h"
+#include "batches/GrVertexBatch.h"
+
+class GrResourceProvider;
+
+/** Simple class that performs the upload on behalf of a GrBatchUploader. */
+class GrBatchUploader::TextureUploader {
+public:
+ TextureUploader(GrGpu* gpu) : fGpu(gpu) { SkASSERT(gpu); }
+
+ /**
+ * Updates the pixels in a rectangle of a texture.
+ *
+ * @param left left edge of the rectangle to write (inclusive)
+ * @param top top edge of the rectangle to write (inclusive)
+ * @param width width of rectangle to write in pixels.
+ * @param height height of rectangle to write in pixels.
+ * @param config the pixel config of the source buffer
+ * @param buffer memory to read pixels from
+ * @param rowBytes number of bytes between consecutive rows. Zero
+ * means rows are tightly packed.
+ */
+ bool writeTexturePixels(GrTexture* texture,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer,
+ size_t rowBytes) {
+ return fGpu->writePixels(texture, left, top, width, height, config, buffer, rowBytes);
+ }
+
+private:
+ GrGpu* fGpu;
+};
+
+/** Tracks the state across all the GrBatches in a GrDrawTarget flush. */
+class GrBatchFlushState {
+public:
+ GrBatchFlushState(GrGpu*, GrResourceProvider*, GrBatchToken lastFlushedToken);
+
+ ~GrBatchFlushState() { SkASSERT(fLastFlushedToken == fCurrentToken); }
+
+ void advanceToken() { ++fCurrentToken; }
+
+ void advanceLastFlushedToken() { ++fLastFlushedToken; }
+
+ /** Inserts an upload to be executed after all batches in the flush have prepared their draws
+ but before the draws are executed to the backend 3D API. */
+ void addASAPUpload(GrBatchUploader* upload) {
+ fAsapUploads.push_back().reset(SkRef(upload));
+ }
+
+ const GrCaps& caps() const { return *fGpu->caps(); }
+ GrResourceProvider* resourceProvider() const { return fResourceProvider; }
+
+ /** Has the token been flushed to the backend 3D API. */
+ bool hasTokenBeenFlushed(GrBatchToken token) const { return fLastFlushedToken >= token; }
+
+ /** The current token advances once for every contiguous set of uninterrupted draws prepared
+ by a batch. */
+ GrBatchToken currentToken() const { return fCurrentToken; }
+
+ /** The last token flushed all the way to the backend API. */
+ GrBatchToken lastFlushedToken() const { return fLastFlushedToken; }
+
+ /** This is a magic token that can be used to indicate that an upload should occur before
+ any draws for any batch in the current flush execute. */
+ GrBatchToken asapToken() const { return fLastFlushedToken + 1; }
+
+ void* makeVertexSpace(size_t vertexSize, int vertexCount,
+ const GrVertexBuffer** buffer, int* startVertex);
+ uint16_t* makeIndexSpace(int indexCount, const GrIndexBuffer** buffer, int* startIndex);
+
+ /** This is called after each batch has a chance to prepare its draws and before the draws
+ are issued. */
+ void preIssueDraws() {
+ fVertexPool.unmap();
+ fIndexPool.unmap();
+ int uploadCount = fAsapUploads.count();
+ for (int i = 0; i < uploadCount; i++) {
+ fAsapUploads[i]->upload(&fUploader);
+ }
+ fAsapUploads.reset();
+ }
+
+ void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }
+
+ void putBackVertexSpace(size_t sizeInBytes) { fVertexPool.putBack(sizeInBytes); }
+
+ GrBatchUploader::TextureUploader* uploader() { return &fUploader; }
+
+ GrGpu* gpu() { return fGpu; }
+
+private:
+ GrGpu* fGpu;
+ GrBatchUploader::TextureUploader fUploader;
+
+ GrResourceProvider* fResourceProvider;
+
+ GrVertexBufferAllocPool fVertexPool;
+ GrIndexBufferAllocPool fIndexPool;
+
+ SkTArray<SkAutoTUnref<GrBatchUploader>, true> fAsapUploads;
+
+ GrBatchToken fCurrentToken;
+
+ GrBatchToken fLastFlushedToken;
+};
+
+/**
+ * GrDrawBatch instances use this object to allocate space for their geometry and to issue the draws
+ * that render their batch.
+ */
+class GrDrawBatch::Target {
+public:
+ Target(GrBatchFlushState* state, GrDrawBatch* batch) : fState(state), fBatch(batch) {}
+
+ void upload(GrBatchUploader* upload) {
+ if (this->asapToken() == upload->lastUploadToken()) {
+ fState->addASAPUpload(upload);
+ } else {
+ fBatch->fInlineUploads.push_back().reset(SkRef(upload));
+ }
+ }
+
+ bool hasTokenBeenFlushed(GrBatchToken token) const {
+ return fState->hasTokenBeenFlushed(token);
+ }
+ GrBatchToken currentToken() const { return fState->currentToken(); }
+ GrBatchToken asapToken() const { return fState->asapToken(); }
+
+ const GrCaps& caps() const { return fState->caps(); }
+
+ GrResourceProvider* resourceProvider() const { return fState->resourceProvider(); }
+
+protected:
+ GrDrawBatch* batch() { return fBatch; }
+ GrBatchFlushState* state() { return fState; }
+
+private:
+ GrBatchFlushState* fState;
+ GrDrawBatch* fBatch;
+};
+
+/** Extension of GrDrawBatch::Target for use by GrVertexBatch. Adds the ability to create vertex
+ draws. */
+class GrVertexBatch::Target : public GrDrawBatch::Target {
+public:
+ Target(GrBatchFlushState* state, GrVertexBatch* batch) : INHERITED(state, batch) {}
+
+ void initDraw(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline) {
+ GrVertexBatch::DrawArray* draws = this->vertexBatch()->fDrawArrays.addToTail();
+ draws->fPrimitiveProcessor.reset(primProc);
+ this->state()->advanceToken();
+ }
+
+ void draw(const GrVertices& vertices) {
+ this->vertexBatch()->fDrawArrays.tail()->fDraws.push_back(vertices);
+ }
+
+ void* makeVertexSpace(size_t vertexSize, int vertexCount,
+ const GrVertexBuffer** buffer, int* startVertex) {
+ return this->state()->makeVertexSpace(vertexSize, vertexCount, buffer, startVertex);
+ }
+
+ uint16_t* makeIndexSpace(int indexCount, const GrIndexBuffer** buffer, int* startIndex) {
+ return this->state()->makeIndexSpace(indexCount, buffer, startIndex);
+ }
+
+ /** Helpers for batches which over-allocate and then return data to the pool. */
+ void putBackIndices(int indices) { this->state()->putBackIndices(indices); }
+ void putBackVertices(int vertices, size_t vertexStride) {
+ this->state()->putBackVertexSpace(vertices * vertexStride);
+ }
+
+private:
+ GrVertexBatch* vertexBatch() { return static_cast<GrVertexBatch*>(this->batch()); }
+ typedef GrDrawBatch::Target INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrBatchFontCache.cpp b/src/gpu/GrBatchFontCache.cpp
index dfab80af26..88b3a4db17 100644
--- a/src/gpu/GrBatchFontCache.cpp
+++ b/src/gpu/GrBatchFontCache.cpp
@@ -173,7 +173,7 @@ void GrBatchTextStrike::removeID(GrBatchAtlas::AtlasID id) {
}
}
-bool GrBatchTextStrike::addGlyphToAtlas(GrBatchTarget* batchTarget, GrGlyph* glyph,
+bool GrBatchTextStrike::addGlyphToAtlas(GrDrawBatch::Target* target, GrGlyph* glyph,
GrFontScaler* scaler, const SkGlyph& skGlyph,
GrMaskFormat expectedMaskFormat) {
SkASSERT(glyph);
@@ -200,7 +200,7 @@ bool GrBatchTextStrike::addGlyphToAtlas(GrBatchTarget* batchTarget, GrGlyph* gly
}
}
- bool success = fBatchFontCache->addToAtlas(this, &glyph->fID, batchTarget, expectedMaskFormat,
+ bool success = fBatchFontCache->addToAtlas(this, &glyph->fID, target, expectedMaskFormat,
glyph->width(), glyph->height(),
storage.get(), &glyph->fAtlasLocation);
if (success) {
diff --git a/src/gpu/GrBatchFontCache.h b/src/gpu/GrBatchFontCache.h
index 998c220e8a..f315a3eb77 100644
--- a/src/gpu/GrBatchFontCache.h
+++ b/src/gpu/GrBatchFontCache.h
@@ -16,7 +16,6 @@
#include "SkVarAlloc.h"
class GrBatchFontCache;
-class GrBatchTarget;
class GrGpu;
/**
@@ -59,7 +58,7 @@ public:
// happen.
// TODO we can handle some of these cases if we really want to, but the long term solution is to
// get the actual glyph image itself when we get the glyph metrics.
- bool addGlyphToAtlas(GrBatchTarget*, GrGlyph*, GrFontScaler*, const SkGlyph&,
+ bool addGlyphToAtlas(GrDrawBatch::Target*, GrGlyph*, GrFontScaler*, const SkGlyph&,
GrMaskFormat expectedMaskFormat);
// testing
@@ -134,30 +133,30 @@ public:
}
// To ensure the GrBatchAtlas does not evict the Glyph Mask from its texture backing store,
- // the client must pass in the currentToken from the GrBatchTarget along with the GrGlyph.
+ // the client must pass in the current batch token along with the GrGlyph.
// A BulkUseTokenUpdater is used to manage bulk last use token updating in the Atlas.
// For convenience, this function will also set the use token for the current glyph if required
// NOTE: the bulk uploader is only valid if the subrun has a valid atlasGeneration
void addGlyphToBulkAndSetUseToken(GrBatchAtlas::BulkUseTokenUpdater* updater,
- GrGlyph* glyph, GrBatchAtlas::BatchToken token) {
+ GrGlyph* glyph, GrBatchToken token) {
SkASSERT(glyph);
updater->add(glyph->fID);
this->getAtlas(glyph->fMaskFormat)->setLastUseToken(glyph->fID, token);
}
void setUseTokenBulk(const GrBatchAtlas::BulkUseTokenUpdater& updater,
- GrBatchAtlas::BatchToken token,
+ GrBatchToken token,
GrMaskFormat format) {
this->getAtlas(format)->setLastUseTokenBulk(updater, token);
}
// add to texture atlas that matches this format
bool addToAtlas(GrBatchTextStrike* strike, GrBatchAtlas::AtlasID* id,
- GrBatchTarget* batchTarget,
+ GrDrawBatch::Target* target,
GrMaskFormat format, int width, int height, const void* image,
SkIPoint16* loc) {
fPreserveStrike = strike;
- return this->getAtlas(format)->addToAtlas(id, batchTarget, width, height, image, loc);
+ return this->getAtlas(format)->addToAtlas(id, target, width, height, image, loc);
}
// Some clients may wish to verify the integrity of the texture backing store of the
diff --git a/src/gpu/GrBatchTarget.cpp b/src/gpu/GrBatchTarget.cpp
deleted file mode 100644
index b6dadd699b..0000000000
--- a/src/gpu/GrBatchTarget.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "GrBatchTarget.h"
-
-#include "GrBatchAtlas.h"
-#include "GrPipeline.h"
-
-GrBatchTarget::GrBatchTarget(GrGpu* gpu)
- : fGpu(gpu)
- , fVertexPool(gpu)
- , fIndexPool(gpu)
- , fFlushBuffer(kFlushBufferInitialSizeInBytes)
- , fIter(fFlushBuffer)
- , fNumberOfDraws(0)
- , fCurrentToken(0)
- , fLastFlushedToken(0)
- , fInlineUpdatesIndex(0) {
-}
-
-void GrBatchTarget::flushNext(int n) {
- for (; n > 0; n--) {
- fLastFlushedToken++;
- SkDEBUGCODE(bool verify =) fIter.next();
- SkASSERT(verify);
-
- BufferedFlush* bf = fIter.get();
-
- // Flush all texture uploads
- int uploadCount = fInlineUploads.count();
- while (fInlineUpdatesIndex < uploadCount &&
- fInlineUploads[fInlineUpdatesIndex]->lastUploadToken() <= fLastFlushedToken) {
- fInlineUploads[fInlineUpdatesIndex++]->upload(TextureUploader(fGpu));
- }
-
- GrProgramDesc desc;
- const GrPipeline* pipeline = bf->fPipeline;
- const GrPrimitiveProcessor* primProc = bf->fPrimitiveProcessor.get();
- fGpu->buildProgramDesc(&desc, *primProc, *pipeline, bf->fBatchTracker);
-
- GrGpu::DrawArgs args(primProc, pipeline, &desc, &bf->fBatchTracker);
-
- int drawCount = bf->fVertexDraws.count();
- const SkSTArray<1, GrVertices, true>& vertexDraws = bf->fVertexDraws;
- for (int i = 0; i < drawCount; i++) {
- fGpu->draw(args, vertexDraws[i]);
- }
- }
-}
-
-void* GrBatchTarget::makeVertSpace(size_t vertexSize, int vertexCount,
- const GrVertexBuffer** buffer, int* startVertex) {
- return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
-}
-
-uint16_t* GrBatchTarget::makeIndexSpace(int indexCount,
- const GrIndexBuffer** buffer, int* startIndex) {
- return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
-}
-
diff --git a/src/gpu/GrBatchTarget.h b/src/gpu/GrBatchTarget.h
deleted file mode 100644
index 8a78904b10..0000000000
--- a/src/gpu/GrBatchTarget.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrBatchBuffer_DEFINED
-#define GrBatchBuffer_DEFINED
-
-#include "GrBatchAtlas.h"
-#include "GrBufferAllocPool.h"
-#include "GrContext.h"
-#include "GrPendingProgramElement.h"
-#include "GrPipeline.h"
-#include "GrTRecorder.h"
-#include "GrVertices.h"
-
-/*
- * GrBatch instances use this object to allocate space for their geometry and to issue the draws
- * that render their batch.
- */
-class GrBatchTarget : public SkNoncopyable {
-public:
- typedef GrBatchAtlas::BatchToken BatchToken;
- GrBatchTarget(GrGpu* gpu);
-
- void initDraw(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline) {
- GrNEW_APPEND_TO_RECORDER(fFlushBuffer, BufferedFlush, (primProc, pipeline));
- fNumberOfDraws++;
- fCurrentToken++;
- }
-
- class TextureUploader {
- public:
- TextureUploader(GrGpu* gpu) : fGpu(gpu) { SkASSERT(gpu); }
-
- /**
- * Updates the pixels in a rectangle of a texture.
- *
- * @param left left edge of the rectangle to write (inclusive)
- * @param top top edge of the rectangle to write (inclusive)
- * @param width width of rectangle to write in pixels.
- * @param height height of rectangle to write in pixels.
- * @param config the pixel config of the source buffer
- * @param buffer memory to read pixels from
- * @param rowBytes number of bytes between consecutive rows. Zero
- * means rows are tightly packed.
- */
- bool writeTexturePixels(GrTexture* texture,
- int left, int top, int width, int height,
- GrPixelConfig config, const void* buffer,
- size_t rowBytes) {
- return fGpu->writePixels(texture, left, top, width, height, config, buffer, rowBytes);
- }
-
- private:
- GrGpu* fGpu;
- };
-
- class Uploader : public SkRefCnt {
- public:
- Uploader(BatchToken lastUploadToken) : fLastUploadToken(lastUploadToken) {}
- BatchToken lastUploadToken() const { return fLastUploadToken; }
- virtual void upload(TextureUploader)=0;
-
- private:
- BatchToken fLastUploadToken;
- };
-
- void upload(Uploader* upload) {
- if (this->asapToken() == upload->lastUploadToken()) {
- fAsapUploads.push_back().reset(SkRef(upload));
- } else {
- fInlineUploads.push_back().reset(SkRef(upload));
- }
- }
-
- void draw(const GrVertices& vertices) {
- fFlushBuffer.back().fVertexDraws.push_back(vertices);
- }
-
- bool isIssued(BatchToken token) const { return fLastFlushedToken >= token; }
- BatchToken currentToken() const { return fCurrentToken; }
- BatchToken asapToken() const { return fLastFlushedToken + 1; }
-
- // TODO much of this complexity goes away when batch is everywhere
- void resetNumberOfDraws() { fNumberOfDraws = 0; }
- int numberOfDraws() const { return fNumberOfDraws; }
- void preFlush() {
- this->unmapVertexAndIndexBuffers();
- int updateCount = fAsapUploads.count();
- for (int i = 0; i < updateCount; i++) {
- fAsapUploads[i]->upload(TextureUploader(fGpu));
- }
- fInlineUpdatesIndex = 0;
- fIter = FlushBuffer::Iter(fFlushBuffer);
- }
- void flushNext(int n);
- void postFlush() {
- SkASSERT(!fIter.next());
- fFlushBuffer.reset();
- fAsapUploads.reset();
- fInlineUploads.reset();
- }
-
- const GrCaps& caps() const { return *fGpu->caps(); }
-
- GrResourceProvider* resourceProvider() const { return fGpu->getContext()->resourceProvider(); }
-
- void* makeVertSpace(size_t vertexSize, int vertexCount,
- const GrVertexBuffer** buffer, int* startVertex);
- uint16_t* makeIndexSpace(int indexCount,
- const GrIndexBuffer** buffer, int* startIndex);
-
- // A helper for draws which overallocate and then return data to the pool
- void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }
-
- void putBackVertices(size_t vertices, size_t vertexStride) {
- fVertexPool.putBack(vertices * vertexStride);
- }
-
- void reset() {
- fVertexPool.reset();
- fIndexPool.reset();
- }
-
-private:
- void unmapVertexAndIndexBuffers() {
- fVertexPool.unmap();
- fIndexPool.unmap();
- }
-
- GrGpu* fGpu;
- GrVertexBufferAllocPool fVertexPool;
- GrIndexBufferAllocPool fIndexPool;
-
- typedef void* TBufferAlign; // This wouldn't be enough align if a command used long double.
-
- struct BufferedFlush {
- BufferedFlush(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline)
- : fPrimitiveProcessor(primProc)
- , fPipeline(pipeline) {}
- typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
- ProgramPrimitiveProcessor fPrimitiveProcessor;
- const GrPipeline* fPipeline;
- GrBatchTracker fBatchTracker;
- SkSTArray<1, GrVertices, true> fVertexDraws;
- };
-
- enum {
- kFlushBufferInitialSizeInBytes = 8 * sizeof(BufferedFlush),
- };
-
- typedef GrTRecorder<BufferedFlush, TBufferAlign> FlushBuffer;
-
- FlushBuffer fFlushBuffer;
- // TODO this is temporary
- FlushBuffer::Iter fIter;
- int fNumberOfDraws;
- BatchToken fCurrentToken;
- BatchToken fLastFlushedToken; // The next token to be flushed
- SkTArray<SkAutoTUnref<Uploader>, true> fAsapUploads;
- SkTArray<SkAutoTUnref<Uploader>, true> fInlineUploads;
- int fInlineUpdatesIndex;
-};
-
-#endif
diff --git a/src/gpu/GrBufferedDrawTarget.cpp b/src/gpu/GrBufferedDrawTarget.cpp
index 36542a1127..a706bf3932 100644
--- a/src/gpu/GrBufferedDrawTarget.cpp
+++ b/src/gpu/GrBufferedDrawTarget.cpp
@@ -103,7 +103,7 @@ void GrBufferedDrawTarget::onReset() {
}
void GrBufferedDrawTarget::onFlush() {
- fCommands->flush(this);
+ fCommands->flush(this->getGpu(), this->getContext()->resourceProvider());
++fDrawID;
}
diff --git a/src/gpu/GrCommandBuilder.cpp b/src/gpu/GrCommandBuilder.cpp
index cfd2225785..76f830dca0 100644
--- a/src/gpu/GrCommandBuilder.cpp
+++ b/src/gpu/GrCommandBuilder.cpp
@@ -12,9 +12,9 @@
GrCommandBuilder* GrCommandBuilder::Create(GrGpu* gpu, bool reorder) {
if (reorder) {
- return SkNEW_ARGS(GrReorderCommandBuilder, (gpu));
+ return SkNEW(GrReorderCommandBuilder);
} else {
- return SkNEW_ARGS(GrInOrderCommandBuilder, (gpu));
+ return SkNEW(GrInOrderCommandBuilder);
}
}
diff --git a/src/gpu/GrCommandBuilder.h b/src/gpu/GrCommandBuilder.h
index 95fd8ec201..004fc790dc 100644
--- a/src/gpu/GrCommandBuilder.h
+++ b/src/gpu/GrCommandBuilder.h
@@ -10,6 +10,8 @@
#include "GrTargetCommands.h"
+class GrGpu;
+class GrResourceProvider;
class GrBufferedDrawTarget;
class GrCommandBuilder : ::SkNoncopyable {
@@ -22,7 +24,7 @@ public:
virtual ~GrCommandBuilder() {}
void reset() { fCommands.reset(); }
- void flush(GrBufferedDrawTarget* bufferedDrawTarget) { fCommands.flush(bufferedDrawTarget); }
+ void flush(GrGpu* gpu, GrResourceProvider* rp) { fCommands.flush(gpu, rp); }
virtual Cmd* recordClearStencilClip(const SkIRect& rect,
bool insideClip,
@@ -66,11 +68,9 @@ protected:
typedef GrTargetCommands::ClearStencilClip ClearStencilClip;
typedef GrTargetCommands::CopySurface CopySurface;
- GrCommandBuilder(GrGpu* gpu) : fCommands(gpu) {}
+ GrCommandBuilder() {}
GrTargetCommands::CmdBuffer* cmdBuffer() { return fCommands.cmdBuffer(); }
- GrBatchTarget* batchTarget() { return fCommands.batchTarget(); }
-
private:
GrTargetCommands fCommands;
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index ae8c53a7e9..91415ac2b4 100755
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -9,7 +9,7 @@
#include "GrContext.h"
#include "GrBatchFontCache.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrBufferedDrawTarget.h"
#include "GrCaps.h"
diff --git a/src/gpu/GrDefaultPathRenderer.cpp b/src/gpu/GrDefaultPathRenderer.cpp
index 9ab020495e..8c663390cc 100644
--- a/src/gpu/GrDefaultPathRenderer.cpp
+++ b/src/gpu/GrDefaultPathRenderer.cpp
@@ -7,7 +7,7 @@
#include "GrDefaultPathRenderer.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrContext.h"
#include "GrDefaultGeoProcFactory.h"
@@ -248,7 +248,7 @@ public:
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
SkAutoTUnref<const GrGeometryProcessor> gp;
{
using namespace GrDefaultGeoProcFactory;
@@ -266,7 +266,7 @@ public:
size_t vertexStride = gp->getVertexStride();
SkASSERT(vertexStride == sizeof(SkPoint));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
@@ -313,8 +313,8 @@ public:
const GrVertexBuffer* vertexBuffer;
int firstVertex;
- void* verts = batchTarget->makeVertSpace(vertexStride, maxVertices,
- &vertexBuffer, &firstVertex);
+ void* verts = target->makeVertexSpace(vertexStride, maxVertices,
+ &vertexBuffer, &firstVertex);
if (!verts) {
SkDebugf("Could not allocate vertices\n");
@@ -326,7 +326,7 @@ public:
void* indices = NULL;
if (isIndexed) {
- indices = batchTarget->makeIndexSpace(maxIndices, &indexBuffer, &firstIndex);
+ indices = target->makeIndexSpace(maxIndices, &indexBuffer, &firstIndex);
if (!indices) {
SkDebugf("Could not allocate indices\n");
@@ -366,11 +366,11 @@ public:
} else {
vertices.init(primitiveType, vertexBuffer, firstVertex, vertexOffset);
}
- batchTarget->draw(vertices);
+ target->draw(vertices);
// put back reserves
- batchTarget->putBackIndices((size_t)(maxIndices - indexOffset));
- batchTarget->putBackVertices((size_t)(maxVertices - vertexOffset), (size_t)vertexStride);
+ target->putBackIndices((size_t)(maxIndices - indexOffset));
+ target->putBackVertices((size_t)(maxVertices - vertexOffset), (size_t)vertexStride);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
diff --git a/src/gpu/GrImmediateDrawTarget.cpp b/src/gpu/GrImmediateDrawTarget.cpp
index 5f8d4e6e02..bac9e6004b 100644
--- a/src/gpu/GrImmediateDrawTarget.cpp
+++ b/src/gpu/GrImmediateDrawTarget.cpp
@@ -18,7 +18,6 @@
GrImmediateDrawTarget::GrImmediateDrawTarget(GrContext* context)
: INHERITED(context)
- , fBatchTarget(this->getGpu())
, fDrawID(0) {
}
@@ -27,17 +26,18 @@ GrImmediateDrawTarget::~GrImmediateDrawTarget() {
}
void GrImmediateDrawTarget::onDrawBatch(GrDrawBatch* batch) {
- fBatchTarget.resetNumberOfDraws();
+#if 0
// TODO: encapsulate the specialization of GrVertexBatch in GrVertexBatch so that we can
// remove this cast. Currently all GrDrawBatches are in fact GrVertexBatch.
GrVertexBatch* vertexBatch = static_cast<GrVertexBatch*>(batch);
- vertexBatch->generateGeometry(&fBatchTarget);
+ vertexBatch->prepareDraws(&fBatchTarget);
vertexBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
fBatchTarget.preFlush();
fBatchTarget.flushNext(vertexBatch->numberOfDraws());
fBatchTarget.postFlush();
+#endif
}
void GrImmediateDrawTarget::onClear(const SkIRect& rect, GrColor color,
@@ -66,9 +66,7 @@ void GrImmediateDrawTarget::discard(GrRenderTarget* renderTarget) {
this->getGpu()->discard(renderTarget);
}
-void GrImmediateDrawTarget::onReset() {
- fBatchTarget.reset();
-}
+void GrImmediateDrawTarget::onReset() {}
void GrImmediateDrawTarget::onFlush() {
++fDrawID;
diff --git a/src/gpu/GrImmediateDrawTarget.h b/src/gpu/GrImmediateDrawTarget.h
index e7ec287c4c..cb2c243225 100644
--- a/src/gpu/GrImmediateDrawTarget.h
+++ b/src/gpu/GrImmediateDrawTarget.h
@@ -10,7 +10,7 @@
#include "GrDrawTarget.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
/**
* A debug GrDrawTarget which immediately flushes every command it receives
@@ -72,7 +72,6 @@ private:
bool isIssued(uint32_t drawID) override { return drawID != fDrawID; }
- GrBatchTarget fBatchTarget;
uint32_t fDrawID;
typedef GrClipTarget INHERITED;
diff --git a/src/gpu/GrInOrderCommandBuilder.cpp b/src/gpu/GrInOrderCommandBuilder.cpp
index 31fc6a2580..80989d17a7 100644
--- a/src/gpu/GrInOrderCommandBuilder.cpp
+++ b/src/gpu/GrInOrderCommandBuilder.cpp
@@ -38,7 +38,7 @@ GrTargetCommands::Cmd* GrInOrderCommandBuilder::recordDrawBatch(GrDrawBatch* bat
}
}
- return GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), DrawBatch, (batch, this->batchTarget()));
+ return GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), DrawBatch, (batch));
}
GrTargetCommands::Cmd*
diff --git a/src/gpu/GrInOrderCommandBuilder.h b/src/gpu/GrInOrderCommandBuilder.h
index 2908e10449..13a821256c 100644
--- a/src/gpu/GrInOrderCommandBuilder.h
+++ b/src/gpu/GrInOrderCommandBuilder.h
@@ -15,7 +15,7 @@ public:
typedef GrCommandBuilder::Cmd Cmd;
typedef GrCommandBuilder::State State;
- GrInOrderCommandBuilder(GrGpu* gpu) : INHERITED(gpu) { }
+ GrInOrderCommandBuilder() : INHERITED() { }
Cmd* recordDrawBatch(GrDrawBatch*, const GrCaps&) override;
Cmd* recordStencilPath(const GrPipelineBuilder&,
diff --git a/src/gpu/GrOvalRenderer.cpp b/src/gpu/GrOvalRenderer.cpp
index fc6ecfab42..bd5042c027 100644
--- a/src/gpu/GrOvalRenderer.cpp
+++ b/src/gpu/GrOvalRenderer.cpp
@@ -7,7 +7,7 @@
#include "GrOvalRenderer.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrDrawTarget.h"
#include "GrGeometryProcessor.h"
@@ -666,7 +666,7 @@ public:
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
SkMatrix invert;
if (!this->viewMatrix().invert(&invert)) {
return;
@@ -678,13 +678,13 @@ public:
invert,
this->usesLocalCoords()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
size_t vertexStride = gp->getVertexStride();
SkASSERT(vertexStride == sizeof(CircleVertex));
QuadHelper helper;
- CircleVertex* verts = reinterpret_cast<CircleVertex*>(helper.init(batchTarget, vertexStride,
+ CircleVertex* verts = reinterpret_cast<CircleVertex*>(helper.init(target, vertexStride,
instanceCount));
if (!verts) {
return;
@@ -722,7 +722,7 @@ public:
verts += kVerticesPerQuad;
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
@@ -884,7 +884,7 @@ public:
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
SkMatrix invert;
if (!this->viewMatrix().invert(&invert)) {
return;
@@ -896,14 +896,14 @@ public:
invert,
this->usesLocalCoords()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
QuadHelper helper;
size_t vertexStride = gp->getVertexStride();
SkASSERT(vertexStride == sizeof(EllipseVertex));
EllipseVertex* verts = reinterpret_cast<EllipseVertex*>(
- helper.init(batchTarget, vertexStride, instanceCount));
+ helper.init(target, vertexStride, instanceCount));
if (!verts) {
return;
}
@@ -945,7 +945,7 @@ public:
verts += kVerticesPerQuad;
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
@@ -1152,21 +1152,21 @@ public:
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
// Setup geometry processor
SkAutoTUnref<GrGeometryProcessor> gp(DIEllipseEdgeEffect::Create(this->color(),
this->viewMatrix(),
this->mode(),
this->usesLocalCoords()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
size_t vertexStride = gp->getVertexStride();
SkASSERT(vertexStride == sizeof(DIEllipseVertex));
QuadHelper helper;
DIEllipseVertex* verts = reinterpret_cast<DIEllipseVertex*>(
- helper.init(batchTarget, vertexStride, instanceCount));
+ helper.init(target, vertexStride, instanceCount));
if (!verts) {
return;
}
@@ -1204,7 +1204,7 @@ public:
verts += kVerticesPerQuad;
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
@@ -1503,7 +1503,7 @@ public:
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
// reset to device coordinates
SkMatrix invert;
if (!this->viewMatrix().invert(&invert)) {
@@ -1517,7 +1517,7 @@ public:
invert,
this->usesLocalCoords()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
size_t vertexStride = gp->getVertexStride();
@@ -1526,10 +1526,10 @@ public:
// drop out the middle quad if we're stroked
int indicesPerInstance = this->stroke() ? kIndicesPerStrokeRRect : kIndicesPerRRect;
SkAutoTUnref<const GrIndexBuffer> indexBuffer(
- ref_rrect_index_buffer(this->stroke(), batchTarget->resourceProvider()));
+ ref_rrect_index_buffer(this->stroke(), target->resourceProvider()));
InstancedHelper helper;
- CircleVertex* verts = reinterpret_cast<CircleVertex*>(helper.init(batchTarget,
+ CircleVertex* verts = reinterpret_cast<CircleVertex*>(helper.init(target,
kTriangles_GrPrimitiveType, vertexStride, indexBuffer, kVertsPerRRect,
indicesPerInstance, instanceCount));
if (!verts || !indexBuffer) {
@@ -1581,7 +1581,7 @@ public:
}
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
@@ -1679,7 +1679,7 @@ public:
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
// reset to device coordinates
SkMatrix invert;
if (!this->viewMatrix().invert(&invert)) {
@@ -1693,7 +1693,7 @@ public:
invert,
this->usesLocalCoords()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
size_t vertexStride = gp->getVertexStride();
@@ -1702,11 +1702,11 @@ public:
// drop out the middle quad if we're stroked
int indicesPerInstance = this->stroke() ? kIndicesPerStrokeRRect : kIndicesPerRRect;
SkAutoTUnref<const GrIndexBuffer> indexBuffer(
- ref_rrect_index_buffer(this->stroke(), batchTarget->resourceProvider()));
+ ref_rrect_index_buffer(this->stroke(), target->resourceProvider()));
InstancedHelper helper;
EllipseVertex* verts = reinterpret_cast<EllipseVertex*>(
- helper.init(batchTarget, kTriangles_GrPrimitiveType, vertexStride, indexBuffer,
+ helper.init(target, kTriangles_GrPrimitiveType, vertexStride, indexBuffer,
kVertsPerRRect, indicesPerInstance, instanceCount));
if (!verts || !indexBuffer) {
SkDebugf("Could not allocate vertices\n");
@@ -1767,7 +1767,7 @@ public:
verts++;
}
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
diff --git a/src/gpu/GrReorderCommandBuilder.cpp b/src/gpu/GrReorderCommandBuilder.cpp
index f08b7f17d0..d70e25ad81 100644
--- a/src/gpu/GrReorderCommandBuilder.cpp
+++ b/src/gpu/GrReorderCommandBuilder.cpp
@@ -105,5 +105,5 @@ GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(GrDrawBatch* bat
}
#endif
- return GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), DrawBatch, (batch, this->batchTarget()));
+ return GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), DrawBatch, (batch));
}
diff --git a/src/gpu/GrReorderCommandBuilder.h b/src/gpu/GrReorderCommandBuilder.h
index a5f984c573..af4a28cf07 100644
--- a/src/gpu/GrReorderCommandBuilder.h
+++ b/src/gpu/GrReorderCommandBuilder.h
@@ -15,7 +15,7 @@ public:
typedef GrCommandBuilder::Cmd Cmd;
typedef GrCommandBuilder::State State;
- GrReorderCommandBuilder(GrGpu* gpu) : INHERITED(gpu) {}
+ GrReorderCommandBuilder() : INHERITED() {}
Cmd* recordDrawBatch(GrDrawBatch*, const GrCaps&) override;
Cmd* recordStencilPath(const GrPipelineBuilder&,
diff --git a/src/gpu/GrTargetCommands.cpp b/src/gpu/GrTargetCommands.cpp
index 3f9d7c860f..c2006d731f 100644
--- a/src/gpu/GrTargetCommands.cpp
+++ b/src/gpu/GrTargetCommands.cpp
@@ -7,8 +7,9 @@
#include "GrTargetCommands.h"
-#include "GrBufferedDrawTarget.h"
-
+#include "GrBatchFlushState.h"
+#include "GrGpu.h"
+#include "GrPathRendering.h"
#include "batches/GrDrawBatch.h"
#include "batches/GrVertexBatch.h"
@@ -16,91 +17,85 @@ GrBATCH_SPEW(int32_t GrTargetCommands::Cmd::gUniqueID = 0;)
void GrTargetCommands::reset() {
fCmdBuffer.reset();
- fBatchTarget.reset();
}
-void GrTargetCommands::flush(GrBufferedDrawTarget* bufferedDrawTarget) {
+void GrTargetCommands::flush(GrGpu* gpu, GrResourceProvider* resourceProvider) {
GrBATCH_INFO("Flushing\n");
if (fCmdBuffer.empty()) {
return;
}
-
- GrGpu* gpu = bufferedDrawTarget->getGpu();
-
+ GrBatchFlushState flushState(gpu, resourceProvider, fLastFlushToken);
// Loop over all batches and generate geometry
CmdBuffer::Iter genIter(fCmdBuffer);
while (genIter.next()) {
if (Cmd::kDrawBatch_CmdType == genIter->type()) {
DrawBatch* db = reinterpret_cast<DrawBatch*>(genIter.get());
- fBatchTarget.resetNumberOfDraws();
// TODO: encapsulate the specialization of GrVertexBatch in GrVertexBatch so that we can
// remove this cast. Currently all GrDrawBatches are in fact GrVertexBatch.
GrVertexBatch* vertexBatch = static_cast<GrVertexBatch*>(db->batch());
- vertexBatch->generateGeometry(&fBatchTarget);
- vertexBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
+ vertexBatch->prepareDraws(&flushState);
}
}
- fBatchTarget.preFlush();
+ flushState.preIssueDraws();
CmdBuffer::Iter iter(fCmdBuffer);
-
while (iter.next()) {
- iter->execute(gpu);
+ iter->execute(&flushState);
}
-
- fBatchTarget.postFlush();
+ fLastFlushToken = flushState.lastFlushedToken();
}
-void GrTargetCommands::StencilPath::execute(GrGpu* gpu) {
+void GrTargetCommands::StencilPath::execute(GrBatchFlushState* state) {
GrPathRendering::StencilPathArgs args(fUseHWAA, fRenderTarget.get(), &fViewMatrix, &fScissor,
&fStencil);
- gpu->pathRendering()->stencilPath(args, this->path());
+ state->gpu()->pathRendering()->stencilPath(args, this->path());
}
-void GrTargetCommands::DrawPath::execute(GrGpu* gpu) {
+void GrTargetCommands::DrawPath::execute(GrBatchFlushState* state) {
if (!fState->fCompiled) {
- gpu->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor, *fState->getPipeline(),
- fState->fBatchTracker);
+ state->gpu()->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor,
+ *fState->getPipeline(), fState->fBatchTracker);
fState->fCompiled = true;
}
GrPathRendering::DrawPathArgs args(fState->fPrimitiveProcessor.get(), fState->getPipeline(),
&fState->fDesc, &fState->fBatchTracker, &fStencilSettings);
- gpu->pathRendering()->drawPath(args, this->path());
+ state->gpu()->pathRendering()->drawPath(args, this->path());
}
-void GrTargetCommands::DrawPaths::execute(GrGpu* gpu) {
+void GrTargetCommands::DrawPaths::execute(GrBatchFlushState* state) {
if (!fState->fCompiled) {
- gpu->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor, *fState->getPipeline(),
- fState->fBatchTracker);
+ state->gpu()->buildProgramDesc(&fState->fDesc, *fState->fPrimitiveProcessor,
+ *fState->getPipeline(), fState->fBatchTracker);
fState->fCompiled = true;
}
GrPathRendering::DrawPathArgs args(fState->fPrimitiveProcessor.get(), fState->getPipeline(),
&fState->fDesc, &fState->fBatchTracker, &fStencilSettings);
- gpu->pathRendering()->drawPaths(args, this->pathRange(), fIndices, fIndexType, fTransforms,
- fTransformType, fCount);
+ state->gpu()->pathRendering()->drawPaths(args, this->pathRange(), fIndices, fIndexType,
+ fTransforms, fTransformType, fCount);
}
-void GrTargetCommands::DrawBatch::execute(GrGpu* gpu) {
+void GrTargetCommands::DrawBatch::execute(GrBatchFlushState* state) {
// TODO: encapsulate the specialization of GrVertexBatch in GrVertexBatch so that we can
// remove this cast. Currently all GrDrawBatches are in fact GrVertexBatch.
- const GrVertexBatch* vertexBatch = static_cast<const GrVertexBatch*>(fBatch.get());
- fBatchTarget->flushNext(vertexBatch->numberOfDraws());
+ GrVertexBatch* vertexBatch = static_cast<GrVertexBatch*>(fBatch.get());
+ vertexBatch->issueDraws(state);
}
-void GrTargetCommands::Clear::execute(GrGpu* gpu) {
+
+void GrTargetCommands::Clear::execute(GrBatchFlushState* state) {
if (GrColor_ILLEGAL == fColor) {
- gpu->discard(this->renderTarget());
+ state->gpu()->discard(this->renderTarget());
} else {
- gpu->clear(fRect, fColor, this->renderTarget());
+ state->gpu()->clear(fRect, fColor, this->renderTarget());
}
}
-void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu) {
- gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
+void GrTargetCommands::ClearStencilClip::execute(GrBatchFlushState* state) {
+ state->gpu()->clearStencilClip(fRect, fInsideClip, this->renderTarget());
}
-void GrTargetCommands::CopySurface::execute(GrGpu* gpu) {
- gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
+void GrTargetCommands::CopySurface::execute(GrBatchFlushState* state) {
+ state->gpu()->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
}
diff --git a/src/gpu/GrTargetCommands.h b/src/gpu/GrTargetCommands.h
index bf973b6872..3c08d2d24d 100644
--- a/src/gpu/GrTargetCommands.h
+++ b/src/gpu/GrTargetCommands.h
@@ -8,27 +8,23 @@
#ifndef GrTargetCommands_DEFINED
#define GrTargetCommands_DEFINED
-#include "GrBatchTarget.h"
#include "GrDrawTarget.h"
-#include "GrGpu.h"
#include "GrPath.h"
#include "GrPendingProgramElement.h"
+#include "GrPrimitiveProcessor.h"
#include "GrRenderTarget.h"
#include "GrTRecorder.h"
-#include "SkRect.h"
-#include "SkTypes.h"
#include "batches/GrDrawBatch.h"
+#include "SkRect.h"
-class GrBufferedDrawTarget;
+class GrResourceProvider;
+class GrBatchFlushState;
// TODO: Convert all commands into GrBatch and remove this class.
class GrTargetCommands : ::SkNoncopyable {
public:
- GrTargetCommands(GrGpu* gpu)
- : fCmdBuffer(kCmdBufferInitialSizeInBytes)
- , fBatchTarget(gpu) {
- }
+ GrTargetCommands() : fCmdBuffer(kCmdBufferInitialSizeInBytes), fLastFlushToken(0) {}
class Cmd : ::SkNoncopyable {
public:
@@ -50,7 +46,7 @@ public:
{}
virtual ~Cmd() {}
- virtual void execute(GrGpu*) = 0;
+ virtual void execute(GrBatchFlushState*) = 0;
CmdType type() const { return fType; }
@@ -71,7 +67,7 @@ public:
};
void reset();
- void flush(GrBufferedDrawTarget*);
+ void flush(GrGpu*, GrResourceProvider*);
private:
friend class GrCommandBuilder;
@@ -132,7 +128,7 @@ private:
const GrPath* path() const { return fPath.get(); }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
SkMatrix fViewMatrix;
bool fUseHWAA;
@@ -151,7 +147,7 @@ private:
const GrPath* path() const { return fPath.get(); }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
SkAutoTUnref<StateForPathDraw> fState;
GrStencilSettings fStencilSettings;
@@ -167,7 +163,7 @@ private:
const GrPathRange* pathRange() const { return fPathRange.get(); }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
SkAutoTUnref<StateForPathDraw> fState;
char* fIndices;
@@ -187,7 +183,7 @@ private:
GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
SkIRect fRect;
GrColor fColor;
@@ -202,7 +198,7 @@ private:
GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
SkIRect fRect;
bool fInsideClip;
@@ -221,7 +217,7 @@ private:
GrSurface* dst() const { return fDst.get(); }
GrSurface* src() const { return fSrc.get(); }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
SkIPoint fDstPoint;
SkIRect fSrcRect;
@@ -232,19 +228,17 @@ private:
};
struct DrawBatch : public Cmd {
- DrawBatch(GrDrawBatch* batch, GrBatchTarget* batchTarget)
+ DrawBatch(GrDrawBatch* batch)
: Cmd(kDrawBatch_CmdType)
- , fBatch(SkRef(batch))
- , fBatchTarget(batchTarget) {
+ , fBatch(SkRef(batch)){
SkASSERT(!batch->isUsed());
}
GrDrawBatch* batch() { return fBatch; }
- void execute(GrGpu*) override;
+ void execute(GrBatchFlushState*) override;
private:
SkAutoTUnref<GrDrawBatch> fBatch;
- GrBatchTarget* fBatchTarget;
};
static const int kCmdBufferInitialSizeInBytes = 8 * 1024;
@@ -253,11 +247,9 @@ private:
typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
CmdBuffer* cmdBuffer() { return &fCmdBuffer; }
- GrBatchTarget* batchTarget() { return &fBatchTarget; }
CmdBuffer fCmdBuffer;
- GrBatchTarget fBatchTarget;
+ GrBatchToken fLastFlushToken;
};
#endif
-
diff --git a/src/gpu/GrTessellatingPathRenderer.cpp b/src/gpu/GrTessellatingPathRenderer.cpp
index c2a990000f..8126c6c381 100644
--- a/src/gpu/GrTessellatingPathRenderer.cpp
+++ b/src/gpu/GrTessellatingPathRenderer.cpp
@@ -7,7 +7,7 @@
#include "GrTessellatingPathRenderer.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrPathUtils.h"
@@ -1509,7 +1509,7 @@ public:
return actualCount;
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
// construct a cache key from the path's genID and the view matrix
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey key;
@@ -1525,7 +1525,7 @@ public:
}
fStroke.asUniqueKeyFragment(&builder[2 + clipBoundsSize32]);
builder.finish();
- GrResourceProvider* rp = batchTarget->resourceProvider();
+ GrResourceProvider* rp = target->resourceProvider();
SkAutoTUnref<GrVertexBuffer> vertexBuffer(rp->findAndRefTByUniqueKey<GrVertexBuffer>(key));
int actualCount;
SkScalar screenSpaceTol = GrPathUtils::kDefaultTolerance;
@@ -1558,14 +1558,14 @@ public:
fViewMatrix));
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
SkASSERT(gp->getVertexStride() == sizeof(SkPoint));
GrPrimitiveType primitiveType = WIREFRAME ? kLines_GrPrimitiveType
: kTriangles_GrPrimitiveType;
GrVertices vertices;
vertices.init(primitiveType, vertexBuffer.get(), 0, actualCount);
- batchTarget->draw(vertices);
+ target->draw(vertices);
}
bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
diff --git a/src/gpu/batches/GrAAFillRectBatch.cpp b/src/gpu/batches/GrAAFillRectBatch.cpp
index d8ef3d3152..098c6bdae4 100644
--- a/src/gpu/batches/GrAAFillRectBatch.cpp
+++ b/src/gpu/batches/GrAAFillRectBatch.cpp
@@ -7,6 +7,7 @@
#include "GrAAFillRectBatch.h"
+#include "GrBatchFlushState.h"
#include "GrColor.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrResourceKey.h"
@@ -96,7 +97,7 @@ public:
fBatch.fCanTweakAlphaForCoverage = opt.canTweakAlphaForCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
SkAutoTUnref<const GrGeometryProcessor> gp(CreateFillRectGP(canTweakAlphaForCoverage,
@@ -109,17 +110,16 @@ public:
return;
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
size_t vertexStride = gp->getVertexStride();
SkASSERT(Base::StrideCheck(vertexStride, canTweakAlphaForCoverage,
this->usesLocalCoords()));
int instanceCount = fGeoData.count();
- SkAutoTUnref<const GrIndexBuffer> indexBuffer(get_index_buffer(
- batchTarget->resourceProvider()));
+ SkAutoTUnref<const GrIndexBuffer> indexBuffer(get_index_buffer(target->resourceProvider()));
InstancedHelper helper;
- void* vertices = helper.init(batchTarget, kTriangles_GrPrimitiveType, vertexStride,
+ void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
indexBuffer, kVertsPerAAFillRect, kIndicesPerAAFillRect,
instanceCount);
if (!vertices || !indexBuffer) {
@@ -134,7 +134,7 @@ public:
fGeoData[i],
canTweakAlphaForCoverage);
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.cpp b/src/gpu/batches/GrAAStrokeRectBatch.cpp
index 91ff230f0d..bab5f02580 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrAAStrokeRectBatch.cpp
@@ -7,6 +7,7 @@
#include "GrAAStrokeRectBatch.h"
+#include "GrBatchFlushState.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrResourceKey.h"
#include "GrResourceProvider.h"
@@ -59,7 +60,7 @@ void GrAAStrokeRectBatch::initBatchTracker(const GrPipelineOptimizations& opt) {
fBatch.fCanTweakAlphaForCoverage = opt.canTweakAlphaForCoverage();
}
-void GrAAStrokeRectBatch::generateGeometry(GrBatchTarget* batchTarget) {
+void GrAAStrokeRectBatch::onPrepareDraws(Target* target) {
bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
SkAutoTUnref<const GrGeometryProcessor> gp(create_stroke_rect_gp(canTweakAlphaForCoverage,
@@ -71,7 +72,7 @@ void GrAAStrokeRectBatch::generateGeometry(GrBatchTarget* batchTarget) {
return;
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
size_t vertexStride = gp->getVertexStride();
@@ -85,9 +86,9 @@ void GrAAStrokeRectBatch::generateGeometry(GrBatchTarget* batchTarget) {
int instanceCount = fGeoData.count();
const SkAutoTUnref<const GrIndexBuffer> indexBuffer(
- GetIndexBuffer(batchTarget->resourceProvider(), this->miterStroke()));
+ GetIndexBuffer(target->resourceProvider(), this->miterStroke()));
InstancedHelper helper;
- void* vertices = helper.init(batchTarget, kTriangles_GrPrimitiveType, vertexStride,
+ void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
indexBuffer, verticesPerInstance, indicesPerInstance,
instanceCount);
if (!vertices || !indexBuffer) {
@@ -109,7 +110,7 @@ void GrAAStrokeRectBatch::generateGeometry(GrBatchTarget* batchTarget) {
args.fMiterStroke,
canTweakAlphaForCoverage);
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
const GrIndexBuffer* GrAAStrokeRectBatch::GetIndexBuffer(GrResourceProvider* resourceProvider,
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.h b/src/gpu/batches/GrAAStrokeRectBatch.h
index f9c4f3ed9d..2c6b0dbcf0 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.h
+++ b/src/gpu/batches/GrAAStrokeRectBatch.h
@@ -14,6 +14,8 @@
#include "SkMatrix.h"
#include "SkRect.h"
+class GrResourceProvider;
+
class GrAAStrokeRectBatch : public GrVertexBatch {
public:
// TODO support AA rotated stroke rects by copying around view matrices
@@ -42,11 +44,11 @@ public:
void initBatchTracker(const GrPipelineOptimizations&) override;
- void generateGeometry(GrBatchTarget* batchTarget) override;
-
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
private:
+ void onPrepareDraws(Target*) override;
+
GrAAStrokeRectBatch(const Geometry& geometry, const SkMatrix& viewMatrix) {
this->initClassID<GrAAStrokeRectBatch>();
fBatch.fViewMatrix = viewMatrix;
diff --git a/src/gpu/batches/GrBWFillRectBatch.cpp b/src/gpu/batches/GrBWFillRectBatch.cpp
index c0c93c7614..f797e9ef4d 100644
--- a/src/gpu/batches/GrBWFillRectBatch.cpp
+++ b/src/gpu/batches/GrBWFillRectBatch.cpp
@@ -7,13 +7,13 @@
#include "GrBWFillRectBatch.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrColor.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrPrimitiveProcessor.h"
#include "GrVertexBatch.h"
-class GrBatchTarget;
+class GrBatchFlushState;
class SkMatrix;
struct SkRect;
@@ -58,14 +58,14 @@ public:
fBatch.fCoverageIgnored = !init.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
SkAutoTUnref<const GrGeometryProcessor> gp(this->createRectGP());
if (!gp) {
SkDebugf("Could not create GrGeometryProcessor\n");
return;
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
size_t vertexStride = gp->getVertexStride();
@@ -73,7 +73,7 @@ public:
vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorLocalCoordAttr) :
vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr));
QuadHelper helper;
- void* vertices = helper.init(batchTarget, vertexStride, instanceCount);
+ void* vertices = helper.init(target, vertexStride, instanceCount);
if (!vertices) {
return;
@@ -110,7 +110,7 @@ public:
}
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
diff --git a/src/gpu/batches/GrDrawAtlasBatch.cpp b/src/gpu/batches/GrDrawAtlasBatch.cpp
index 487580c39d..3596e16861 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.cpp
+++ b/src/gpu/batches/GrDrawAtlasBatch.cpp
@@ -6,6 +6,7 @@
*/
#include "GrDrawAtlasBatch.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "SkGr.h"
#include "SkRandom.h"
@@ -41,14 +42,14 @@ static const GrGeometryProcessor* set_vertex_attributes(bool hasColors,
return GrDefaultGeoProcFactory::Create(gpColor, coverage, localCoords, viewMatrix);
}
-void GrDrawAtlasBatch::generateGeometry(GrBatchTarget* batchTarget) {
+void GrDrawAtlasBatch::onPrepareDraws(Target* target) {
// Setup geometry processor
SkAutoTUnref<const GrGeometryProcessor> gp(set_vertex_attributes(this->hasColors(),
this->color(),
this->viewMatrix(),
this->coverageIgnored()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
int instanceCount = fGeoData.count();
size_t vertexStride = gp->getVertexStride();
@@ -57,7 +58,7 @@ void GrDrawAtlasBatch::generateGeometry(GrBatchTarget* batchTarget) {
QuadHelper helper;
int numQuads = this->quadCount();
- void* verts = helper.init(batchTarget, vertexStride, numQuads);
+ void* verts = helper.init(target, vertexStride, numQuads);
if (!verts) {
SkDebugf("Could not allocate vertices\n");
return;
@@ -71,7 +72,7 @@ void GrDrawAtlasBatch::generateGeometry(GrBatchTarget* batchTarget) {
memcpy(vertPtr, args.fVerts.begin(), allocSize);
vertPtr += allocSize;
}
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
GrDrawAtlasBatch::GrDrawAtlasBatch(const Geometry& geometry, const SkMatrix& viewMatrix,
diff --git a/src/gpu/batches/GrDrawAtlasBatch.h b/src/gpu/batches/GrDrawAtlasBatch.h
index 6e353ef647..c7ee9f3e37 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.h
+++ b/src/gpu/batches/GrDrawAtlasBatch.h
@@ -42,11 +42,12 @@ public:
}
void initBatchTracker(const GrPipelineOptimizations&) override;
- void generateGeometry(GrBatchTarget* batchTarget) override;
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
private:
+ void onPrepareDraws(Target*) override;
+
GrDrawAtlasBatch(const Geometry& geometry, const SkMatrix& viewMatrix, int spriteCount,
const SkRSXform* xforms, const SkRect* rects, const SkColor* colors);
diff --git a/src/gpu/batches/GrDrawBatch.h b/src/gpu/batches/GrDrawBatch.h
index 8ac758eca0..bbebe5b98c 100644
--- a/src/gpu/batches/GrDrawBatch.h
+++ b/src/gpu/batches/GrDrawBatch.h
@@ -14,10 +14,32 @@
struct GrInitInvariantOutput;
/**
+ * GrDrawBatches are flushed in two phases (preDraw, and draw). In preDraw uploads to GrGpuResources
+ * and draws are determined and scheduled. They are issued in the draw phase. GrBatchToken is used
+ * to sequence the uploads relative to each other and to draws.
+ **/
+
+typedef uint64_t GrBatchToken;
+
+class GrBatchUploader : public SkRefCnt {
+public:
+ class TextureUploader;
+
+ GrBatchUploader(GrBatchToken lastUploadToken) : fLastUploadToken(lastUploadToken) {}
+ GrBatchToken lastUploadToken() const { return fLastUploadToken; }
+ virtual void upload(TextureUploader*)=0;
+
+private:
+ GrBatchToken fLastUploadToken;
+};
+
+/**
* Base class for GrBatches that draw. These batches have a GrPipeline installed by GrDrawTarget.
*/
class GrDrawBatch : public GrBatch {
public:
+ class Target;
+
GrDrawBatch();
~GrDrawBatch() override;
@@ -41,8 +63,12 @@ private:
*/
virtual void initBatchTracker(const GrPipelineOptimizations&) = 0;
- SkAlignedSTStorage<1, GrPipeline> fPipelineStorage;
- bool fPipelineInstalled;
+protected:
+ SkTArray<SkAutoTUnref<GrBatchUploader>, true> fInlineUploads;
+
+private:
+ SkAlignedSTStorage<1, GrPipeline> fPipelineStorage;
+ bool fPipelineInstalled;
typedef GrBatch INHERITED;
};
diff --git a/src/gpu/batches/GrDrawVerticesBatch.cpp b/src/gpu/batches/GrDrawVerticesBatch.cpp
index ebfda36296..bca7d230bc 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.cpp
+++ b/src/gpu/batches/GrDrawVerticesBatch.cpp
@@ -7,7 +7,7 @@
#include "GrDrawVerticesBatch.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrInvariantOutput.h"
#include "GrDefaultGeoProcFactory.h"
@@ -107,14 +107,14 @@ void GrDrawVerticesBatch::initBatchTracker(const GrPipelineOptimizations& opt) {
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
-void GrDrawVerticesBatch::generateGeometry(GrBatchTarget* batchTarget) {
+void GrDrawVerticesBatch::onPrepareDraws(Target* target) {
int colorOffset = -1, texOffset = -1;
SkAutoTUnref<const GrGeometryProcessor> gp(
set_vertex_attributes(this->hasLocalCoords(), this->hasColors(), &colorOffset,
&texOffset, this->color(), this->viewMatrix(),
this->coverageIgnored()));
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
size_t vertexStride = gp->getVertexStride();
@@ -126,8 +126,8 @@ void GrDrawVerticesBatch::generateGeometry(GrBatchTarget* batchTarget) {
const GrVertexBuffer* vertexBuffer;
int firstVertex;
- void* verts = batchTarget->makeVertSpace(vertexStride, this->vertexCount(),
- &vertexBuffer, &firstVertex);
+ void* verts = target->makeVertexSpace(vertexStride, this->vertexCount(),
+ &vertexBuffer, &firstVertex);
if (!verts) {
SkDebugf("Could not allocate vertices\n");
@@ -139,7 +139,7 @@ void GrDrawVerticesBatch::generateGeometry(GrBatchTarget* batchTarget) {
uint16_t* indices = NULL;
if (this->hasIndices()) {
- indices = batchTarget->makeIndexSpace(this->indexCount(), &indexBuffer, &firstIndex);
+ indices = target->makeIndexSpace(this->indexCount(), &indexBuffer, &firstIndex);
if (!indices) {
SkDebugf("Could not allocate indices\n");
@@ -180,7 +180,7 @@ void GrDrawVerticesBatch::generateGeometry(GrBatchTarget* batchTarget) {
} else {
vertices.init(this->primitiveType(), vertexBuffer, firstVertex, this->vertexCount());
}
- batchTarget->draw(vertices);
+ target->draw(vertices);
}
bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
diff --git a/src/gpu/batches/GrDrawVerticesBatch.h b/src/gpu/batches/GrDrawVerticesBatch.h
index 5e9628bc16..8864b16898 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.h
+++ b/src/gpu/batches/GrDrawVerticesBatch.h
@@ -15,7 +15,7 @@
#include "SkRect.h"
#include "SkTDArray.h"
-class GrBatchTarget;
+class GrBatchFlushState;
struct GrInitInvariantOutput;
class GrDrawVerticesBatch : public GrVertexBatch {
@@ -47,11 +47,11 @@ public:
void initBatchTracker(const GrPipelineOptimizations&) override;
- void generateGeometry(GrBatchTarget* batchTarget) override;
-
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
private:
+ void onPrepareDraws(Target*) override;
+
GrDrawVerticesBatch(const Geometry& geometry, GrPrimitiveType primitiveType,
const SkMatrix& viewMatrix,
const SkPoint* positions, int vertexCount,
diff --git a/src/gpu/batches/GrStrokeRectBatch.cpp b/src/gpu/batches/GrStrokeRectBatch.cpp
index 649ba01ddd..766302bf5b 100644
--- a/src/gpu/batches/GrStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrStrokeRectBatch.cpp
@@ -7,6 +7,7 @@
#include "GrStrokeRectBatch.h"
#include "GrBatchTest.h"
+#include "GrBatchFlushState.h"
#include "SkRandom.h"
GrStrokeRectBatch::GrStrokeRectBatch(const Geometry& geometry, bool snapToPixelCenters) {
@@ -65,8 +66,7 @@ static void init_stroke_rect_strip(SkPoint verts[10], const SkRect& rect, SkScal
verts[9] = verts[1];
}
-
-void GrStrokeRectBatch::generateGeometry(GrBatchTarget* batchTarget) {
+void GrStrokeRectBatch::onPrepareDraws(Target* target) {
SkAutoTUnref<const GrGeometryProcessor> gp;
{
using namespace GrDefaultGeoProcFactory;
@@ -79,7 +79,7 @@ void GrStrokeRectBatch::generateGeometry(GrBatchTarget* batchTarget) {
this->viewMatrix()));
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
size_t vertexStride = gp->getVertexStride();
@@ -95,8 +95,7 @@ void GrStrokeRectBatch::generateGeometry(GrBatchTarget* batchTarget) {
const GrVertexBuffer* vertexBuffer;
int firstVertex;
- void* verts = batchTarget->makeVertSpace(vertexStride, vertexCount,
- &vertexBuffer, &firstVertex);
+ void* verts = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex);
if (!verts) {
SkDebugf("Could not allocate vertices\n");
@@ -123,7 +122,7 @@ void GrStrokeRectBatch::generateGeometry(GrBatchTarget* batchTarget) {
GrVertices vertices;
vertices.init(primType, vertexBuffer, firstVertex, vertexCount);
- batchTarget->draw(vertices);
+ target->draw(vertices);
}
#ifdef GR_TEST_UTILS
diff --git a/src/gpu/batches/GrStrokeRectBatch.h b/src/gpu/batches/GrStrokeRectBatch.h
index 31b11d9c32..2e81cc237f 100644
--- a/src/gpu/batches/GrStrokeRectBatch.h
+++ b/src/gpu/batches/GrStrokeRectBatch.h
@@ -38,9 +38,9 @@ public:
void initBatchTracker(const GrPipelineOptimizations&) override;
- void generateGeometry(GrBatchTarget* batchTarget) override;
-
private:
+ void onPrepareDraws(Target*) override;
+
GrStrokeRectBatch(const Geometry& geometry, bool snapToPixelCenters);
GrColor color() const { return fBatch.fColor; }
diff --git a/src/gpu/batches/GrTestBatch.h b/src/gpu/batches/GrTestBatch.h
index f72ea6143d..085b184a28 100644
--- a/src/gpu/batches/GrTestBatch.h
+++ b/src/gpu/batches/GrTestBatch.h
@@ -8,7 +8,7 @@
#ifndef GrTestBatch_DEFINED
#define GrTestBatch_DEFINED
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrGeometryProcessor.h"
#include "GrVertexBuffer.h"
@@ -49,12 +49,6 @@ public:
fBatch.fCoverageIgnored = !opt.readsCoverage();
}
- void generateGeometry(GrBatchTarget* batchTarget) override {
- batchTarget->initDraw(fGeometryProcessor, this->pipeline());
-
- this->onGenerateGeometry(batchTarget);
- }
-
protected:
GrTestBatch(const GrGeometryProcessor* gp, const SkRect& bounds) {
fGeometryProcessor.reset(SkRef(gp));
@@ -65,6 +59,11 @@ protected:
const GrGeometryProcessor* geometryProcessor() const { return fGeometryProcessor; }
private:
+ void onPrepareDraws(Target* target) override {
+ target->initDraw(fGeometryProcessor, this->pipeline());
+ this->generateGeometry(target);
+ }
+
virtual Geometry* geoData(int index) = 0;
virtual const Geometry* geoData(int index) const = 0;
@@ -72,7 +71,7 @@ private:
return false;
}
- virtual void onGenerateGeometry(GrBatchTarget* batchTarget) = 0;
+ virtual void generateGeometry(Target*) = 0;
struct BatchTracker {
GrColor fColor;
diff --git a/src/gpu/batches/GrVertexBatch.cpp b/src/gpu/batches/GrVertexBatch.cpp
index e800422818..6081e26371 100644
--- a/src/gpu/batches/GrVertexBatch.cpp
+++ b/src/gpu/batches/GrVertexBatch.cpp
@@ -6,24 +6,28 @@
*/
#include "GrVertexBatch.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrResourceProvider.h"
-GrVertexBatch::GrVertexBatch() : fNumberOfDraws(0) {}
+GrVertexBatch::GrVertexBatch() : fDrawArrays(1) {}
-void* GrVertexBatch::InstancedHelper::init(GrBatchTarget* batchTarget, GrPrimitiveType primType,
- size_t vertexStride, const GrIndexBuffer* indexBuffer,
- int verticesPerInstance, int indicesPerInstance,
- int instancesToDraw) {
- SkASSERT(batchTarget);
+void GrVertexBatch::prepareDraws(GrBatchFlushState* state) {
+ Target target(state, this);
+ this->onPrepareDraws(&target);
+}
+
+void* GrVertexBatch::InstancedHelper::init(Target* target, GrPrimitiveType primType,
+ size_t vertexStride, const GrIndexBuffer* indexBuffer,
+ int verticesPerInstance, int indicesPerInstance,
+ int instancesToDraw) {
+ SkASSERT(target);
if (!indexBuffer) {
return NULL;
}
const GrVertexBuffer* vertexBuffer;
int firstVertex;
int vertexCount = verticesPerInstance * instancesToDraw;
- void* vertices = batchTarget->makeVertSpace(vertexStride, vertexCount,
- &vertexBuffer, &firstVertex);
+ void* vertices = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex);
if (!vertices) {
SkDebugf("Vertices could not be allocated for instanced rendering.");
return NULL;
@@ -38,14 +42,45 @@ void* GrVertexBatch::InstancedHelper::init(GrBatchTarget* batchTarget, GrPrimiti
return vertices;
}
-void* GrVertexBatch::QuadHelper::init(GrBatchTarget* batchTarget, size_t vertexStride,
+void GrVertexBatch::InstancedHelper::recordDraw(Target* target) {
+ SkASSERT(fVertices.instanceCount());
+ target->draw(fVertices);
+}
+
+void* GrVertexBatch::QuadHelper::init(Target* target, size_t vertexStride,
int quadsToDraw) {
SkAutoTUnref<const GrIndexBuffer> quadIndexBuffer(
- batchTarget->resourceProvider()->refQuadIndexBuffer());
+ target->resourceProvider()->refQuadIndexBuffer());
if (!quadIndexBuffer) {
SkDebugf("Could not get quad index buffer.");
return NULL;
}
- return this->INHERITED::init(batchTarget, kTriangles_GrPrimitiveType, vertexStride,
+ return this->INHERITED::init(target, kTriangles_GrPrimitiveType, vertexStride,
quadIndexBuffer, kVerticesPerQuad, kIndicesPerQuad, quadsToDraw);
}
+
+void GrVertexBatch::issueDraws(GrBatchFlushState* state) {
+ int uploadCnt = fInlineUploads.count();
+ int currUpload = 0;
+
+ // Iterate of all the drawArrays. Before issuing the draws in each array, perform any inline
+ // uploads.
+ for (SkTLList<DrawArray>::Iter da(fDrawArrays); da.get(); da.next()) {
+ state->advanceLastFlushedToken();
+ while (currUpload < uploadCnt &&
+ fInlineUploads[currUpload]->lastUploadToken() <= state->lastFlushedToken()) {
+ fInlineUploads[currUpload++]->upload(state->uploader());
+ }
+ const GrVertexBatch::DrawArray& drawArray = *da.get();
+ GrProgramDesc desc;
+ const GrPipeline* pipeline = this->pipeline();
+ const GrPrimitiveProcessor* primProc = drawArray.fPrimitiveProcessor.get();
+ state->gpu()->buildProgramDesc(&desc, *primProc, *pipeline, fBatchTracker);
+ GrGpu::DrawArgs args(primProc, pipeline, &desc, &fBatchTracker);
+
+ int drawCount = drawArray.fDraws.count();
+ for (int i = 0; i < drawCount; i++) {
+ state->gpu()->draw(args, drawArray.fDraws[i]);
+ }
+ }
+}
diff --git a/src/gpu/batches/GrVertexBatch.h b/src/gpu/batches/GrVertexBatch.h
index 882cfa0c8d..b868962411 100644
--- a/src/gpu/batches/GrVertexBatch.h
+++ b/src/gpu/batches/GrVertexBatch.h
@@ -9,20 +9,25 @@
#define GrVertexBatch_DEFINED
#include "GrDrawBatch.h"
-#include "GrBatchTarget.h"
+#include "GrPrimitiveProcessor.h"
+#include "GrPendingProgramElement.h"
+#include "GrVertices.h"
+
+#include "SkTLList.h"
+
+class GrBatchFlushState;
/**
* Base class for vertex-based GrBatches.
*/
class GrVertexBatch : public GrDrawBatch {
public:
- GrVertexBatch();
+ class Target;
- virtual void generateGeometry(GrBatchTarget*) = 0;
+ GrVertexBatch();
- // TODO this goes away when batches are everywhere
- void setNumberOfDraws(int numberOfDraws) { fNumberOfDraws = numberOfDraws; }
- int numberOfDraws() const { return fNumberOfDraws; }
+ void prepareDraws(GrBatchFlushState* state);
+ void issueDraws(GrBatchFlushState* state);
protected:
/** Helper for rendering instances using an instanced index index buffer. This class creates the
@@ -32,15 +37,12 @@ protected:
InstancedHelper() {}
/** Returns the allocated storage for the vertices. The caller should populate the before
vertices before calling issueDraws(). */
- void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStride,
+ void* init(Target*, GrPrimitiveType, size_t vertexStride,
const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance,
int instancesToDraw);
/** Call after init() to issue draws to the batch target.*/
- void issueDraw(GrBatchTarget* batchTarget) {
- SkASSERT(fVertices.instanceCount());
- batchTarget->draw(fVertices);
- }
+ void recordDraw(Target* target);
private:
GrVertices fVertices;
};
@@ -55,16 +57,31 @@ protected:
/** Finds the cached quad index buffer and reserves vertex space. Returns NULL on failure
and on sucess a pointer to the vertex data that the caller should populate before
calling issueDraws(). */
- void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw);
-
- using InstancedHelper::issueDraw;
+ void* init(Target* batchTarget, size_t vertexStride, int quadsToDraw);
+ using InstancedHelper::recordDraw;
private:
typedef InstancedHelper INHERITED;
};
private:
- int fNumberOfDraws;
+ virtual void onPrepareDraws(Target*) = 0;
+
+ // A set of contiguous draws with no inline uploads between them that all use the same
+ // primitive processor. All the draws in a DrawArray share a primitive processor and use the
+ // batch's GrPipeline.
+ struct DrawArray {
+ SkSTArray<1, GrVertices, true> fDraws;
+ GrPendingProgramElement<const GrPrimitiveProcessor> fPrimitiveProcessor;
+ };
+
+ // Array of DrawArray. There may be inline uploads between each DrawArray and each DrawArray
+ // may use a different primitive processor.
+ SkTLList<DrawArray> fDrawArrays;
+
+ // TODO: document what fBatchTracker is for — its purpose is not explained here.
+ GrBatchTracker fBatchTracker;
+
typedef GrDrawBatch INHERITED;
};
diff --git a/src/gpu/effects/GrDashingEffect.cpp b/src/gpu/effects/GrDashingEffect.cpp
index 9da8c69f24..e888f6d88c 100644
--- a/src/gpu/effects/GrDashingEffect.cpp
+++ b/src/gpu/effects/GrDashingEffect.cpp
@@ -7,7 +7,7 @@
#include "GrDashingEffect.h"
-#include "GrBatchTarget.h"
+#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrCaps.h"
#include "GrGeometryProcessor.h"
@@ -298,7 +298,7 @@ public:
bool fHasEndRect;
};
- void generateGeometry(GrBatchTarget* batchTarget) override {
+ void onPrepareDraws(Target* target) override {
int instanceCount = fGeoData.count();
SkPaint::Cap cap = this->cap();
bool isRoundCap = SkPaint::kRound_Cap == cap;
@@ -324,7 +324,7 @@ public:
return;
}
- batchTarget->initDraw(gp, this->pipeline());
+ target->initDraw(gp, this->pipeline());
// useAA here means Edge AA or MSAA
bool useAA = this->aaMode() != kBW_DashAAMode;
@@ -529,7 +529,7 @@ public:
}
QuadHelper helper;
- void* vertices = helper.init(batchTarget, gp->getVertexStride(), totalRectCount);
+ void* vertices = helper.init(target, gp->getVertexStride(), totalRectCount);
if (!vertices) {
return;
}
@@ -591,7 +591,7 @@ public:
rectIndex++;
}
SkASSERT(0 == (curVIdx % 4) && (curVIdx / 4) == totalRectCount);
- helper.issueDraw(batchTarget);
+ helper.recordDraw(target);
}
SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }