-rw-r--r--  gyp/gpu.gypi                       1
-rw-r--r--  gyp/tests.gypi                     1
-rw-r--r--  src/gpu/GrInOrderDrawBuffer.cpp  353
-rw-r--r--  src/gpu/GrInOrderDrawBuffer.h    152
-rw-r--r--  src/gpu/GrTRecorder.h            251
-rw-r--r--  tests/GrTRecorderTest.cpp        245
6 files changed, 714 insertions, 289 deletions
diff --git a/gyp/gpu.gypi b/gyp/gpu.gypi
index a853cf5675..e8d2f63345 100644
--- a/gyp/gpu.gypi
+++ b/gyp/gpu.gypi
@@ -162,6 +162,7 @@
'<(skia_src_path)/gpu/GrTexture.cpp',
'<(skia_src_path)/gpu/GrTexturePriv.h',
'<(skia_src_path)/gpu/GrTextureAccess.cpp',
+ '<(skia_src_path)/gpu/GrTRecorder.h',
'<(skia_src_path)/gpu/GrVertexBuffer.h',
'<(skia_src_path)/gpu/effects/Gr1DKernelEffect.h',
diff --git a/gyp/tests.gypi b/gyp/tests.gypi
index fc0d69d0bf..6a2032646b 100644
--- a/gyp/tests.gypi
+++ b/gyp/tests.gypi
@@ -115,6 +115,7 @@
'../tests/GrRedBlackTreeTest.cpp',
'../tests/GrSurfaceTest.cpp',
'../tests/GrTBSearchTest.cpp',
+ '../tests/GrTRecorderTest.cpp',
'../tests/GradientTest.cpp',
'../tests/ImageCacheTest.cpp',
'../tests/ImageDecodingTest.cpp',
diff --git a/src/gpu/GrInOrderDrawBuffer.cpp b/src/gpu/GrInOrderDrawBuffer.cpp
index b9e84c0a99..753a379ff8 100644
--- a/src/gpu/GrInOrderDrawBuffer.cpp
+++ b/src/gpu/GrInOrderDrawBuffer.cpp
@@ -18,6 +18,9 @@ GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
GrVertexBufferAllocPool* vertexPool,
GrIndexBufferAllocPool* indexPool)
: GrDrawTarget(gpu->getContext())
+ , fCmdBuffer(kCmdBufferInitialSizeInBytes)
+ , fLastState(NULL)
+ , fLastClip(NULL)
, fDstGpu(gpu)
, fClipSet(true)
, fClipProxyState(kUnknown_ClipProxyState)
@@ -216,6 +219,7 @@ bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
}
int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
+ SkASSERT(!fCmdBuffer.empty());
SkASSERT(info.isInstanced());
const GeometrySrcState& geomSrc = this->getGeomSrc();
@@ -230,17 +234,17 @@ int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
}
// Check if there is a draw info that is compatible that uses the same VB from the pool and
// the same IB
- if (kDraw_Cmd != strip_trace_bit(fCmds.back())) {
+ if (kDraw_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) {
return 0;
}
- Draw* draw = &fDraws.back();
+ Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());
GeometryPoolState& poolState = fGeoPoolStateStack.back();
const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;
- if (!draw->isInstanced() ||
- draw->verticesPerInstance() != info.verticesPerInstance() ||
- draw->indicesPerInstance() != info.indicesPerInstance() ||
+ if (!draw->fInfo.isInstanced() ||
+ draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
+ draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
draw->vertexBuffer() != vertexBuffer ||
draw->indexBuffer() != geomSrc.fIndexBuffer) {
return 0;
@@ -248,15 +252,15 @@ int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
// info does not yet account for the offset from the start of the pool's VB while the previous
// draw record does.
int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
- if (draw->startVertex() + draw->vertexCount() != adjustedStartVertex) {
+ if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != adjustedStartVertex) {
return 0;
}
- SkASSERT(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount());
+ SkASSERT(poolState.fPoolStartVertex == draw->fInfo.startVertex() + draw->fInfo.vertexCount());
// how many instances can be concat'ed onto draw given the size of the index buffer
int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
- instancesToConcat -= draw->instanceCount();
+ instancesToConcat -= draw->fInfo.instanceCount();
instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());
// update the amount of reserved vertex data actually referenced in draws
@@ -264,15 +268,15 @@ int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
drawState.getVertexStride();
poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes);
- draw->adjustInstanceCount(instancesToConcat);
+ draw->fInfo.adjustInstanceCount(instancesToConcat);
// update last fGpuCmdMarkers to include any additional trace markers that have been added
if (this->getActiveTraceMarkers().count() > 0) {
- if (cmd_has_trace_marker(fCmds.back())) {
+ if (cmd_has_trace_marker(draw->fType)) {
fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers());
} else {
fGpuCmdMarkers.push_back(this->getActiveTraceMarkers());
- fCmds.back() = add_trace_bit(fCmds.back());
+ draw->fType = add_trace_bit(draw->fType);
}
}
@@ -309,9 +313,7 @@ void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {
acr.set(this->drawState());
}
- if (this->needsNewClip()) {
- this->recordClip();
- }
+ this->recordClipIfNecessary();
this->recordStateIfNecessary();
const GrVertexBuffer* vb;
@@ -334,52 +336,51 @@ void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {
if (info.isInstanced()) {
int instancesConcated = this->concatInstancedDraw(info);
if (info.instanceCount() > instancesConcated) {
- draw = this->recordDraw(info, vb, ib);
- draw->adjustInstanceCount(-instancesConcated);
+ draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info, vb, ib));
+ draw->fInfo.adjustInstanceCount(-instancesConcated);
} else {
return;
}
} else {
- draw = this->recordDraw(info, vb, ib);
+ draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info, vb, ib));
}
+ this->recordTraceMarkersIfNecessary();
// Adjust the starting vertex and index when we are using reserved or array sources to
// compensate for the fact that the data was inserted into a larger vb/ib owned by the pool.
if (kBuffer_GeometrySrcType != this->getGeomSrc().fVertexSrc) {
size_t bytes = (info.vertexCount() + info.startVertex()) * drawState.getVertexStride();
poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, bytes);
- draw->adjustStartVertex(poolState.fPoolStartVertex);
+ draw->fInfo.adjustStartVertex(poolState.fPoolStartVertex);
}
if (info.isIndexed() && kBuffer_GeometrySrcType != this->getGeomSrc().fIndexSrc) {
size_t bytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, bytes);
- draw->adjustStartIndex(poolState.fPoolStartIndex);
+ draw->fInfo.adjustStartIndex(poolState.fPoolStartIndex);
}
}
void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, SkPath::FillType fill) {
- if (this->needsNewClip()) {
- this->recordClip();
- }
+ this->recordClipIfNecessary();
// Only compare the subset of GrDrawState relevant to path stenciling?
this->recordStateIfNecessary();
- StencilPath* sp = this->recordStencilPath(path);
+ StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath, (path));
sp->fFill = fill;
+ this->recordTraceMarkersIfNecessary();
}
void GrInOrderDrawBuffer::onDrawPath(const GrPath* path,
SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) {
- if (this->needsNewClip()) {
- this->recordClip();
- }
+ this->recordClipIfNecessary();
// TODO: Only compare the subset of GrDrawState relevant to path covering?
this->recordStateIfNecessary();
- DrawPath* cp = this->recordDrawPath(path);
- cp->fFill = fill;
+ DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path));
+ dp->fFill = fill;
if (dstCopy) {
- cp->fDstCopy = *dstCopy;
+ dp->fDstCopy = *dstCopy;
}
+ this->recordTraceMarkersIfNecessary();
}
void GrInOrderDrawBuffer::onDrawPaths(const GrPathRange* pathRange,
@@ -390,25 +391,25 @@ void GrInOrderDrawBuffer::onDrawPaths(const GrPathRange* pathRange,
SkASSERT(indices);
SkASSERT(transforms);
- if (this->needsNewClip()) {
- this->recordClip();
- }
+ this->recordClipIfNecessary();
this->recordStateIfNecessary();
- DrawPaths* dp = this->recordDrawPaths(pathRange);
- dp->fIndices = SkNEW_ARRAY(uint32_t, count); // TODO: Accomplish this without a malloc
- memcpy(dp->fIndices, indices, sizeof(uint32_t) * count);
- dp->fCount = count;
- const int transformsLength = GrPathRendering::PathTransformSize(transformsType) * count;
- dp->fTransforms = SkNEW_ARRAY(float, transformsLength);
- memcpy(dp->fTransforms, transforms, sizeof(float) * transformsLength);
- dp->fTransformsType = transformsType;
+ int sizeOfIndices = sizeof(uint32_t) * count;
+ int sizeOfTransforms = sizeof(float) * count *
+ GrPathRendering::PathTransformSize(transformsType);
+ DrawPaths* dp = GrNEW_APPEND_WITH_DATA_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange),
+ sizeOfIndices + sizeOfTransforms);
+ memcpy(dp->indices(), indices, sizeOfIndices);
+ dp->fCount = count;
+ memcpy(dp->transforms(), transforms, sizeOfTransforms);
+ dp->fTransformsType = transformsType;
dp->fFill = fill;
-
if (dstCopy) {
dp->fDstCopy = *dstCopy;
}
+
+ this->recordTraceMarkersIfNecessary();
}
void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color,
@@ -425,11 +426,12 @@ void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color,
r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
rect = &r;
}
- Clear* clr = this->recordClear(renderTarget);
+ Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
GrColorIsPMAssert(color);
clr->fColor = color;
clr->fRect = *rect;
clr->fCanIgnoreRect = canIgnoreRect;
+ this->recordTraceMarkersIfNecessary();
}
void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) {
@@ -440,26 +442,21 @@ void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) {
renderTarget = this->drawState()->getRenderTarget();
SkASSERT(renderTarget);
}
- Clear* clr = this->recordClear(renderTarget);
+ Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
clr->fColor = GrColor_ILLEGAL;
+ this->recordTraceMarkersIfNecessary();
}
void GrInOrderDrawBuffer::reset() {
SkASSERT(1 == fGeoPoolStateStack.count());
this->resetVertexSource();
this->resetIndexSource();
-
- fCmds.reset();
- fDraws.reset();
- fStencilPaths.reset();
- fDrawPath.reset();
- fDrawPaths.reset();
- fStates.reset();
- fClears.reset();
+
+ fCmdBuffer.reset();
+ fLastState = NULL;
+ fLastClip = NULL;
fVertexPool.reset();
fIndexPool.reset();
- fClips.reset();
- fCopySurfaces.reset();
fGpuCmdMarkers.reset();
fClipSet = true;
}
@@ -474,8 +471,7 @@ void GrInOrderDrawBuffer::flush() {
SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);
- int numCmds = fCmds.count();
- if (0 == numCmds) {
+ if (fCmdBuffer.empty()) {
return;
}
@@ -490,113 +486,35 @@ void GrInOrderDrawBuffer::flush() {
GrDrawState* prevDrawState = SkRef(fDstGpu->drawState());
- GrClipData clipData;
-
- StateAllocator::Iter stateIter(&fStates);
- ClipAllocator::Iter clipIter(&fClips);
- ClearAllocator::Iter clearIter(&fClears);
- DrawAllocator::Iter drawIter(&fDraws);
- StencilPathAllocator::Iter stencilPathIter(&fStencilPaths);
- DrawPathAllocator::Iter drawPathIter(&fDrawPath);
- DrawPathsAllocator::Iter drawPathsIter(&fDrawPaths);
- CopySurfaceAllocator::Iter copySurfaceIter(&fCopySurfaces);
-
- int currCmdMarker = 0;
+ CmdBuffer::Iter iter(fCmdBuffer);
+ int currCmdMarker = 0;
fDstGpu->saveActiveTraceMarkers();
- for (int c = 0; c < numCmds; ++c) {
+
+ while (iter.next()) {
GrGpuTraceMarker newMarker("", -1);
SkString traceString;
- if (cmd_has_trace_marker(fCmds[c])) {
+ if (cmd_has_trace_marker(iter->fType)) {
traceString = fGpuCmdMarkers[currCmdMarker].toString();
newMarker.fMarker = traceString.c_str();
fDstGpu->addGpuTraceMarker(&newMarker);
++currCmdMarker;
}
- switch (strip_trace_bit(fCmds[c])) {
- case kDraw_Cmd: {
- SkASSERT(fDstGpu->drawState() != prevDrawState);
- SkAssertResult(drawIter.next());
- fDstGpu->setVertexSourceToBuffer(drawIter->vertexBuffer());
- if (drawIter->isIndexed()) {
- fDstGpu->setIndexSourceToBuffer(drawIter->indexBuffer());
- }
- fDstGpu->executeDraw(*drawIter);
- break;
- }
- case kStencilPath_Cmd: {
- SkASSERT(fDstGpu->drawState() != prevDrawState);
- SkAssertResult(stencilPathIter.next());
- fDstGpu->stencilPath(stencilPathIter->path(), stencilPathIter->fFill);
- break;
- }
- case kDrawPath_Cmd: {
- SkASSERT(fDstGpu->drawState() != prevDrawState);
- SkAssertResult(drawPathIter.next());
- fDstGpu->executeDrawPath(drawPathIter->path(), drawPathIter->fFill,
- drawPathIter->fDstCopy.texture() ?
- &drawPathIter->fDstCopy :
- NULL);
- break;
- }
- case kDrawPaths_Cmd: {
- SkASSERT(fDstGpu->drawState() != prevDrawState);
- SkAssertResult(drawPathsIter.next());
- const GrDeviceCoordTexture* dstCopy =
- drawPathsIter->fDstCopy.texture() ? &drawPathsIter->fDstCopy : NULL;
- fDstGpu->executeDrawPaths(drawPathsIter->pathRange(),
- drawPathsIter->fIndices,
- drawPathsIter->fCount,
- drawPathsIter->fTransforms,
- drawPathsIter->fTransformsType,
- drawPathsIter->fFill,
- dstCopy);
- break;
- }
- case kSetState_Cmd:
- SkAssertResult(stateIter.next());
- fDstGpu->setDrawState(stateIter.get());
- break;
- case kSetClip_Cmd:
- SkAssertResult(clipIter.next());
- clipData.fClipStack = &clipIter->fStack;
- clipData.fOrigin = clipIter->fOrigin;
- fDstGpu->setClip(&clipData);
- break;
- case kClear_Cmd:
- SkAssertResult(clearIter.next());
- if (GrColor_ILLEGAL == clearIter->fColor) {
- fDstGpu->discard(clearIter->renderTarget());
- } else {
- fDstGpu->clear(&clearIter->fRect,
- clearIter->fColor,
- clearIter->fCanIgnoreRect,
- clearIter->renderTarget());
- }
- break;
- case kCopySurface_Cmd:
- SkAssertResult(copySurfaceIter.next());
- fDstGpu->copySurface(copySurfaceIter->dst(),
- copySurfaceIter->src(),
- copySurfaceIter->fSrcRect,
- copySurfaceIter->fDstPoint);
- break;
- }
- if (cmd_has_trace_marker(fCmds[c])) {
+
+ SkDEBUGCODE(bool isDraw = kDraw_Cmd == strip_trace_bit(iter->fType) ||
+ kStencilPath_Cmd == strip_trace_bit(iter->fType) ||
+ kDrawPath_Cmd == strip_trace_bit(iter->fType) ||
+ kDrawPaths_Cmd == strip_trace_bit(iter->fType));
+ SkASSERT(!isDraw || fDstGpu->drawState() != prevDrawState);
+
+ iter->execute(fDstGpu);
+
+ if (cmd_has_trace_marker(iter->fType)) {
fDstGpu->removeGpuTraceMarker(&newMarker);
}
}
- fDstGpu->restoreActiveTraceMarkers();
- // we should have consumed all the states, clips, etc.
- SkASSERT(!stateIter.next());
- SkASSERT(!clipIter.next());
- SkASSERT(!clearIter.next());
- SkASSERT(!drawIter.next());
- SkASSERT(!copySurfaceIter.next());
- SkASSERT(!stencilPathIter.next());
- SkASSERT(!drawPathIter.next());
- SkASSERT(!drawPathsIter.next());
+ fDstGpu->restoreActiveTraceMarkers();
SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);
fDstGpu->setDrawState(prevDrawState);
@@ -605,14 +523,58 @@ void GrInOrderDrawBuffer::flush() {
++fDrawID;
}
+void GrInOrderDrawBuffer::Draw::execute(GrDrawTarget* gpu) {
+ gpu->setVertexSourceToBuffer(this->vertexBuffer());
+ if (fInfo.isIndexed()) {
+ gpu->setIndexSourceToBuffer(this->indexBuffer());
+ }
+ gpu->executeDraw(fInfo);
+}
+
+void GrInOrderDrawBuffer::StencilPath::execute(GrDrawTarget* gpu) {
+ gpu->stencilPath(this->path(), fFill);
+}
+
+void GrInOrderDrawBuffer::DrawPath::execute(GrDrawTarget* gpu) {
+ gpu->executeDrawPath(this->path(), fFill, fDstCopy.texture() ? &fDstCopy : NULL);
+}
+
+void GrInOrderDrawBuffer::DrawPaths::execute(GrDrawTarget* gpu) {
+ gpu->executeDrawPaths(this->pathRange(), this->indices(), fCount, this->transforms(),
+ fTransformsType, fFill, fDstCopy.texture() ? &fDstCopy : NULL);
+}
+
+void GrInOrderDrawBuffer::SetState::execute(GrDrawTarget* gpu) {
+ gpu->setDrawState(&fState);
+}
+
+void GrInOrderDrawBuffer::SetClip::execute(GrDrawTarget* gpu) {
+ // Our fClipData is referenced directly, so we must remain alive for the entire
+ // duration of the flush (after which the gpu's previous clip is restored).
+ gpu->setClip(&fClipData);
+}
+
+void GrInOrderDrawBuffer::Clear::execute(GrDrawTarget* gpu) {
+ if (GrColor_ILLEGAL == fColor) {
+ gpu->discard(this->renderTarget());
+ } else {
+ gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
+ }
+}
+
+void GrInOrderDrawBuffer::CopySurface::execute(GrDrawTarget* gpu) {
+ gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
+}
+
bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst,
GrSurface* src,
const SkIRect& srcRect,
const SkIPoint& dstPoint) {
if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) {
- CopySurface* cs = this->recordCopySurface(dst, src);
+ CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src));
cs->fSrcRect = srcRect;
cs->fDstPoint = dstPoint;
+ this->recordTraceMarkersIfNecessary();
return true;
} else {
return false;
@@ -832,94 +794,55 @@ void GrInOrderDrawBuffer::geometrySourceWillPop(const GeometrySrcState& restored
}
void GrInOrderDrawBuffer::recordStateIfNecessary() {
- if (fStates.empty()) {
- this->convertDrawStateToPendingExec(&fStates.push_back(this->getDrawState()));
- this->addToCmdBuffer(kSetState_Cmd);
+ if (!fLastState) {
+ SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (this->getDrawState()));
+ fLastState = &ss->fState;
+ this->convertDrawStateToPendingExec(fLastState);
+ this->recordTraceMarkersIfNecessary();
return;
}
const GrDrawState& curr = this->getDrawState();
- GrDrawState& prev = fStates.back();
- switch (GrDrawState::CombineIfPossible(prev, curr, *this->caps())) {
+ switch (GrDrawState::CombineIfPossible(*fLastState, curr, *this->caps())) {
case GrDrawState::kIncompatible_CombinedState:
- this->convertDrawStateToPendingExec(&fStates.push_back(curr));
- this->addToCmdBuffer(kSetState_Cmd);
+ fLastState = &GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (curr))->fState;
+ this->convertDrawStateToPendingExec(fLastState);
+ this->recordTraceMarkersIfNecessary();
break;
case GrDrawState::kA_CombinedState:
case GrDrawState::kAOrB_CombinedState: // Treat the same as kA.
break;
case GrDrawState::kB_CombinedState:
// prev has already been converted to pending execution. That is a one-way ticket.
- // So here we just delete prev and push back a new copy of curr. Note that this
- // goes away when we move GrIODB over to taking optimized snapshots of draw states.
- fStates.pop_back();
- this->convertDrawStateToPendingExec(&fStates.push_back(curr));
+ // So here we just destruct the previous state and reinit with a new copy of curr.
+ // Note that this goes away when we move GrIODB over to taking optimized snapshots
+ // of draw states.
+ fLastState->~GrDrawState();
+ SkNEW_PLACEMENT_ARGS(fLastState, GrDrawState, (curr));
+ this->convertDrawStateToPendingExec(fLastState);
break;
}
}
-bool GrInOrderDrawBuffer::needsNewClip() const {
- if (this->getDrawState().isClipState()) {
- if (fClipSet &&
- (fClips.empty() ||
- fClips.back().fStack != *this->getClip()->fClipStack ||
- fClips.back().fOrigin != this->getClip()->fOrigin)) {
- return true;
- }
+void GrInOrderDrawBuffer::recordClipIfNecessary() {
+ if (this->getDrawState().isClipState() &&
+ fClipSet &&
+ (!fLastClip || *fLastClip != *this->getClip())) {
+ fLastClip = &GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetClip, (this->getClip()))->fClipData;
+ this->recordTraceMarkersIfNecessary();
+ fClipSet = false;
}
- return false;
}
-void GrInOrderDrawBuffer::addToCmdBuffer(uint8_t cmd) {
- SkASSERT(!cmd_has_trace_marker(cmd));
+void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() {
+ SkASSERT(!fCmdBuffer.empty());
+ SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType));
const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers();
if (activeTraceMarkers.count() > 0) {
- fCmds.push_back(add_trace_bit(cmd));
+ fCmdBuffer.back().fType = add_trace_bit(fCmdBuffer.back().fType);
fGpuCmdMarkers.push_back(activeTraceMarkers);
- } else {
- fCmds.push_back(cmd);
}
}
-void GrInOrderDrawBuffer::recordClip() {
- fClips.push_back().fStack = *this->getClip()->fClipStack;
- fClips.back().fOrigin = this->getClip()->fOrigin;
- fClipSet = false;
- this->addToCmdBuffer(kSetClip_Cmd);
-}
-
-GrInOrderDrawBuffer::Draw* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info,
- const GrVertexBuffer* vb,
- const GrIndexBuffer* ib) {
- this->addToCmdBuffer(kDraw_Cmd);
- return GrNEW_APPEND_TO_ALLOCATOR(&fDraws, Draw, (info, vb, ib));
-}
-
-GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath(const GrPath* path) {
- this->addToCmdBuffer(kStencilPath_Cmd);
- return GrNEW_APPEND_TO_ALLOCATOR(&fStencilPaths, StencilPath, (path));
-}
-
-GrInOrderDrawBuffer::DrawPath* GrInOrderDrawBuffer::recordDrawPath(const GrPath* path) {
- this->addToCmdBuffer(kDrawPath_Cmd);
- return GrNEW_APPEND_TO_ALLOCATOR(&fDrawPath, DrawPath, (path));
-}
-
-GrInOrderDrawBuffer::DrawPaths* GrInOrderDrawBuffer::recordDrawPaths(const GrPathRange* pathRange) {
- this->addToCmdBuffer(kDrawPaths_Cmd);
- return GrNEW_APPEND_TO_ALLOCATOR(&fDrawPaths, DrawPaths, (pathRange));
-}
-
-GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear(GrRenderTarget* rt) {
- this->addToCmdBuffer(kClear_Cmd);
- return GrNEW_APPEND_TO_ALLOCATOR(&fClears, Clear, (rt));
-}
-
-GrInOrderDrawBuffer::CopySurface* GrInOrderDrawBuffer::recordCopySurface(GrSurface* dst,
- GrSurface* src) {
- this->addToCmdBuffer(kCopySurface_Cmd);
- return GrNEW_APPEND_TO_ALLOCATOR(&fCopySurfaces, CopySurface, (dst, src));
-}
-
void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
INHERITED::clipWillBeSet(newClipData);
fClipSet = true;
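
The playback loop in flush() above no longer switches on a command byte; each recorded command now replays itself through a virtual execute() call against the target. A minimal, self-contained sketch of that pattern follows; ToyTarget, ToyCmd, ToyClear, and ToyDraw are hypothetical stand-ins, not Skia API, and a plain vector stands in for the GrTRecorder that GrInOrderDrawBuffer actually records into.

    #include <cstdio>
    #include <vector>

    struct ToyTarget {
        void clear(int color)      { std::printf("clear 0x%x\n", color); }
        void draw(int vertexCount) { std::printf("draw %d vertices\n", vertexCount); }
    };

    struct ToyCmd {
        virtual ~ToyCmd() {}
        virtual void execute(ToyTarget*) = 0;   // analogous to Cmd::execute(GrDrawTarget*)
    };

    struct ToyClear : public ToyCmd {
        explicit ToyClear(int c) : fColor(c) {}
        virtual void execute(ToyTarget* t) { t->clear(fColor); }
        int fColor;
    };

    struct ToyDraw : public ToyCmd {
        explicit ToyDraw(int n) : fVertexCount(n) {}
        virtual void execute(ToyTarget* t) { t->draw(fVertexCount); }
        int fVertexCount;
    };

    int main() {
        ToyTarget target;
        std::vector<ToyCmd*> buffer;                  // stand-in for the CmdBuffer recorder
        buffer.push_back(new ToyClear(0xff00ff00));
        buffer.push_back(new ToyDraw(3));
        for (size_t i = 0; i < buffer.size(); ++i) {  // playback, like flush()'s while (iter.next())
            buffer[i]->execute(&target);
            delete buffer[i];
        }
        return 0;
    }
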
diff --git a/src/gpu/GrInOrderDrawBuffer.h b/src/gpu/GrInOrderDrawBuffer.h
index 485de07db5..5d0fc0440d 100644
--- a/src/gpu/GrInOrderDrawBuffer.h
+++ b/src/gpu/GrInOrderDrawBuffer.h
@@ -16,6 +16,7 @@
#include "GrPath.h"
#include "GrPathRange.h"
#include "GrSurface.h"
+#include "GrTRecorder.h"
#include "GrVertexBuffer.h"
#include "SkClipStack.h"
@@ -87,7 +88,7 @@ protected:
virtual void clipWillBeSet(const GrClipData* newClip) SK_OVERRIDE;
private:
- enum Cmd {
+ enum {
kDraw_Cmd = 1,
kStencilPath_Cmd = 2,
kSetState_Cmd = 3,
@@ -98,37 +99,54 @@ private:
kDrawPaths_Cmd = 8,
};
- class Draw : public DrawInfo {
- public:
+ struct Cmd : ::SkNoncopyable {
+ Cmd(uint8_t type) : fType(type) {}
+ virtual ~Cmd() {}
+
+ virtual void execute(GrDrawTarget*) = 0;
+
+ uint8_t fType;
+ };
+
+ struct Draw : public Cmd {
Draw(const DrawInfo& info, const GrVertexBuffer* vb, const GrIndexBuffer* ib)
- : DrawInfo(info)
+ : Cmd(kDraw_Cmd)
+ , fInfo(info)
, fVertexBuffer(vb)
, fIndexBuffer(ib) {}
const GrVertexBuffer* vertexBuffer() const { return fVertexBuffer.get(); }
const GrIndexBuffer* indexBuffer() const { return fIndexBuffer.get(); }
+ virtual void execute(GrDrawTarget*);
+
+ DrawInfo fInfo;
+
private:
GrPendingIOResource<const GrVertexBuffer, kRead_GrIOType> fVertexBuffer;
GrPendingIOResource<const GrIndexBuffer, kRead_GrIOType> fIndexBuffer;
};
- struct StencilPath : public ::SkNoncopyable {
- StencilPath(const GrPath* path) : fPath(path) {}
+ struct StencilPath : public Cmd {
+ StencilPath(const GrPath* path) : Cmd(kStencilPath_Cmd), fPath(path) {}
const GrPath* path() const { return fPath.get(); }
+ virtual void execute(GrDrawTarget*);
+
SkPath::FillType fFill;
private:
GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
};
- struct DrawPath : public ::SkNoncopyable {
- DrawPath(const GrPath* path) : fPath(path) {}
+ struct DrawPath : public Cmd {
+ DrawPath(const GrPath* path) : Cmd(kDrawPath_Cmd), fPath(path) {}
const GrPath* path() const { return fPath.get(); }
+ virtual void execute(GrDrawTarget*);
+
SkPath::FillType fFill;
GrDeviceCoordTexture fDstCopy;
@@ -136,24 +154,16 @@ private:
GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
};
- struct DrawPaths : public ::SkNoncopyable {
- DrawPaths(const GrPathRange* pathRange)
- : fPathRange(pathRange) {}
-
- ~DrawPaths() {
- if (fTransforms) {
- SkDELETE_ARRAY(fTransforms);
- }
- if (fIndices) {
- SkDELETE_ARRAY(fIndices);
- }
- }
+ struct DrawPaths : public Cmd {
+ DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_Cmd), fPathRange(pathRange) {}
const GrPathRange* pathRange() const { return fPathRange.get(); }
+ uint32_t* indices() { return reinterpret_cast<uint32_t*>(CmdBuffer::GetDataForItem(this)); }
+ float* transforms() { return reinterpret_cast<float*>(&this->indices()[fCount]); }
+
+ virtual void execute(GrDrawTarget*);
- uint32_t* fIndices;
size_t fCount;
- float* fTransforms;
PathTransformType fTransformsType;
SkPath::FillType fFill;
GrDeviceCoordTexture fDstCopy;
@@ -163,11 +173,13 @@ private:
};
// This is also used to record a discard by setting the color to GrColor_ILLEGAL
- struct Clear : public ::SkNoncopyable {
- Clear(GrRenderTarget* rt) : fRenderTarget(rt) {}
- ~Clear() { }
+ struct Clear : public Cmd {
+ Clear(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {}
+
GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
+ virtual void execute(GrDrawTarget*);
+
SkIRect fRect;
GrColor fColor;
bool fCanIgnoreRect;
@@ -176,12 +188,14 @@ private:
GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
};
- struct CopySurface : public ::SkNoncopyable {
- CopySurface(GrSurface* dst, GrSurface* src) : fDst(dst), fSrc(src) {}
+ struct CopySurface : public Cmd {
+ CopySurface(GrSurface* dst, GrSurface* src) : Cmd(kCopySurface_Cmd), fDst(dst), fSrc(src) {}
GrSurface* dst() const { return fDst.get(); }
GrSurface* src() const { return fSrc.get(); }
+ virtual void execute(GrDrawTarget*);
+
SkIPoint fDstPoint;
SkIRect fSrcRect;
@@ -190,11 +204,33 @@ private:
GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc;
};
- struct Clip : public ::SkNoncopyable {
- SkClipStack fStack;
- SkIPoint fOrigin;
+ struct SetState : public Cmd {
+ SetState(const GrDrawState& state) : Cmd(kSetState_Cmd), fState(state) {}
+
+ virtual void execute(GrDrawTarget*);
+
+ GrDrawState fState;
+ };
+
+ struct SetClip : public Cmd {
+ SetClip(const GrClipData* clipData)
+ : Cmd(kSetClip_Cmd),
+ fStackStorage(*clipData->fClipStack) {
+ fClipData.fClipStack = &fStackStorage;
+ fClipData.fOrigin = clipData->fOrigin;
+ }
+
+ virtual void execute(GrDrawTarget*);
+
+ GrClipData fClipData;
+
+ private:
+ SkClipStack fStackStorage;
};
+ typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
+ typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
+
// overrides from GrDrawTarget
virtual void onDraw(const DrawInfo&) SK_OVERRIDE;
virtual void onDrawRect(const SkRect& rect,
@@ -247,57 +283,25 @@ private:
// Determines whether the current draw operation requires a new drawstate and if so records it.
void recordStateIfNecessary();
// We lazily record clip changes in order to skip clips that have no effect.
- bool needsNewClip() const;
-
- // these functions record a command
- void recordState();
- void recordClip();
- Draw* recordDraw(const DrawInfo&, const GrVertexBuffer*, const GrIndexBuffer*);
- StencilPath* recordStencilPath(const GrPath*);
- DrawPath* recordDrawPath(const GrPath*);
- DrawPaths* recordDrawPaths(const GrPathRange*);
- Clear* recordClear(GrRenderTarget*);
- CopySurface* recordCopySurface(GrSurface* dst, GrSurface* src);
+ void recordClipIfNecessary();
+ // Records any trace markers for a command after adding it to the buffer.
+ void recordTraceMarkersIfNecessary();
virtual bool isIssued(uint32_t drawID) { return drawID != fDrawID; }
- void addToCmdBuffer(uint8_t cmd);
// TODO: Use a single allocator for commands and records
enum {
- kCmdPreallocCnt = 32,
- kDrawPreallocCnt = 16,
- kStencilPathPreallocCnt = 8,
- kDrawPathPreallocCnt = 8,
- kDrawPathsPreallocCnt = 8,
- kStatePreallocCnt = 8,
- kClipPreallocCnt = 8,
- kClearPreallocCnt = 8,
- kGeoPoolStatePreAllocCnt = 4,
- kCopySurfacePreallocCnt = 4,
+ kCmdBufferInitialSizeInBytes = 64 * 1024,
+ kGeoPoolStatePreAllocCnt = 4,
};
- typedef GrTAllocator<Draw> DrawAllocator;
- typedef GrTAllocator<StencilPath> StencilPathAllocator;
- typedef GrTAllocator<DrawPath> DrawPathAllocator;
- typedef GrTAllocator<DrawPaths> DrawPathsAllocator;
- typedef GrTAllocator<GrDrawState> StateAllocator;
- typedef GrTAllocator<Clear> ClearAllocator;
- typedef GrTAllocator<CopySurface> CopySurfaceAllocator;
- typedef GrTAllocator<Clip> ClipAllocator;
-
- GrSTAllocator<kDrawPreallocCnt, Draw> fDraws;
- GrSTAllocator<kStencilPathPreallocCnt, StencilPath> fStencilPaths;
- GrSTAllocator<kDrawPathPreallocCnt, DrawPath> fDrawPath;
- GrSTAllocator<kDrawPathsPreallocCnt, DrawPaths> fDrawPaths;
- GrSTAllocator<kStatePreallocCnt, GrDrawState> fStates;
- GrSTAllocator<kClearPreallocCnt, Clear> fClears;
- GrSTAllocator<kCopySurfacePreallocCnt, CopySurface> fCopySurfaces;
- GrSTAllocator<kClipPreallocCnt, Clip> fClips;
-
- SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers;
- SkSTArray<kCmdPreallocCnt, uint8_t, true> fCmds;
- GrDrawTarget* fDstGpu;
- bool fClipSet;
+ CmdBuffer fCmdBuffer;
+ GrDrawState* fLastState;
+ GrClipData* fLastClip;
+
+ SkTArray<GrTraceMarkerSet, false> fGpuCmdMarkers;
+ GrDrawTarget* fDstGpu;
+ bool fClipSet;
enum ClipProxyState {
kUnknown_ClipProxyState,
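
Worth noting from the header above: DrawPaths no longer owns heap-allocated index and transform arrays; both live inline in the recorder's storage immediately after the command object, and are reached through CmdBuffer::GetDataForItem(). Below is a minimal sketch of that trailing-data idiom; the Blob type and the record() helper are hypothetical illustrations, not part of this change.

    #include <stdint.h>
    #include <string.h>
    #include "GrTRecorder.h"

    struct Blob {
        typedef GrTRecorder<Blob, void*> Recorder;

        explicit Blob(int count) : fCount(count) {}
        virtual ~Blob() {}   // virtual so the recorder can destruct items through the base type

        // Trailing storage starts right after the item, rounded up to a TAlign boundary.
        uint32_t* indices()    { return reinterpret_cast<uint32_t*>(Recorder::GetDataForItem(this)); }
        float*    transforms() { return reinterpret_cast<float*>(&this->indices()[fCount]); }

        int fCount;
    };

    static void record(Blob::Recorder& recorder, const uint32_t* srcIndices,
                       const float* srcTransforms, int count) {
        size_t bytes = count * sizeof(uint32_t) + count * sizeof(float);
        Blob* b = GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder, Blob, (count), bytes);
        memcpy(b->indices(), srcIndices, count * sizeof(uint32_t));
        memcpy(b->transforms(), srcTransforms, count * sizeof(float));
    }
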
diff --git a/src/gpu/GrTRecorder.h b/src/gpu/GrTRecorder.h
new file mode 100644
index 0000000000..c8f7644f4f
--- /dev/null
+++ b/src/gpu/GrTRecorder.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTRecorder_DEFINED
+#define GrTRecorder_DEFINED
+
+#include "SkTemplates.h"
+#include "SkTypes.h"
+
+template<typename TBase, typename TAlign> class GrTRecorder;
+template<typename TItem> struct GrTRecorderAllocWrapper;
+
+/**
+ * Records a list of items with a common base type, optional associated data, and
+ * permanent memory addresses.
+ *
+ * This class preallocates its own chunks of memory for hosting objects, so new items can
+ * be created without excessive calls to malloc().
+ *
+ * To create a new item and append it to the back of the list, use the following macros:
+ *
+ * GrNEW_APPEND_TO_RECORDER(recorder, SubclassName, (args))
+ * GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder, SubclassName, (args), sizeOfData)
+ *
+ * Upon reset or delete, the items are destructed in the same order they were received,
+ * not reverse (stack) order.
+ *
+ * @param TBase Common base type of items in the list. If TBase is not a class with a
+ * virtual destructor, the client is responsible for invoking any necessary
+ * destructors.
+ *
+ * For now, any subclass used in the list must have the same start address
+ * as TBase (or in other words, the types must be convertible via
+ * reinterpret_cast<>). Classes with multiple inheritance (or any subclass
+ * on an obscure compiler) may not be compatible. This is runtime asserted
+ * in debug builds.
+ *
+ * @param TAlign A type whose size is the desired memory alignment for object allocations.
+ * This should be the largest known alignment requirement for all objects
+ * that may be stored in the list.
+ */
+template<typename TBase, typename TAlign> class GrTRecorder : SkNoncopyable {
+public:
+ class Iter;
+
+ /**
+ * Create a recorder.
+ *
+ * @param initialSizeInBytes The amount of memory reserved by the recorder initially,
+ and after calls to reset().
+ */
+ GrTRecorder(int initialSizeInBytes)
+ : fHeadBlock(MemBlock::Alloc(LengthOf(initialSizeInBytes))),
+ fTailBlock(fHeadBlock),
+ fLastItem(NULL) {}
+
+ ~GrTRecorder() {
+ this->reset();
+ sk_free(fHeadBlock);
+ }
+
+ bool empty() { return !fLastItem; }
+
+ TBase& back() {
+ SkASSERT(!this->empty());
+ return *fLastItem;
+ }
+
+ /**
+ * Destruct all items in the list and reset to empty.
+ */
+ void reset();
+
+ /**
+ * Retrieve the extra data associated with an item that was allocated using
+ * GrNEW_APPEND_WITH_DATA_TO_RECORDER().
+ *
+ * @param item The item whose data to retrieve. The pointer must be of the same type
+ * that was allocated initally; it can't be a pointer to a base class.
+ *
+ * @return The item's associated data.
+ */
+ template<typename TItem> static const void* GetDataForItem(const TItem* item) {
+ const TAlign* ptr = reinterpret_cast<const TAlign*>(item);
+ return &ptr[length_of<TItem>::kValue];
+ }
+ template<typename TItem> static void* GetDataForItem(TItem* item) {
+ TAlign* ptr = reinterpret_cast<TAlign*>(item);
+ return &ptr[length_of<TItem>::kValue];
+ }
+
+private:
+ template<typename TItem> struct length_of {
+ enum { kValue = (sizeof(TItem) + sizeof(TAlign) - 1) / sizeof(TAlign) };
+ };
+ static int LengthOf(int bytes) { return (bytes + sizeof(TAlign) - 1) / sizeof(TAlign); }
+
+ struct Header {
+ int fTotalLength;
+ };
+ template<typename TItem> TItem* alloc_back(int dataLength);
+
+ struct MemBlock {
+ static MemBlock* Alloc(int length) {
+ void* ptr = sk_malloc_throw(sizeof(TAlign) * (length_of<MemBlock>::kValue + length));
+ return SkNEW_PLACEMENT_ARGS(ptr, MemBlock, (length));
+ }
+ TAlign& operator [](int i) {
+ return reinterpret_cast<TAlign*>(this)[length_of<MemBlock>::kValue + i];
+ }
+ ~MemBlock() { sk_free(fNext); }
+
+ const int fLength;
+ int fBack;
+ MemBlock* fNext;
+
+ private:
+ MemBlock(int length) : fLength(length), fBack(0), fNext(NULL) {}
+ };
+ MemBlock* const fHeadBlock;
+ MemBlock* fTailBlock;
+
+ TBase* fLastItem;
+
+ template<typename TItem> friend struct GrTRecorderAllocWrapper;
+
+ template <typename UBase, typename UAlign, typename UAlloc>
+ friend void* operator new(size_t, GrTRecorder<UBase, UAlign>&,
+ const GrTRecorderAllocWrapper<UAlloc>&);
+
+ friend class Iter;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+template<typename TBase, typename TAlign>
+template<typename TItem>
+TItem* GrTRecorder<TBase, TAlign>::alloc_back(int dataLength) {
+ const int totalLength = length_of<Header>::kValue + length_of<TItem>::kValue + dataLength;
+
+ if (fTailBlock->fBack + totalLength > fTailBlock->fLength) {
+ SkASSERT(!fTailBlock->fNext);
+ fTailBlock->fNext = MemBlock::Alloc(SkTMax(2 * fTailBlock->fLength, totalLength));
+ fTailBlock = fTailBlock->fNext;
+ }
+
+ Header* header = reinterpret_cast<Header*>(&(*fTailBlock)[fTailBlock->fBack]);
+ TItem* rawPtr = reinterpret_cast<TItem*>(
+ &(*fTailBlock)[fTailBlock->fBack + length_of<Header>::kValue]);
+
+ header->fTotalLength = totalLength;
+ fLastItem = rawPtr;
+ fTailBlock->fBack += totalLength;
+
+ // FIXME: We currently require that the base and subclass share the same start address.
+ // This is not required by the C++ spec, and is likely to not be true in the case of
+ // multiple inheritance or a base class that doesn't have virtual methods (when the
+ // subclass does). It would be ideal to find a more robust solution that comes at no
+ // extra cost to performance or code generality.
+ SkDEBUGCODE(void* baseAddr = fLastItem;
+ void* subclassAddr = rawPtr);
+ SkASSERT(baseAddr == subclassAddr);
+
+ return rawPtr;
+}
+
+template<typename TBase, typename TAlign>
+class GrTRecorder<TBase, TAlign>::Iter {
+public:
+ Iter(GrTRecorder& recorder) : fBlock(recorder.fHeadBlock), fPosition(0), fItem(NULL) {}
+
+ bool next() {
+ if (fPosition >= fBlock->fBack) {
+ SkASSERT(fPosition == fBlock->fBack);
+ if (!fBlock->fNext) {
+ return false;
+ }
+ SkASSERT(0 != fBlock->fNext->fBack);
+ fBlock = fBlock->fNext;
+ fPosition = 0;
+ }
+
+ Header* header = reinterpret_cast<Header*>(&(*fBlock)[fPosition]);
+ fItem = reinterpret_cast<TBase*>(&(*fBlock)[fPosition + length_of<Header>::kValue]);
+ fPosition += header->fTotalLength;
+ return true;
+ }
+
+ TBase* get() const {
+ SkASSERT(fItem);
+ return fItem;
+ }
+
+ TBase* operator->() const { return this->get(); }
+
+private:
+ MemBlock* fBlock;
+ int fPosition;
+ TBase* fItem;
+};
+
+template<typename TBase, typename TAlign>
+void GrTRecorder<TBase, TAlign>::reset() {
+ Iter iter(*this);
+ while (iter.next()) {
+ iter->~TBase();
+ }
+ fHeadBlock->fBack = 0;
+ sk_free(fHeadBlock->fNext);
+ fHeadBlock->fNext = NULL;
+ fTailBlock = fHeadBlock;
+ fLastItem = NULL;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+template<typename TItem> struct GrTRecorderAllocWrapper {
+ GrTRecorderAllocWrapper() : fDataLength(0) {}
+
+ template <typename TBase, typename TAlign>
+ GrTRecorderAllocWrapper(const GrTRecorder<TBase, TAlign>&, int sizeOfData)
+ : fDataLength(GrTRecorder<TBase, TAlign>::LengthOf(sizeOfData)) {}
+
+ const int fDataLength;
+};
+
+template <typename TBase, typename TAlign, typename TItem>
+void* operator new(size_t size, GrTRecorder<TBase, TAlign>& recorder,
+ const GrTRecorderAllocWrapper<TItem>& wrapper) {
+ SkASSERT(size == sizeof(TItem));
+ return recorder.template alloc_back<TItem>(wrapper.fDataLength);
+}
+
+template <typename TBase, typename TAlign, typename TItem>
+void operator delete(void*, GrTRecorder<TBase, TAlign>&, const GrTRecorderAllocWrapper<TItem>&) {
+ // We only provide an operator delete to work around compiler warnings that can come
+ // up for an unmatched operator new when compiling with exceptions.
+ SK_CRASH();
+}
+
+#define GrNEW_APPEND_TO_RECORDER(recorder, type_name, args) \
+ (new (recorder, GrTRecorderAllocWrapper<type_name>()) type_name args)
+
+#define GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder, type_name, args, size_of_data) \
+ (new (recorder, GrTRecorderAllocWrapper<type_name>(recorder, size_of_data)) type_name args)
+
+#endif
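
For orientation, here is a minimal usage sketch of the recorder on its own; the Msg type, the 1024-byte block size, and the 16-byte payload are illustrative assumptions, while the real clients are GrInOrderDrawBuffer above and the unit test below.

    #include "GrTRecorder.h"

    struct Msg {
        explicit Msg(int id) : fId(id) {}
        virtual ~Msg() {}    // virtual dtor lets reset()/~GrTRecorder destroy subclasses correctly
        int fId;
    };

    static void example() {
        GrTRecorder<Msg, void*> recorder(1024);            // initial block; tail blocks grow by doubling
        GrNEW_APPEND_TO_RECORDER(recorder, Msg, (1));       // placement-new directly into the buffer
        GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder, Msg, (2), 16);  // item plus 16 trailing bytes

        SkASSERT(2 == recorder.back().fId);                 // back() returns the most recent item

        GrTRecorder<Msg, void*>::Iter iter(recorder);
        while (iter.next()) {                               // visits items in insertion order
            (void)iter->fId;
        }
        recorder.reset();                                   // runs ~Msg on every item, keeps the head block
    }
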
diff --git a/tests/GrTRecorderTest.cpp b/tests/GrTRecorderTest.cpp
new file mode 100644
index 0000000000..a5aedf687c
--- /dev/null
+++ b/tests/GrTRecorderTest.cpp
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#if SK_SUPPORT_GPU
+
+#include "SkMatrix.h"
+#include "SkString.h"
+#include "GrTRecorder.h"
+#include "Test.h"
+
+////////////////////////////////////////////////////////////////////////////////
+
+static int activeRecorderItems = 0;
+
+class IntWrapper {
+public:
+ IntWrapper() {}
+ IntWrapper(int value) : fValue(value) {}
+ operator int() { return fValue; }
+private:
+ int fValue;
+};
+
+static void test_empty_back(skiatest::Reporter* reporter) {
+ GrTRecorder<IntWrapper, int> recorder(0);
+
+ REPORTER_ASSERT(reporter, recorder.empty());
+
+ for (int i = 0; i < 100; ++i) {
+ REPORTER_ASSERT(reporter, i == *GrNEW_APPEND_TO_RECORDER(recorder, IntWrapper, (i)));
+ REPORTER_ASSERT(reporter, !recorder.empty());
+ REPORTER_ASSERT(reporter, i == recorder.back());
+ }
+
+ REPORTER_ASSERT(reporter, !recorder.empty());
+
+ recorder.reset();
+
+ REPORTER_ASSERT(reporter, recorder.empty());
+}
+
+struct ExtraData {
+ typedef GrTRecorder<ExtraData, int> Recorder;
+
+ ExtraData(int i) : fData(i) {
+ int* extraData = this->extraData();
+ for (int j = 0; j < i; j++) {
+ extraData[j] = i;
+ }
+ ++activeRecorderItems;
+ }
+ ~ExtraData() {
+ --activeRecorderItems;
+ }
+ int* extraData() {
+ return reinterpret_cast<int*>(Recorder::GetDataForItem(this));
+ }
+ int fData;
+};
+
+static void test_extra_data(skiatest::Reporter* reporter) {
+ ExtraData::Recorder recorder(0);
+ for (int i = 0; i < 100; ++i) {
+ GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder, ExtraData, (i), i * sizeof(int));
+ }
+ REPORTER_ASSERT(reporter, 100 == activeRecorderItems);
+
+ ExtraData::Recorder::Iter iter(recorder);
+ for (int i = 0; i < 100; ++i) {
+ REPORTER_ASSERT(reporter, iter.next());
+ REPORTER_ASSERT(reporter, i == iter->fData);
+ for (int j = 0; j < i; j++) {
+ REPORTER_ASSERT(reporter, i == iter->extraData()[j]);
+ }
+ }
+ REPORTER_ASSERT(reporter, !iter.next());
+
+ recorder.reset();
+ REPORTER_ASSERT(reporter, 0 == activeRecorderItems);
+}
+
+enum ClassType {
+ kBase_ClassType,
+ kSubclass_ClassType,
+ kSubSubclass_ClassType,
+ kSubclassExtraData_ClassType,
+ kSubclassEmpty_ClassType,
+
+ kNumClassTypes
+};
+
+class Base {
+public:
+ typedef GrTRecorder<Base, void*> Recorder;
+
+ Base() {
+ fMatrix.reset();
+ ++activeRecorderItems;
+ }
+
+ virtual ~Base() { --activeRecorderItems; }
+
+ virtual ClassType getType() { return kBase_ClassType; }
+
+ virtual void validate(skiatest::Reporter* reporter) const {
+ REPORTER_ASSERT(reporter, fMatrix.isIdentity());
+ }
+
+private:
+ SkMatrix fMatrix;
+};
+
+class Subclass : public Base {
+public:
+ Subclass() : fString("Lorem ipsum dolor sit amet") {}
+
+ virtual ClassType getType() { return kSubclass_ClassType; }
+
+ virtual void validate(skiatest::Reporter* reporter) const {
+ Base::validate(reporter);
+ REPORTER_ASSERT(reporter, !strcmp("Lorem ipsum dolor sit amet", fString.c_str()));
+ }
+
+private:
+ SkString fString;
+};
+
+class SubSubclass : public Subclass {
+public:
+ SubSubclass() : fInt(1234), fFloat(1.234f) {}
+
+ virtual ClassType getType() { return kSubSubclass_ClassType; }
+
+ virtual void validate(skiatest::Reporter* reporter) const {
+ Subclass::validate(reporter);
+ REPORTER_ASSERT(reporter, 1234 == fInt);
+ REPORTER_ASSERT(reporter, 1.234f == fFloat);
+ }
+
+private:
+ int fInt;
+ float fFloat;
+};
+
+class SubclassExtraData : public Base {
+public:
+ SubclassExtraData(int length) : fLength(length) {
+ int* data = reinterpret_cast<int*>(Recorder::GetDataForItem(this));
+ for (int i = 0; i < fLength; ++i) {
+ data[i] = ValueAt(i);
+ }
+ }
+
+ virtual ClassType getType() { return kSubclassExtraData_ClassType; }
+
+ virtual void validate(skiatest::Reporter* reporter) const {
+ Base::validate(reporter);
+ const int* data = reinterpret_cast<const int*>(Recorder::GetDataForItem(this));
+ for (int i = 0; i < fLength; ++i) {
+ REPORTER_ASSERT(reporter, ValueAt(i) == data[i]);
+ }
+ }
+
+private:
+ static int ValueAt(uint64_t i) { return static_cast<int>(123456789 + 987654321 * i); }
+ int fLength;
+};
+
+class SubclassEmpty : public Base {
+public:
+ virtual ClassType getType() { return kSubclassEmpty_ClassType; }
+};
+
+static void test_subclasses(skiatest::Reporter* reporter) {
+ class Order {
+ public:
+ Order() { this->reset(); }
+ void reset() { fCurrent = 0; }
+ ClassType next() {
+ fCurrent = 1664525 * fCurrent + 1013904223;
+ return static_cast<ClassType>(fCurrent % kNumClassTypes);
+ }
+ private:
+ uint32_t fCurrent;
+ };
+
+ Base::Recorder recorder(1024);
+
+ Order order;
+ for (int i = 0; i < 1000; i++) {
+ switch (order.next()) {
+ case kBase_ClassType:
+ GrNEW_APPEND_TO_RECORDER(recorder, Base, ());
+ break;
+
+ case kSubclass_ClassType:
+ GrNEW_APPEND_TO_RECORDER(recorder, Subclass, ());
+ break;
+
+ case kSubSubclass_ClassType:
+ GrNEW_APPEND_TO_RECORDER(recorder, SubSubclass, ());
+ break;
+
+ case kSubclassExtraData_ClassType:
+ GrNEW_APPEND_WITH_DATA_TO_RECORDER(recorder, SubclassExtraData, (i), sizeof(int) * i);
+ break;
+
+ case kSubclassEmpty_ClassType:
+ GrNEW_APPEND_TO_RECORDER(recorder, SubclassEmpty, ());
+ break;
+
+ default:
+ reporter->reportFailed(SkString("Invalid class type"));
+ break;
+ }
+ }
+ REPORTER_ASSERT(reporter, 1000 == activeRecorderItems);
+
+ order.reset();
+ Base::Recorder::Iter iter(recorder);
+ for (int i = 0; i < 1000; ++i) {
+ REPORTER_ASSERT(reporter, iter.next());
+ REPORTER_ASSERT(reporter, order.next() == iter->getType());
+ iter->validate(reporter);
+ }
+ REPORTER_ASSERT(reporter, !iter.next());
+
+ // Don't reset the recorder. It should automatically destruct all its items.
+}
+
+DEF_GPUTEST(GrTRecorder, reporter, factory) {
+ test_empty_back(reporter);
+
+ test_extra_data(reporter);
+ REPORTER_ASSERT(reporter, 0 == activeRecorderItems); // test_extra_data should call reset().
+
+ test_subclasses(reporter);
+ REPORTER_ASSERT(reporter, 0 == activeRecorderItems); // Ensure ~GrTRecorder invokes dtors.
+}
+
+#endif
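
As a closing note on GrTRecorder's bookkeeping, every record in a block is measured in TAlign-sized slots laid out as [Header][item][extra data]. A small worked example of that arithmetic, assuming a 64-bit build where TAlign is void* (8 bytes); the 20-byte item and 10-byte payload are made-up numbers.

    // For a 20-byte item type with 10 bytes of extra data:
    //   length_of<Item>::kValue   = (20 + 8 - 1) / 8 = 3 slots  (item padded to 24 bytes)
    //   LengthOf(10)              = (10 + 8 - 1) / 8 = 2 slots  (data padded to 16 bytes)
    //   header->fTotalLength      = length_of<Header> + 3 + 2   = 1 + 3 + 2 = 6 slots (48 bytes)
    // GetDataForItem(item) == reinterpret_cast<TAlign*>(item) + 3, i.e. the first byte
    // past the padded item, which is where the extra data begins.
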