aboutsummaryrefslogtreecommitdiffhomepage
path: root/src
diff options
context:
space:
mode:
authorGravatar joshualitt <joshualitt@chromium.org>2015-03-13 11:47:42 -0700
committerGravatar Commit bot <commit-bot@chromium.org>2015-03-13 11:47:42 -0700
commit5bf99f1ca8f30287803b594d06c60a7b6796ad45 (patch)
tree84fc1202da6ce911d29e4ca8f618136e690b38b5 /src
parent2a679ae8f5b80a337f67783dbc0a447a9f4312c7 (diff)
Creation of GrBatchAtlas and Distancefieldpathrenderer batch
Diffstat (limited to 'src')
-rwxr-xr-xsrc/gpu/GrAADistanceFieldPathRenderer.cpp745
-rwxr-xr-xsrc/gpu/GrAADistanceFieldPathRenderer.h32
-rw-r--r--src/gpu/GrBatch.h3
-rw-r--r--src/gpu/GrBatchAtlas.cpp369
-rw-r--r--src/gpu/GrBatchAtlas.h88
-rw-r--r--src/gpu/GrBatchTarget.cpp57
-rw-r--r--src/gpu/GrBatchTarget.h103
-rw-r--r--src/gpu/GrBitmapTextContext.h3
-rw-r--r--src/gpu/GrDrawTarget.h1
-rw-r--r--src/gpu/GrPipelineBuilder.cpp1
-rw-r--r--src/gpu/GrPipelineBuilder.h2
-rw-r--r--src/gpu/GrTargetCommands.h1
-rw-r--r--src/gpu/gl/builders/GrGLVertexShaderBuilder.h1
13 files changed, 1074 insertions, 332 deletions
diff --git a/src/gpu/GrAADistanceFieldPathRenderer.cpp b/src/gpu/GrAADistanceFieldPathRenderer.cpp
index d50af9fbd8..b9ca1ec26b 100755
--- a/src/gpu/GrAADistanceFieldPathRenderer.cpp
+++ b/src/gpu/GrAADistanceFieldPathRenderer.cpp
@@ -8,7 +8,9 @@
#include "GrAADistanceFieldPathRenderer.h"
-#include "GrAtlas.h"
+#include "GrBatch.h"
+#include "GrBatchTarget.h"
+#include "GrBufferAllocPool.h"
#include "GrContext.h"
#include "GrPipelineBuilder.h"
#include "GrSurfacePriv.h"
@@ -27,9 +29,6 @@
#define NUM_PLOTS_X (ATLAS_TEXTURE_WIDTH / PLOT_WIDTH)
#define NUM_PLOTS_Y (ATLAS_TEXTURE_HEIGHT / PLOT_HEIGHT)
-SK_CONF_DECLARE(bool, c_DumpPathCache, "gpu.dumpPathCache", false,
- "Dump the contents of the path cache before every purge.");
-
#ifdef DF_PATH_TRACKING
static int g_NumCachedPaths = 0;
static int g_NumFreedPaths = 0;
@@ -40,11 +39,30 @@ static const int kSmallMIP = 32;
static const int kMediumMIP = 78;
static const int kLargeMIP = 192;
+// Callback to clear out internal path cache when eviction occurs
+void GrAADistanceFieldPathRenderer::HandleEviction(GrBatchAtlas::AtlasID id, void* pr) {
+ GrAADistanceFieldPathRenderer* dfpr = (GrAADistanceFieldPathRenderer*)pr;
+ // remove any paths that use this plot
+ PathDataList::Iter iter;
+ iter.init(dfpr->fPathList, PathDataList::Iter::kHead_IterStart);
+ PathData* pathData;
+ while ((pathData = iter.get())) {
+ iter.next();
+ if (id == pathData->fID) {
+ dfpr->fPathCache.remove(pathData->fKey);
+ dfpr->fPathList.remove(pathData);
+ SkDELETE(pathData);
+#ifdef DF_PATH_TRACKING
+ ++g_NumFreedPaths;
+#endif
+ }
+ }
+}
+
////////////////////////////////////////////////////////////////////////////////
GrAADistanceFieldPathRenderer::GrAADistanceFieldPathRenderer(GrContext* context)
: fContext(context)
- , fAtlas(NULL)
- , fEffectFlags(kInvalid_DistanceFieldEffectFlag) {
+ , fAtlas(NULL) {
}
GrAADistanceFieldPathRenderer::~GrAADistanceFieldPathRenderer() {
@@ -56,7 +74,6 @@ GrAADistanceFieldPathRenderer::~GrAADistanceFieldPathRenderer() {
fPathList.remove(pathData);
SkDELETE(pathData);
}
-
SkDELETE(fAtlas);
#ifdef DF_PATH_TRACKING
@@ -89,7 +106,7 @@ bool GrAADistanceFieldPathRenderer::canDrawPath(const GrDrawTarget* target,
SkScalar maxScale = viewMatrix.getMaxScale();
const SkRect& bounds = path.getBounds();
SkScalar maxDim = SkMaxScalar(bounds.width(), bounds.height());
- return maxDim < 64.f && maxDim*maxScale < 256.f;
+ return maxDim < 64.f && maxDim * maxScale < 256.f;
}
@@ -103,285 +120,501 @@ GrAADistanceFieldPathRenderer::onGetStencilSupport(const GrDrawTarget*,
////////////////////////////////////////////////////////////////////////////////
-bool GrAADistanceFieldPathRenderer::onDrawPath(GrDrawTarget* target,
- GrPipelineBuilder* pipelineBuilder,
- GrColor color,
- const SkMatrix& viewMatrix,
- const SkPath& path,
- const SkStrokeRec& stroke,
- bool antiAlias) {
- // we've already bailed on inverse filled paths, so this is safe
- if (path.isEmpty()) {
- return true;
+// padding around path bounds to allow for antialiased pixels
+static const SkScalar kAntiAliasPad = 1.0f;
+
+class AADistanceFieldPathBatch : public GrBatch {
+public:
+ typedef GrAADistanceFieldPathRenderer::PathData PathData;
+ typedef SkTDynamicHash<PathData, PathData::Key> PathCache;
+ typedef GrAADistanceFieldPathRenderer::PathDataList PathDataList;
+
+ struct Geometry {
+ Geometry(const SkStrokeRec& stroke) : fStroke(stroke) {}
+ SkPath fPath;
+ SkStrokeRec fStroke;
+ bool fAntiAlias;
+ PathData* fPathData;
+ };
+
+ static GrBatch* Create(const Geometry& geometry, GrColor color, const SkMatrix& viewMatrix,
+ GrBatchAtlas* atlas, PathCache* pathCache, PathDataList* pathList) {
+ return SkNEW_ARGS(AADistanceFieldPathBatch, (geometry, color, viewMatrix,
+ atlas, pathCache, pathList));
}
- SkASSERT(fContext);
+ const char* name() const SK_OVERRIDE { return "AADistanceFieldPathBatch"; }
- // get mip level
- SkScalar maxScale = viewMatrix.getMaxScale();
- const SkRect& bounds = path.getBounds();
- SkScalar maxDim = SkMaxScalar(bounds.width(), bounds.height());
- SkScalar size = maxScale*maxDim;
- uint32_t desiredDimension;
- if (size <= kSmallMIP) {
- desiredDimension = kSmallMIP;
- } else if (size <= kMediumMIP) {
- desiredDimension = kMediumMIP;
- } else {
- desiredDimension = kLargeMIP;
+ void getInvariantOutputColor(GrInitInvariantOutput* out) const SK_OVERRIDE {
+ out->setKnownFourComponents(fBatch.fColor);
}
- // check to see if path is cached
- // TODO: handle stroked vs. filled version of same path
- PathData::Key key = { path.getGenerationID(), desiredDimension };
- PathData* pathData = fPathCache.find(key);
- if (NULL == pathData) {
- SkScalar scale = desiredDimension/maxDim;
- pathData = this->addPathToAtlas(path, stroke, antiAlias, desiredDimension, scale);
- if (NULL == pathData) {
- return false;
+ void getInvariantOutputCoverage(GrInitInvariantOutput* out) const SK_OVERRIDE {
+ out->setUnknownSingleComponent();
+ }
+
+ void initBatchTracker(const GrPipelineInfo& init) SK_OVERRIDE {
+ // Handle any color overrides
+ if (init.fColorIgnored) {
+ fBatch.fColor = GrColor_ILLEGAL;
+ } else if (GrColor_ILLEGAL != init.fOverrideColor) {
+ fBatch.fColor = init.fOverrideColor;
}
+
+ // setup batch properties
+ fBatch.fColorIgnored = init.fColorIgnored;
+ fBatch.fUsesLocalCoords = init.fUsesLocalCoords;
+ fBatch.fCoverageIgnored = init.fCoverageIgnored;
}
- // use signed distance field to render
- return this->internalDrawPath(target, pipelineBuilder, color, viewMatrix, path, pathData);
-}
+ void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) SK_OVERRIDE {
+ int instanceCount = fGeoData.count();
-// padding around path bounds to allow for antialiased pixels
-const SkScalar kAntiAliasPad = 1.0f;
-
-inline bool GrAADistanceFieldPathRenderer::uploadPath(GrPlot** plot, SkIPoint16* atlasLocation,
- int width, int height, void* dfStorage) {
- *plot = fAtlas->addToAtlas(&fPlotUsage, width, height, dfStorage, atlasLocation);
-
- // if atlas full
- if (NULL == *plot) {
- if (this->freeUnusedPlot()) {
- *plot = fAtlas->addToAtlas(&fPlotUsage, width, height, dfStorage, atlasLocation);
- if (*plot) {
- return true;
- }
+ SkMatrix invert;
+ if (this->usesLocalCoords() && !this->viewMatrix().invert(&invert)) {
+ SkDebugf("Could not invert viewmatrix\n");
+ return;
}
- if (c_DumpPathCache) {
-#ifdef SK_DEVELOPER
- GrTexture* texture = fAtlas->getTexture();
- texture->surfacePriv().savePixels("pathcache.png");
-#endif
+ uint32_t flags = 0;
+ flags |= this->viewMatrix().isSimilarity() ? kSimilarity_DistanceFieldEffectFlag : 0;
+
+ GrTextureParams params(SkShader::kRepeat_TileMode, GrTextureParams::kBilerp_FilterMode);
+
+ // Setup GrGeometryProcessor
+ GrBatchAtlas* atlas = fAtlas;
+ SkAutoTUnref<GrGeometryProcessor> dfProcessor(
+ GrDistanceFieldNoGammaTextureEffect::Create(this->color(),
+ this->viewMatrix(),
+ atlas->getTexture(),
+ params,
+ flags,
+ false));
+
+ this->initDraw(batchTarget, dfProcessor, pipeline);
+
+ // allocate vertices
+ size_t vertexStride = dfProcessor->getVertexStride();
+ SkASSERT(vertexStride == 2 * sizeof(SkPoint));
+
+ int vertexCount = GrBatchTarget::kVertsPerRect * instanceCount;
+
+ const GrVertexBuffer* vertexBuffer;
+ int firstVertex;
+
+ void* vertices = batchTarget->vertexPool()->makeSpace(vertexStride,
+ vertexCount,
+ &vertexBuffer,
+ &firstVertex);
+
+ if (!vertices) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
}
- // before we purge the cache, we must flush any accumulated draws
- fContext->flush();
+ // We may have to flush while uploading path data to the atlas, so we set up the draw here
+ const GrIndexBuffer* quadIndexBuffer = batchTarget->quadIndexBuffer();
+ int maxInstancesPerDraw = quadIndexBuffer->maxQuads();
+
+ GrDrawTarget::DrawInfo drawInfo;
+ drawInfo.setPrimitiveType(kTriangles_GrPrimitiveType);
+ drawInfo.setStartVertex(0);
+ drawInfo.setStartIndex(0);
+ drawInfo.setVerticesPerInstance(GrBatchTarget::kVertsPerRect);
+ drawInfo.setIndicesPerInstance(GrBatchTarget::kIndicesPerRect);
+ drawInfo.adjustStartVertex(firstVertex);
+ drawInfo.setVertexBuffer(vertexBuffer);
+ drawInfo.setIndexBuffer(quadIndexBuffer);
+
+ int instancesToFlush = 0;
+ for (int i = 0; i < instanceCount; i++) {
+ Geometry& args = fGeoData[i];
+
+ // get mip level
+ SkScalar maxScale = this->viewMatrix().getMaxScale();
+ const SkRect& bounds = args.fPath.getBounds();
+ SkScalar maxDim = SkMaxScalar(bounds.width(), bounds.height());
+ SkScalar size = maxScale * maxDim;
+ uint32_t desiredDimension;
+ if (size <= kSmallMIP) {
+ desiredDimension = kSmallMIP;
+ } else if (size <= kMediumMIP) {
+ desiredDimension = kMediumMIP;
+ } else {
+ desiredDimension = kLargeMIP;
+ }
- if (this->freeUnusedPlot()) {
- *plot = fAtlas->addToAtlas(&fPlotUsage, width, height, dfStorage, atlasLocation);
- if (*plot) {
- return true;
+ // check to see if path is cached
+ // TODO: handle stroked vs. filled version of same path
+ PathData::Key key = { args.fPath.getGenerationID(), desiredDimension };
+ args.fPathData = fPathCache->find(key);
+ if (NULL == args.fPathData || !atlas->hasID(args.fPathData->fID)) {
+ // Remove the stale cache entry
+ if (args.fPathData) {
+ fPathCache->remove(args.fPathData->fKey);
+ fPathList->remove(args.fPathData);
+ SkDELETE(args.fPathData);
+ }
+ SkScalar scale = desiredDimension/maxDim;
+ args.fPathData = SkNEW(PathData);
+ if (!this->addPathToAtlas(batchTarget,
+ dfProcessor,
+ pipeline,
+ &drawInfo,
+ &instancesToFlush,
+ maxInstancesPerDraw,
+ atlas,
+ args.fPathData,
+ args.fPath,
+ args.fStroke,
+ args.fAntiAlias,
+ desiredDimension,
+ scale)) {
+ SkDebugf("Can't rasterize path\n");
+ return;
+ }
}
- }
- return false;
- }
- return true;
-}
-GrAADistanceFieldPathRenderer::PathData* GrAADistanceFieldPathRenderer::addPathToAtlas(
- const SkPath& path,
- const SkStrokeRec& stroke,
- bool antiAlias,
- uint32_t dimension,
- SkScalar scale) {
-
- // generate distance field and add to atlas
- if (NULL == fAtlas) {
- SkISize textureSize = SkISize::Make(ATLAS_TEXTURE_WIDTH, ATLAS_TEXTURE_HEIGHT);
- fAtlas = SkNEW_ARGS(GrAtlas, (fContext->getGpu(), kAlpha_8_GrPixelConfig,
- kNone_GrSurfaceFlags, textureSize,
- NUM_PLOTS_X, NUM_PLOTS_Y, false));
- if (NULL == fAtlas) {
- return NULL;
+ atlas->setLastRefToken(args.fPathData->fID, batchTarget->currentToken());
+
+ // Now set vertices
+ intptr_t offset = reinterpret_cast<intptr_t>(vertices);
+ offset += i * GrBatchTarget::kVertsPerRect * vertexStride;
+ SkPoint* positions = reinterpret_cast<SkPoint*>(offset);
+ this->drawPath(batchTarget,
+ atlas,
+ pipeline,
+ dfProcessor,
+ positions,
+ vertexStride,
+ this->viewMatrix(),
+ args.fPath,
+ args.fPathData);
+ instancesToFlush++;
}
+
+ this->flush(batchTarget, dfProcessor, pipeline, &drawInfo, instancesToFlush,
+ maxInstancesPerDraw);
}
-
- const SkRect& bounds = path.getBounds();
-
- // generate bounding rect for bitmap draw
- SkRect scaledBounds = bounds;
- // scale to mip level size
- scaledBounds.fLeft *= scale;
- scaledBounds.fTop *= scale;
- scaledBounds.fRight *= scale;
- scaledBounds.fBottom *= scale;
- // move the origin to an integer boundary (gives better results)
- SkScalar dx = SkScalarFraction(scaledBounds.fLeft);
- SkScalar dy = SkScalarFraction(scaledBounds.fTop);
- scaledBounds.offset(-dx, -dy);
- // get integer boundary
- SkIRect devPathBounds;
- scaledBounds.roundOut(&devPathBounds);
- // pad to allow room for antialiasing
- devPathBounds.outset(SkScalarCeilToInt(kAntiAliasPad), SkScalarCeilToInt(kAntiAliasPad));
- // move origin to upper left corner
- devPathBounds.offsetTo(0,0);
-
- // draw path to bitmap
- SkMatrix drawMatrix;
- drawMatrix.setTranslate(-bounds.left(), -bounds.top());
- drawMatrix.postScale(scale, scale);
- drawMatrix.postTranslate(kAntiAliasPad, kAntiAliasPad);
- GrSWMaskHelper helper(fContext);
-
- if (!helper.init(devPathBounds, &drawMatrix)) {
- return NULL;
- }
- helper.draw(path, stroke, SkRegion::kReplace_Op, antiAlias, 0xFF);
-
- // generate signed distance field
- devPathBounds.outset(SK_DistanceFieldPad, SK_DistanceFieldPad);
- int width = devPathBounds.width();
- int height = devPathBounds.height();
- SkAutoSMalloc<1024> dfStorage(width*height*sizeof(unsigned char));
- helper.toSDF((unsigned char*) dfStorage.get());
-
- // add to atlas
- GrPlot* plot;
- SkIPoint16 atlasLocation;
- if (!this->uploadPath(&plot, &atlasLocation, width, height, dfStorage.get())) {
- return NULL;
+
+ SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
+
+private:
+ AADistanceFieldPathBatch(const Geometry& geometry, GrColor color, const SkMatrix& viewMatrix,
+ GrBatchAtlas* atlas,
+ PathCache* pathCache, PathDataList* pathList) {
+ this->initClassID<AADistanceFieldPathBatch>();
+ fBatch.fColor = color;
+ fBatch.fViewMatrix = viewMatrix;
+ fGeoData.push_back(geometry);
+ fGeoData.back().fPathData = NULL;
+
+ fAtlas = atlas;
+ fPathCache = pathCache;
+ fPathList = pathList;
}
- // add to cache
- PathData* pathData = SkNEW(PathData);
- pathData->fKey.fGenID = path.getGenerationID();
- pathData->fKey.fDimension = dimension;
- pathData->fScale = scale;
- pathData->fPlot = plot;
- // change the scaled rect to match the size of the inset distance field
- scaledBounds.fRight = scaledBounds.fLeft +
- SkIntToScalar(devPathBounds.width() - 2*SK_DistanceFieldInset);
- scaledBounds.fBottom = scaledBounds.fTop +
- SkIntToScalar(devPathBounds.height() - 2*SK_DistanceFieldInset);
- // shift the origin to the correct place relative to the distance field
- // need to also restore the fractional translation
- scaledBounds.offset(-SkIntToScalar(SK_DistanceFieldInset) - kAntiAliasPad + dx,
- -SkIntToScalar(SK_DistanceFieldInset) - kAntiAliasPad + dy);
- pathData->fBounds = scaledBounds;
- // origin we render from is inset from distance field edge
- atlasLocation.fX += SK_DistanceFieldInset;
- atlasLocation.fY += SK_DistanceFieldInset;
- pathData->fAtlasLocation = atlasLocation;
-
- fPathCache.add(pathData);
- fPathList.addToTail(pathData);
+ bool addPathToAtlas(GrBatchTarget* batchTarget,
+ const GrGeometryProcessor* dfProcessor,
+ const GrPipeline* pipeline,
+ GrDrawTarget::DrawInfo* drawInfo,
+ int* instancesToFlush,
+ int maxInstancesPerDraw,
+ GrBatchAtlas* atlas,
+ PathData* pathData,
+ const SkPath& path,
+ const SkStrokeRec&
+ stroke, bool antiAlias,
+ uint32_t dimension,
+ SkScalar scale) {
+ const SkRect& bounds = path.getBounds();
+
+ // generate bounding rect for bitmap draw
+ SkRect scaledBounds = bounds;
+ // scale to mip level size
+ scaledBounds.fLeft *= scale;
+ scaledBounds.fTop *= scale;
+ scaledBounds.fRight *= scale;
+ scaledBounds.fBottom *= scale;
+ // move the origin to an integer boundary (gives better results)
+ SkScalar dx = SkScalarFraction(scaledBounds.fLeft);
+ SkScalar dy = SkScalarFraction(scaledBounds.fTop);
+ scaledBounds.offset(-dx, -dy);
+ // get integer boundary
+ SkIRect devPathBounds;
+ scaledBounds.roundOut(&devPathBounds);
+ // pad to allow room for antialiasing
+ devPathBounds.outset(SkScalarCeilToInt(kAntiAliasPad), SkScalarCeilToInt(kAntiAliasPad));
+ // move origin to upper left corner
+ devPathBounds.offsetTo(0,0);
+
+ // draw path to bitmap
+ SkMatrix drawMatrix;
+ drawMatrix.setTranslate(-bounds.left(), -bounds.top());
+ drawMatrix.postScale(scale, scale);
+ drawMatrix.postTranslate(kAntiAliasPad, kAntiAliasPad);
+
+ // setup bitmap backing
+ // Now translate so the bound's UL corner is at the origin
+ drawMatrix.postTranslate(-devPathBounds.fLeft * SK_Scalar1,
+ -devPathBounds.fTop * SK_Scalar1);
+ SkIRect pathBounds = SkIRect::MakeWH(devPathBounds.width(),
+ devPathBounds.height());
+
+ SkBitmap bmp;
+ const SkImageInfo bmImageInfo = SkImageInfo::MakeA8(pathBounds.fRight,
+ pathBounds.fBottom);
+ if (!bmp.tryAllocPixels(bmImageInfo)) {
+ return false;
+ }
+
+ sk_bzero(bmp.getPixels(), bmp.getSafeSize());
+
+ // rasterize path
+ SkPaint paint;
+ if (stroke.isHairlineStyle()) {
+ paint.setStyle(SkPaint::kStroke_Style);
+ paint.setStrokeWidth(SK_Scalar1);
+ } else {
+ if (stroke.isFillStyle()) {
+ paint.setStyle(SkPaint::kFill_Style);
+ } else {
+ paint.setStyle(SkPaint::kStroke_Style);
+ paint.setStrokeJoin(stroke.getJoin());
+ paint.setStrokeCap(stroke.getCap());
+ paint.setStrokeWidth(stroke.getWidth());
+ }
+ }
+ paint.setAntiAlias(antiAlias);
+
+ SkDraw draw;
+ sk_bzero(&draw, sizeof(draw));
+
+ SkRasterClip rasterClip;
+ rasterClip.setRect(pathBounds);
+ draw.fRC = &rasterClip;
+ draw.fClip = &rasterClip.bwRgn();
+ draw.fMatrix = &drawMatrix;
+ draw.fBitmap = &bmp;
+
+ draw.drawPathCoverage(path, paint);
+
+ // generate signed distance field
+ devPathBounds.outset(SK_DistanceFieldPad, SK_DistanceFieldPad);
+ int width = devPathBounds.width();
+ int height = devPathBounds.height();
+ // TODO We should really generate this directly into the plot somehow
+ SkAutoSMalloc<1024> dfStorage(width * height * sizeof(unsigned char));
+
+ // Generate signed distance field
+ {
+ SkAutoLockPixels alp(bmp);
+
+ SkGenerateDistanceFieldFromA8Image((unsigned char*)dfStorage.get(),
+ (const unsigned char*)bmp.getPixels(),
+ bmp.width(), bmp.height(), bmp.rowBytes());
+ }
+
+ // add to atlas
+ SkIPoint16 atlasLocation;
+ GrBatchAtlas::AtlasID id;
+ bool success = atlas->addToAtlas(&id, batchTarget, width, height, dfStorage.get(),
+ &atlasLocation);
+ if (!success) {
+ this->flush(batchTarget, dfProcessor, pipeline, drawInfo, *instancesToFlush,
+ maxInstancesPerDraw);
+ this->initDraw(batchTarget, dfProcessor, pipeline);
+ *instancesToFlush = 0;
+
+ SkDEBUGCODE(success =) atlas->addToAtlas(&id, batchTarget, width, height,
+ dfStorage.get(), &atlasLocation);
+ SkASSERT(success);
+
+ }
+
+ // add to cache
+ pathData->fKey.fGenID = path.getGenerationID();
+ pathData->fKey.fDimension = dimension;
+ pathData->fScale = scale;
+ pathData->fID = id;
+ // change the scaled rect to match the size of the inset distance field
+ scaledBounds.fRight = scaledBounds.fLeft +
+ SkIntToScalar(devPathBounds.width() - 2*SK_DistanceFieldInset);
+ scaledBounds.fBottom = scaledBounds.fTop +
+ SkIntToScalar(devPathBounds.height() - 2*SK_DistanceFieldInset);
+ // shift the origin to the correct place relative to the distance field
+ // need to also restore the fractional translation
+ scaledBounds.offset(-SkIntToScalar(SK_DistanceFieldInset) - kAntiAliasPad + dx,
+ -SkIntToScalar(SK_DistanceFieldInset) - kAntiAliasPad + dy);
+ pathData->fBounds = scaledBounds;
+ // origin we render from is inset from distance field edge
+ atlasLocation.fX += SK_DistanceFieldInset;
+ atlasLocation.fY += SK_DistanceFieldInset;
+ pathData->fAtlasLocation = atlasLocation;
+
+ fPathCache->add(pathData);
+ fPathList->addToTail(pathData);
#ifdef DF_PATH_TRACKING
- ++g_NumCachedPaths;
+ ++g_NumCachedPaths;
#endif
+ return true;
+ }
- return pathData;
-}
+ void drawPath(GrBatchTarget* target,
+ GrBatchAtlas* atlas,
+ const GrPipeline* pipeline,
+ const GrGeometryProcessor* gp,
+ SkPoint* positions,
+ size_t vertexStride,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ const PathData* pathData) {
+ GrTexture* texture = atlas->getTexture();
+
+ SkScalar dx = pathData->fBounds.fLeft;
+ SkScalar dy = pathData->fBounds.fTop;
+ SkScalar width = pathData->fBounds.width();
+ SkScalar height = pathData->fBounds.height();
+
+ SkScalar invScale = 1.0f / pathData->fScale;
+ dx *= invScale;
+ dy *= invScale;
+ width *= invScale;
+ height *= invScale;
+
+ SkFixed tx = SkIntToFixed(pathData->fAtlasLocation.fX);
+ SkFixed ty = SkIntToFixed(pathData->fAtlasLocation.fY);
+ SkFixed tw = SkScalarToFixed(pathData->fBounds.width());
+ SkFixed th = SkScalarToFixed(pathData->fBounds.height());
+
+ // vertex positions
+ // TODO make the vertex attributes a struct
+ SkRect r = SkRect::MakeXYWH(dx, dy, width, height);
+ positions->setRectFan(r.left(), r.top(), r.right(), r.bottom(), vertexStride);
+
+ // vertex texture coords
+ SkPoint* textureCoords = positions + 1;
+ textureCoords->setRectFan(SkFixedToFloat(texture->texturePriv().normalizeFixedX(tx)),
+ SkFixedToFloat(texture->texturePriv().normalizeFixedY(ty)),
+ SkFixedToFloat(texture->texturePriv().normalizeFixedX(tx + tw)),
+ SkFixedToFloat(texture->texturePriv().normalizeFixedY(ty + th)),
+ vertexStride);
+ }
-bool GrAADistanceFieldPathRenderer::freeUnusedPlot() {
- // find an unused plot
- GrPlot* plot = fAtlas->getUnusedPlot();
- if (NULL == plot) {
- return false;
+ void initDraw(GrBatchTarget* batchTarget,
+ const GrGeometryProcessor* dfProcessor,
+ const GrPipeline* pipeline) {
+ batchTarget->initDraw(dfProcessor, pipeline);
+
+ // TODO remove this when batch is everywhere
+ GrPipelineInfo init;
+ init.fColorIgnored = fBatch.fColorIgnored;
+ init.fOverrideColor = GrColor_ILLEGAL;
+ init.fCoverageIgnored = fBatch.fCoverageIgnored;
+ init.fUsesLocalCoords = this->usesLocalCoords();
+ dfProcessor->initBatchTracker(batchTarget->currentBatchTracker(), init);
}
- plot->resetRects();
- // remove any paths that use this plot
- PathDataList::Iter iter;
- iter.init(fPathList, PathDataList::Iter::kHead_IterStart);
- PathData* pathData;
- while ((pathData = iter.get())) {
- iter.next();
- if (plot == pathData->fPlot) {
- fPathCache.remove(pathData->fKey);
- fPathList.remove(pathData);
- SkDELETE(pathData);
-#ifdef DF_PATH_TRACKING
- ++g_NumFreedPaths;
-#endif
+ void flush(GrBatchTarget* batchTarget,
+ const GrGeometryProcessor* dfProcessor,
+ const GrPipeline* pipeline,
+ GrDrawTarget::DrawInfo* drawInfo,
+ int instanceCount,
+ int maxInstancesPerDraw) {
+ while (instanceCount) {
+ drawInfo->setInstanceCount(SkTMin(instanceCount, maxInstancesPerDraw));
+ drawInfo->setVertexCount(drawInfo->instanceCount() * drawInfo->verticesPerInstance());
+ drawInfo->setIndexCount(drawInfo->instanceCount() * drawInfo->indicesPerInstance());
+
+ batchTarget->draw(*drawInfo);
+
+ drawInfo->setStartVertex(drawInfo->startVertex() + drawInfo->vertexCount());
+ instanceCount -= drawInfo->instanceCount();
+ }
+ }
+
+ GrColor color() const { return fBatch.fColor; }
+ const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+
+ bool onCombineIfPossible(GrBatch* t) SK_OVERRIDE {
+ AADistanceFieldPathBatch* that = t->cast<AADistanceFieldPathBatch>();
+
+ // TODO we could actually probably do a bunch of this work on the CPU, ie map viewMatrix,
+ // maybe upload color via attribute
+ if (this->color() != that->color()) {
+ return false;
+ }
+
+ if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
}
+
+ fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
+ return true;
}
-
- // tell the atlas to free the plot
- GrAtlas::RemovePlot(&fPlotUsage, plot);
-
- return true;
-}
-bool GrAADistanceFieldPathRenderer::internalDrawPath(GrDrawTarget* target,
- GrPipelineBuilder* pipelineBuilder,
- GrColor color,
- const SkMatrix& viewMatrix,
- const SkPath& path,
- const PathData* pathData) {
- GrTexture* texture = fAtlas->getTexture();
- GrPipelineBuilder::AutoRestoreFragmentProcessors arfp(pipelineBuilder);
-
- SkASSERT(pathData->fPlot);
- GrDrawTarget::DrawToken drawToken = target->getCurrentDrawToken();
- pathData->fPlot->setDrawToken(drawToken);
-
- // set up any flags
- uint32_t flags = 0;
- flags |= viewMatrix.isSimilarity() ? kSimilarity_DistanceFieldEffectFlag : 0;
-
- GrTextureParams params(SkShader::kRepeat_TileMode, GrTextureParams::kBilerp_FilterMode);
- if (flags != fEffectFlags || fCachedGeometryProcessor->color() != color ||
- !fCachedGeometryProcessor->viewMatrix().cheapEqualTo(viewMatrix)) {
- fCachedGeometryProcessor.reset(GrDistanceFieldNoGammaTextureEffect::Create(color,
- viewMatrix,
- texture,
- params,
- flags,
- false));
- fEffectFlags = flags;
+ struct BatchTracker {
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ };
+
+ BatchTracker fBatch;
+ SkSTArray<1, Geometry, true> fGeoData;
+ GrBatchAtlas* fAtlas;
+ PathCache* fPathCache;
+ PathDataList* fPathList;
+};
+
+bool GrAADistanceFieldPathRenderer::onDrawPath(GrDrawTarget* target,
+ GrPipelineBuilder* pipelineBuilder,
+ GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ const SkStrokeRec& stroke,
+ bool antiAlias) {
+ // we've already bailed on inverse filled paths, so this is safe
+ if (path.isEmpty()) {
+ return true;
}
- void* vertices = NULL;
- bool success = target->reserveVertexAndIndexSpace(4,
- fCachedGeometryProcessor->getVertexStride(),
- 0, &vertices, NULL);
- SkASSERT(fCachedGeometryProcessor->getVertexStride() == 2 * sizeof(SkPoint));
- GrAlwaysAssert(success);
-
- SkScalar dx = pathData->fBounds.fLeft;
- SkScalar dy = pathData->fBounds.fTop;
- SkScalar width = pathData->fBounds.width();
- SkScalar height = pathData->fBounds.height();
-
- SkScalar invScale = 1.0f/pathData->fScale;
- dx *= invScale;
- dy *= invScale;
- width *= invScale;
- height *= invScale;
-
- SkFixed tx = SkIntToFixed(pathData->fAtlasLocation.fX);
- SkFixed ty = SkIntToFixed(pathData->fAtlasLocation.fY);
- SkFixed tw = SkScalarToFixed(pathData->fBounds.width());
- SkFixed th = SkScalarToFixed(pathData->fBounds.height());
-
- // vertex positions
- SkRect r = SkRect::MakeXYWH(dx, dy, width, height);
- size_t vertSize = 2 * sizeof(SkPoint);
- SkPoint* positions = reinterpret_cast<SkPoint*>(vertices);
- positions->setRectFan(r.left(), r.top(), r.right(), r.bottom(), vertSize);
-
- // vertex texture coords
- intptr_t intPtr = reinterpret_cast<intptr_t>(positions);
- SkPoint* textureCoords = reinterpret_cast<SkPoint*>(intPtr + vertSize - sizeof(SkPoint));
- textureCoords->setRectFan(SkFixedToFloat(texture->texturePriv().normalizeFixedX(tx)),
- SkFixedToFloat(texture->texturePriv().normalizeFixedY(ty)),
- SkFixedToFloat(texture->texturePriv().normalizeFixedX(tx + tw)),
- SkFixedToFloat(texture->texturePriv().normalizeFixedY(ty + th)),
- vertSize);
-
- viewMatrix.mapRect(&r);
- target->setIndexSourceToBuffer(fContext->getQuadIndexBuffer());
- target->drawIndexedInstances(pipelineBuilder, fCachedGeometryProcessor.get(),
- kTriangles_GrPrimitiveType, 1, 4, 6, &r);
- target->resetVertexSource();
-
+ SkASSERT(fContext);
+
+ if (!fAtlas) {
+ // Create a new atlas
+ GrSurfaceDesc desc;
+ desc.fFlags = kNone_GrSurfaceFlags;
+ desc.fWidth = ATLAS_TEXTURE_WIDTH;
+ desc.fHeight = ATLAS_TEXTURE_HEIGHT;
+ desc.fConfig = kAlpha_8_GrPixelConfig;
+
+ // We don't want to flush the context so we claim we're in the middle of flushing so as to
+ // guarantee we do not recieve a texture with pending IO
+ GrTexture* texture = fContext->refScratchTexture(desc, GrContext::kApprox_ScratchTexMatch,
+ true);
+ if (texture) {
+ fAtlas = SkNEW_ARGS(GrBatchAtlas, (texture, NUM_PLOTS_X, NUM_PLOTS_Y));
+ } else {
+ return false;
+ }
+ fAtlas->registerEvictionCallback(&GrAADistanceFieldPathRenderer::HandleEviction,
+ (void*)this);
+ }
+
+ AADistanceFieldPathBatch::Geometry geometry(stroke);
+ geometry.fPath = path;
+ geometry.fAntiAlias = antiAlias;
+
+ SkAutoTUnref<GrBatch> batch(AADistanceFieldPathBatch::Create(geometry, color, viewMatrix,
+ fAtlas, &fPathCache, &fPathList));
+
+ SkRect bounds = path.getBounds();
+ viewMatrix.mapRect(&bounds);
+ target->drawBatch(pipelineBuilder, batch, &bounds);
+
return true;
}
diff --git a/src/gpu/GrAADistanceFieldPathRenderer.h b/src/gpu/GrAADistanceFieldPathRenderer.h
index c145e2fe3f..6d7c38d6c3 100755
--- a/src/gpu/GrAADistanceFieldPathRenderer.h
+++ b/src/gpu/GrAADistanceFieldPathRenderer.h
@@ -9,7 +9,7 @@
#ifndef GrAADistanceFieldPathRenderer_DEFINED
#define GrAADistanceFieldPathRenderer_DEFINED
-#include "GrAtlas.h"
+#include "GrBatchAtlas.h"
#include "GrPathRenderer.h"
#include "GrRect.h"
@@ -17,7 +17,6 @@
#include "SkTDynamicHash.h"
class GrContext;
-class GrPlot;
class GrAADistanceFieldPathRenderer : public GrPathRenderer {
public:
@@ -55,11 +54,11 @@ private:
return other.fGenID == fGenID && other.fDimension == fDimension;
}
};
- Key fKey;
- SkScalar fScale;
- GrPlot* fPlot;
- SkRect fBounds;
- SkIPoint16 fAtlasLocation;
+ Key fKey;
+ SkScalar fScale;
+ GrBatchAtlas::AtlasID fID;
+ SkRect fBounds;
+ SkIPoint16 fAtlasLocation;
SK_DECLARE_INTERNAL_LLIST_INTERFACE(PathData);
static inline const Key& GetKey(const PathData& data) {
@@ -70,26 +69,19 @@ private:
return SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(&key), sizeof(key));
}
};
+
+ static void HandleEviction(GrBatchAtlas::AtlasID, void*);
+
typedef SkTInternalLList<PathData> PathDataList;
GrContext* fContext;
- GrAtlas* fAtlas;
- SkAutoTUnref<GrGeometryProcessor> fCachedGeometryProcessor;
- // current set of flags used to create the cached geometry processor
- uint32_t fEffectFlags;
- GrAtlas::ClientPlotUsage fPlotUsage;
+ GrBatchAtlas* fAtlas;
SkTDynamicHash<PathData, PathData::Key> fPathCache;
PathDataList fPathList;
- bool internalDrawPath(GrDrawTarget*, GrPipelineBuilder*, GrColor, const SkMatrix& viewMatrix,
- const SkPath& path, const PathData* pathData);
- inline bool uploadPath(GrPlot** plot, SkIPoint16* atlasLocation, int width, int height,
- void* dfStorage);
- PathData* addPathToAtlas(const SkPath& path, const SkStrokeRec& stroke, bool antiAlias,
- uint32_t dimension, SkScalar scale);
- bool freeUnusedPlot();
-
typedef GrPathRenderer INHERITED;
+
+ friend class AADistanceFieldPathBatch;
};
#endif
diff --git a/src/gpu/GrBatch.h b/src/gpu/GrBatch.h
index 48327c61c2..7d621e046a 100644
--- a/src/gpu/GrBatch.h
+++ b/src/gpu/GrBatch.h
@@ -10,6 +10,7 @@
#include <new>
// TODO remove this header when we move entirely to batch
+#include "GrDrawTarget.h"
#include "GrGeometryProcessor.h"
#include "SkRefCnt.h"
#include "SkThread.h"
@@ -42,7 +43,7 @@ struct GrInitInvariantOutput;
class GrBatch : public SkRefCnt {
public:
SK_DECLARE_INST_COUNT(GrBatch)
- GrBatch() : fNumberOfDraws(0) { SkDEBUGCODE(fUsed = false;) }
+ GrBatch() : fClassID(kIllegalBatchClassID), fNumberOfDraws(0) { SkDEBUGCODE(fUsed = false;) }
virtual ~GrBatch() {}
virtual const char* name() const = 0;
diff --git a/src/gpu/GrBatchAtlas.cpp b/src/gpu/GrBatchAtlas.cpp
new file mode 100644
index 0000000000..4b242820e3
--- /dev/null
+++ b/src/gpu/GrBatchAtlas.cpp
@@ -0,0 +1,369 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBatchAtlas.h"
+#include "GrBatchTarget.h"
+#include "GrGpu.h"
+#include "GrRectanizer.h"
+#include "GrTracing.h"
+
+// for testing
+#define ATLAS_STATS 0
+#if ATLAS_STATS
+static int g_UploadCount = 0;
+#endif
+
+static inline void adjust_for_offset(SkIPoint16* loc, const SkIPoint16& offset) {
+ loc->fX += offset.fX;
+ loc->fY += offset.fY;
+}
+
+static GrBatchAtlas::AtlasID create_id(int index, int generation) {
+ // Generation ID can roll over because we only check for equality
+ SkASSERT(index < (1 << 16));
+ return generation << 16 | index;
+}
+
+// The backing GrTexture for a GrBatchAtlas is broken into a spatial grid of GrBatchPlots.
+// The GrBatchPlots keep track of subimage placement via their GrRectanizer. In turn, a GrBatchPlot
+// manages the lifetime of its data using two tokens, a last ref token and a last upload token.
+// Once a GrBatchPlot is "full" (i.e. there is no room for the new subimage according to the
+// GrRectanizer), it can no longer be used unless the last ref on the GrPlot has already been
+// flushed through to the gpu.
+
+class BatchPlot : public SkRefCnt {
+public:
+ typedef GrBatchAtlas::BatchToken BatchToken;
+ SK_DECLARE_INST_COUNT(BatchPlot);
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(BatchPlot);
+
+ // index() refers to the index of the plot in the owning GrAtlas's plot array. genID() is a
+ // monotonically incrementing number which is bumped every time the cpu backing store is
+    // wiped, or when the plot itself is evicted from the atlas (i.e. there is continuity in genID()
+ // across atlas spills)
+ int index() const { return fIndex; }
+ int genID() const { return fGenID; }
+ GrBatchAtlas::AtlasID id() { return fID; }
+
+ GrTexture* texture() const { return fTexture; }
+
+ bool addSubImage(int width, int height, const void* image, SkIPoint16* loc, size_t rowBytes) {
+ if (!fRects->addRect(width, height, loc)) {
+ return false;
+ }
+
+ SkASSERT(fData);
+ const unsigned char* imagePtr = (const unsigned char*)image;
+ // point ourselves at the right starting spot
+ unsigned char* dataPtr = fData;
+ dataPtr += fBytesPerPixel * fWidth * loc->fY;
+ dataPtr += fBytesPerPixel * loc->fX;
+ // copy into the data buffer
+ for (int i = 0; i < height; ++i) {
+ memcpy(dataPtr, imagePtr, rowBytes);
+ dataPtr += fBytesPerPixel * fWidth;
+ imagePtr += rowBytes;
+ }
+
+ fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height);
+ adjust_for_offset(loc, fOffset);
+ SkDEBUGCODE(fDirty = true;)
+
+#if ATLAS_STATS
+ ++g_UploadCount;
+#endif
+
+ return true;
+ }
+
+ // to manage the lifetime of a plot, we use two tokens. We use last upload token to know when
+ // we can 'piggy back' uploads, ie if the last upload hasn't been flushed to gpu, we don't need
+ // to issue a new upload even if we update the cpu backing store. We use lastref to determine
+ // when we can evict a plot from the cache, ie if the last ref has already flushed through
+ // the gpu then we can reuse the plot
+ BatchToken lastUploadToken() const { return fLastUpload; }
+ BatchToken lastRefToken() const { return fLastRef; }
+ void setLastUploadToken(BatchToken batchToken) { fLastUpload = batchToken; }
+ void setLastRefToken(BatchToken batchToken) { fLastRef = batchToken; }
+
+ void uploadToTexture(GrBatchTarget::TextureUploader uploader) {
+ // We should only be issuing uploads if we are in fact dirty
+ SkASSERT(fDirty);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture");
+ SkASSERT(fTexture);
+ size_t rowBytes = fBytesPerPixel * fRects->width();
+ const unsigned char* dataPtr = fData;
+ dataPtr += rowBytes * fDirtyRect.fTop;
+ dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
+ uploader.writeTexturePixels(fTexture,
+ fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
+ fDirtyRect.width(), fDirtyRect.height(),
+ fTexture->config(), dataPtr, rowBytes);
+ fDirtyRect.setEmpty();
+ SkDEBUGCODE(fDirty = false;)
+ }
+
+ void resetRects() {
+ SkASSERT(fRects);
+ fRects->reset();
+ fGenID++;
+ fID = create_id(fIndex, fGenID);
+
+ // zero out the plot
+ SkASSERT(fData);
+ memset(fData, 0, fBytesPerPixel * fWidth * fHeight);
+
+ fDirtyRect.setEmpty();
+ SkDEBUGCODE(fDirty = false;)
+ }
+
+ int x() const { return fX; }
+ int y() const { return fY; }
+
+private:
+ BatchPlot()
+ : fLastUpload(0)
+ , fLastRef(0)
+ , fIndex(-1)
+ , fGenID(-1)
+ , fID(0)
+ , fData(NULL)
+ , fWidth(0)
+ , fHeight(0)
+ , fX(0)
+ , fY(0)
+ , fTexture(NULL)
+ , fRects(NULL)
+ , fAtlas(NULL)
+ , fBytesPerPixel(1)
+ #ifdef SK_DEBUG
+ , fDirty(false)
+ #endif
+ {
+ fOffset.set(0, 0);
+ }
+
+ ~BatchPlot() {
+ SkDELETE_ARRAY(fData);
+ fData = NULL;
+ delete fRects;
+ }
+
+ void init(GrBatchAtlas* atlas, GrTexture* texture, int index, uint32_t generation,
+ int offX, int offY, int width, int height, size_t bpp) {
+ fIndex = index;
+ fGenID = generation;
+ fID = create_id(index, generation);
+ fWidth = width;
+ fHeight = height;
+ fX = offX;
+ fY = offY;
+ fRects = GrRectanizer::Factory(width, height);
+ fAtlas = atlas;
+ fOffset.set(offX * width, offY * height);
+ fBytesPerPixel = bpp;
+ fData = NULL;
+ fDirtyRect.setEmpty();
+ SkDEBUGCODE(fDirty = false;)
+ fTexture = texture;
+
+ // allocate backing store
+ fData = SkNEW_ARRAY(unsigned char, fBytesPerPixel * width * height);
+ memset(fData, 0, fBytesPerPixel * width * height);
+ }
+
+ BatchToken fLastUpload;
+ BatchToken fLastRef;
+
+ uint32_t fIndex;
+ uint32_t fGenID;
+ GrBatchAtlas::AtlasID fID;
+ unsigned char* fData;
+ int fWidth;
+ int fHeight;
+ int fX;
+ int fY;
+ GrTexture* fTexture;
+ GrRectanizer* fRects;
+ GrBatchAtlas* fAtlas;
+ SkIPoint16 fOffset; // the offset of the plot in the backing texture
+ size_t fBytesPerPixel;
+ SkIRect fDirtyRect;
+ SkDEBUGCODE(bool fDirty;)
+
+ friend class GrBatchAtlas;
+
+ typedef SkRefCnt INHERITED;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+class GrPlotUploader : public GrBatchTarget::Uploader {
+public:
+ GrPlotUploader(BatchPlot* plot)
+ : INHERITED(plot->lastUploadToken())
+ , fPlot(SkRef(plot)) {
+ SkASSERT(plot);
+ }
+
+ void upload(GrBatchTarget::TextureUploader uploader) SK_OVERRIDE {
+ fPlot->uploadToTexture(uploader);
+ }
+
+private:
+ SkAutoTUnref<BatchPlot> fPlot;
+
+ typedef GrBatchTarget::Uploader INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
+ : fTexture(texture)
+ , fNumPlotsX(numPlotsX)
+ , fNumPlotsY(numPlotsY)
+ , fPlotWidth(texture->width() / numPlotsX)
+ , fPlotHeight(texture->height() / numPlotsY) {
+ SkASSERT(fPlotWidth * fNumPlotsX == texture->width());
+ SkASSERT(fPlotHeight * fNumPlotsY == texture->height());
+
+ // We currently do not support compressed atlases...
+ SkASSERT(!GrPixelConfigIsCompressed(texture->desc().fConfig));
+
+ // set up allocated plots
+ fBPP = GrBytesPerPixel(texture->desc().fConfig);
+ fPlotArray = SkNEW_ARRAY(SkAutoTUnref<BatchPlot>, (fNumPlotsX * fNumPlotsY));
+
+ SkAutoTUnref<BatchPlot>* currPlot = fPlotArray;
+ for (int y = fNumPlotsY - 1, r = 0; y >= 0; --y, ++r) {
+ for (int x = fNumPlotsX - 1, c = 0; x >= 0; --x, ++c) {
+ int id = r * fNumPlotsX + c;
+ currPlot->reset(SkNEW(BatchPlot));
+ (*currPlot)->init(this, texture, id, 0, x, y, fPlotWidth, fPlotHeight, fBPP);
+
+ // build LRU list
+ fPlotList.addToHead(currPlot->get());
+ ++currPlot;
+ }
+ }
+}
+
+GrBatchAtlas::~GrBatchAtlas() {
+ SkSafeUnref(fTexture);
+ SkDELETE_ARRAY(fPlotArray);
+
+#if ATLAS_STATS
+ SkDebugf("Num uploads: %d\n", g_UploadCount);
+#endif
+}
+
+void GrBatchAtlas::processEviction(AtlasID id) {
+ for (int i = 0; i < fEvictionCallbacks.count(); i++) {
+ (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData);
+ }
+}
+
+void GrBatchAtlas::makeMRU(BatchPlot* plot) {
+ if (fPlotList.head() == plot) {
+ return;
+ }
+
+ fPlotList.remove(plot);
+ fPlotList.addToHead(plot);
+}
+
+inline void GrBatchAtlas::updatePlot(GrBatchTarget* batchTarget, AtlasID* id, BatchPlot* plot) {
+ this->makeMRU(plot);
+
+ // If our most recent upload has already occurred then we have to insert a new
+    // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
+ // This new update will piggy back on that previously scheduled update.
+ if (batchTarget->isIssued(plot->lastUploadToken())) {
+ plot->setLastUploadToken(batchTarget->asapToken());
+ SkAutoTUnref<GrPlotUploader> uploader(SkNEW_ARGS(GrPlotUploader, (plot)));
+ batchTarget->upload(uploader);
+ }
+ *id = plot->id();
+}
+
+bool GrBatchAtlas::addToAtlas(AtlasID* id, GrBatchTarget* batchTarget,
+ int width, int height, const void* image, SkIPoint16* loc) {
+ // We should already have a texture, TODO clean this up
+ SkASSERT(fTexture && width < fPlotWidth && height < fPlotHeight);
+
+ // now look through all allocated plots for one we can share, in Most Recently Refed order
+ GrBatchPlotList::Iter plotIter;
+ plotIter.init(fPlotList, GrBatchPlotList::Iter::kHead_IterStart);
+ BatchPlot* plot;
+ while ((plot = plotIter.get())) {
+ if (plot->addSubImage(width, height, image, loc, fBPP * width)) {
+ this->updatePlot(batchTarget, id, plot);
+ return true;
+ }
+ plotIter.next();
+ }
+
+ // If the above fails, then see if the least recently refed plot has already been flushed to the
+ // gpu
+ plotIter.init(fPlotList, GrBatchPlotList::Iter::kTail_IterStart);
+ plot = plotIter.get();
+ SkASSERT(plot);
+ if (batchTarget->isIssued(plot->lastRefToken())) {
+ this->processEviction(plot->id());
+ plot->resetRects();
+ SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc, fBPP * width);
+ SkASSERT(verify);
+ this->updatePlot(batchTarget, id, plot);
+ return true;
+ }
+
+ // The least recently refed plot hasn't been flushed to the gpu yet, however, if we have flushed
+    // it to the batch target then we can reuse it.  Our last ref token is guaranteed to be less
+    // than or equal to the current token.  If it's 'less than' the current token, then we can spin
+    // off the plot (i.e. let the batch target manage it) and create a new plot in its place in our
+    // array.  If it is equal to the currentToken, then the caller has to flush draws to the batch
+    // target so we can spin off the plot
+ if (plot->lastRefToken() == batchTarget->currentToken()) {
+ return false;
+ }
+
+ // We take an extra ref here so our plot isn't deleted when we reset its index in the array.
+ plot->ref();
+ int index = plot->index();
+ int x = plot->x();
+ int y = plot->y();
+ int generation = plot->genID();
+
+ this->processEviction(plot->id());
+ fPlotList.remove(plot);
+ SkAutoTUnref<BatchPlot>& newPlot = fPlotArray[plot->index()];
+ newPlot.reset(SkNEW(BatchPlot));
+ newPlot->init(this, fTexture, index, ++generation, x, y, fPlotWidth, fPlotHeight, fBPP);
+
+ fPlotList.addToHead(newPlot.get());
+ SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc, fBPP * width);
+ SkASSERT(verify);
+ newPlot->setLastUploadToken(batchTarget->currentToken());
+ SkAutoTUnref<GrPlotUploader> uploader(SkNEW_ARGS(GrPlotUploader, (newPlot)));
+ batchTarget->upload(uploader);
+ *id = newPlot->id();
+ plot->unref();
+ return true;
+}
+
+bool GrBatchAtlas::hasID(AtlasID id) {
+ int index = this->getIndexFromID(id);
+ SkASSERT(index < fNumPlotsX * fNumPlotsY);
+ return fPlotArray[index]->genID() == this->getGenerationFromID(id);
+}
+
+void GrBatchAtlas::setLastRefToken(AtlasID id, BatchToken batchToken) {
+ SkASSERT(this->hasID(id));
+ int index = this->getIndexFromID(id);
+ this->makeMRU(fPlotArray[index]);
+ fPlotArray[index]->setLastRefToken(batchToken);
+}
diff --git a/src/gpu/GrBatchAtlas.h b/src/gpu/GrBatchAtlas.h
new file mode 100644
index 0000000000..b514b9d74f
--- /dev/null
+++ b/src/gpu/GrBatchAtlas.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatchAtlas_DEFINED
+#define GrBatchAtlas_DEFINED
+
+#include "GrTexture.h"
+#include "SkPoint.h"
+#include "SkTDArray.h"
+#include "SkTInternalLList.h"
+
+class BatchPlot;
+class GrBatchTarget;
+class GrRectanizer;
+
+typedef SkTInternalLList<BatchPlot> GrBatchPlotList;
+
+class GrBatchAtlas {
+public:
+ typedef uint64_t BatchToken;
+ // An AtlasID is an opaque handle which callers can use to determine if the atlas contains
+ // a specific piece of data
+ typedef uint32_t AtlasID;
+
+ // A function pointer for use as a callback during eviction. Whenever GrBatchAtlas evicts a
+ // specific AtlasID, it will call all of the registered listeners so they can optionally process
+ // the eviction
+ typedef void (*EvictionFunc)(GrBatchAtlas::AtlasID, void*);
+
+ GrBatchAtlas(GrTexture*, int numPlotsX, int numPlotsY);
+ ~GrBatchAtlas();
+
+    // Adds a width x height subimage to the atlas. Upon success it returns
+    // an AtlasID for the subimage and its absolute location in the backing texture.
+    // false is returned if the subimage cannot currently fit in the atlas.
+    // The image data is copied into the CPU-side backing store and uploaded lazily.
+ bool addToAtlas(AtlasID*, GrBatchTarget*, int width, int height, const void* image,
+ SkIPoint16* loc);
+
+ GrTexture* getTexture() const { return fTexture; }
+
+ bool hasID(AtlasID id);
+ void setLastRefToken(AtlasID id, BatchToken batchToken);
+ void registerEvictionCallback(EvictionFunc func, void* userData) {
+ EvictionData* data = fEvictionCallbacks.append();
+ data->fFunc = func;
+ data->fData = userData;
+ }
+
+private:
+ int getIndexFromID(AtlasID id) {
+ return id & 0xffff;
+ }
+
+ int getGenerationFromID(AtlasID id) {
+ return (id >> 16) & 0xffff;
+ }
+
+ inline void updatePlot(GrBatchTarget*, AtlasID*, BatchPlot*);
+
+ inline void makeMRU(BatchPlot* plot);
+
+ inline void processEviction(AtlasID);
+
+ GrTexture* fTexture;
+ int fNumPlotsX;
+ int fNumPlotsY;
+ int fPlotWidth;
+ int fPlotHeight;
+ size_t fBPP;
+
+ struct EvictionData {
+ EvictionFunc fFunc;
+ void* fData;
+ };
+
+ SkTDArray<EvictionData> fEvictionCallbacks;
+ // allocated array of GrBatchPlots
+ SkAutoTUnref<BatchPlot>* fPlotArray;
+ // LRU list of GrPlots (MRU at head - LRU at tail)
+ GrBatchPlotList fPlotList;
+};
+
+#endif
diff --git a/src/gpu/GrBatchTarget.cpp b/src/gpu/GrBatchTarget.cpp
index 21658d2a01..4c24a80c95 100644
--- a/src/gpu/GrBatchTarget.cpp
+++ b/src/gpu/GrBatchTarget.cpp
@@ -7,42 +7,49 @@
#include "GrBatchTarget.h"
+#include "GrBatchAtlas.h"
#include "GrPipeline.h"
-/*
-void GrBatchTarget::flush() {
- FlushBuffer::Iter iter(fFlushBuffer);
- fVertexPool->unmap();
- fIndexPool->unmap();
+GrBatchTarget::GrBatchTarget(GrGpu* gpu,
+ GrVertexBufferAllocPool* vpool,
+ GrIndexBufferAllocPool* ipool)
+ : fGpu(gpu)
+ , fVertexPool(vpool)
+ , fIndexPool(ipool)
+ , fFlushBuffer(kFlushBufferInitialSizeInBytes)
+ , fIter(fFlushBuffer)
+ , fNumberOfDraws(0)
+ , fCurrentToken(0)
+ , fLastFlushedToken(0)
+ , fInlineUpdatesIndex(0) {
+}
- while (iter.next()) {
- GrProgramDesc desc;
- BufferedFlush* bf = iter.get();
- const GrPipeline* pipeline = bf->fPipeline;
- const GrPrimitiveProcessor* primProc = bf->fPrimitiveProcessor.get();
- fGpu->buildProgramDesc(&desc, *primProc, *pipeline, bf->fBatchTracker);
-
- GrGpu::DrawArgs args(primProc, pipeline, &desc, &bf->fBatchTracker);
- for (int i = 0; i < bf->fDraws.count(); i++) {
- fGpu->draw(args, bf->fDraws[i]);
- }
- }
- fFlushBuffer.reset();
-}*/
-/*
-void GrBatchTarget::flushNext(int n) {
+void GrBatchTarget::flushNext(int n) {
for (; n > 0; n--) {
+ fLastFlushedToken++;
SkDEBUGCODE(bool verify =) fIter.next();
SkASSERT(verify);
- GrProgramDesc desc;
+
BufferedFlush* bf = fIter.get();
+
+ // Flush all texture uploads
+ int uploadCount = fInlineUploads.count();
+ while (fInlineUpdatesIndex < uploadCount &&
+ fInlineUploads[fInlineUpdatesIndex]->lastUploadToken() <= fLastFlushedToken) {
+ fInlineUploads[fInlineUpdatesIndex++]->upload(TextureUploader(fGpu));
+ }
+
+ GrProgramDesc desc;
const GrPipeline* pipeline = bf->fPipeline;
const GrPrimitiveProcessor* primProc = bf->fPrimitiveProcessor.get();
fGpu->buildProgramDesc(&desc, *primProc, *pipeline, bf->fBatchTracker);
GrGpu::DrawArgs args(primProc, pipeline, &desc, &bf->fBatchTracker);
- for (int i = 0; i < bf->fDraws.count(); i++) {
- fGpu->draw(args, bf->fDraws[i]);
+
+ int drawCount = bf->fDraws.count();
+ const SkSTArray<1, DrawInfo, true>& draws = bf->fDraws;
+ for (int i = 0; i < drawCount; i++) {
+ fGpu->draw(args, draws[i]);
}
}
-}*/
+}
diff --git a/src/gpu/GrBatchTarget.h b/src/gpu/GrBatchTarget.h
index ae046c1ee1..9bd6b9409c 100644
--- a/src/gpu/GrBatchTarget.h
+++ b/src/gpu/GrBatchTarget.h
@@ -8,6 +8,7 @@
#ifndef GrBatchBuffer_DEFINED
#define GrBatchBuffer_DEFINED
+#include "GrBatchAtlas.h"
#include "GrBufferAllocPool.h"
#include "GrPendingProgramElement.h"
#include "GrPipeline.h"
@@ -24,51 +25,90 @@ class GrVertexBufferAllocPool;
class GrBatchTarget : public SkNoncopyable {
public:
+ typedef GrBatchAtlas::BatchToken BatchToken;
GrBatchTarget(GrGpu* gpu,
GrVertexBufferAllocPool* vpool,
- GrIndexBufferAllocPool* ipool)
- : fGpu(gpu)
- , fVertexPool(vpool)
- , fIndexPool(ipool)
- , fFlushBuffer(kFlushBufferInitialSizeInBytes)
- , fIter(fFlushBuffer)
- , fNumberOfDraws(0) {}
+ GrIndexBufferAllocPool* ipool);
typedef GrDrawTarget::DrawInfo DrawInfo;
void initDraw(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline) {
GrNEW_APPEND_TO_RECORDER(fFlushBuffer, BufferedFlush, (primProc, pipeline));
fNumberOfDraws++;
+ fCurrentToken++;
+ }
+
+ class TextureUploader {
+ public:
+ TextureUploader(GrGpu* gpu) : fGpu(gpu) { SkASSERT(gpu); }
+
+ /**
+ * Updates the pixels in a rectangle of a texture.
+ *
+ * @param left left edge of the rectangle to write (inclusive)
+ * @param top top edge of the rectangle to write (inclusive)
+ * @param width width of rectangle to write in pixels.
+ * @param height height of rectangle to write in pixels.
+ * @param config the pixel config of the source buffer
+ * @param buffer memory to read pixels from
+ * @param rowBytes number of bytes between consecutive rows. Zero
+ * means rows are tightly packed.
+ */
+ bool writeTexturePixels(GrTexture* texture,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer,
+ size_t rowBytes) {
+ return fGpu->writeTexturePixels(texture, left, top, width, height, config, buffer,
+ rowBytes);
+ }
+
+ private:
+ GrGpu* fGpu;
+ };
+
+ class Uploader : public SkRefCnt {
+ public:
+ Uploader(BatchToken lastUploadToken) : fLastUploadToken(lastUploadToken) {}
+ BatchToken lastUploadToken() const { return fLastUploadToken; }
+ virtual void upload(TextureUploader)=0;
+
+ private:
+ BatchToken fLastUploadToken;
+ };
+
+ void upload(Uploader* upload) {
+ if (this->asapToken() == upload->lastUploadToken()) {
+ fAsapUploads.push_back().reset(SkRef(upload));
+ } else {
+ fInlineUploads.push_back().reset(SkRef(upload));
+ }
}
void draw(const GrDrawTarget::DrawInfo& draw) {
fFlushBuffer.back().fDraws.push_back(draw);
}
- // TODO this is temporary until batch is everywhere
- //void flush();
+ bool isIssued(BatchToken token) const { return fLastFlushedToken >= token; }
+ BatchToken currentToken() const { return fCurrentToken; }
+ BatchToken asapToken() const { return fLastFlushedToken + 1; }
+
+ // TODO much of this complexity goes away when batch is everywhere
void resetNumberOfDraws() { fNumberOfDraws = 0; }
int numberOfDraws() const { return fNumberOfDraws; }
- void preFlush() { fIter = FlushBuffer::Iter(fFlushBuffer); }
- void flushNext(int n) {
- for (; n > 0; n--) {
- SkDEBUGCODE(bool verify =) fIter.next();
- SkASSERT(verify);
- GrProgramDesc desc;
- BufferedFlush* bf = fIter.get();
- const GrPipeline* pipeline = bf->fPipeline;
- const GrPrimitiveProcessor* primProc = bf->fPrimitiveProcessor.get();
- fGpu->buildProgramDesc(&desc, *primProc, *pipeline, bf->fBatchTracker);
-
- GrGpu::DrawArgs args(primProc, pipeline, &desc, &bf->fBatchTracker);
-
- int drawCount = bf->fDraws.count();
- const SkSTArray<1, DrawInfo, true>& draws = bf->fDraws;
- for (int i = 0; i < drawCount; i++) {
- fGpu->draw(args, draws[i]);
- }
+ void preFlush() {
+ int updateCount = fAsapUploads.count();
+ for (int i = 0; i < updateCount; i++) {
+ fAsapUploads[i]->upload(TextureUploader(fGpu));
}
+ fInlineUpdatesIndex = 0;
+ fIter = FlushBuffer::Iter(fFlushBuffer);
+ }
+ void flushNext(int n);
+ void postFlush() {
+ SkASSERT(!fIter.next());
+ fFlushBuffer.reset();
+ fAsapUploads.reset();
+ fInlineUploads.reset();
}
- void postFlush() { SkASSERT(!fIter.next()); fFlushBuffer.reset(); }
// TODO This goes away when everything uses batch
GrBatchTracker* currentBatchTracker() {
@@ -81,6 +121,8 @@ public:
GrVertexBufferAllocPool* vertexPool() { return fVertexPool; }
GrIndexBufferAllocPool* indexPool() { return fIndexPool; }
+ const static int kVertsPerRect = 4;
+ const static int kIndicesPerRect = 6;
const GrIndexBuffer* quadIndexBuffer() const { return fGpu->getQuadIndexBuffer(); }
// A helper for draws which overallocate and then return data to the pool
@@ -118,6 +160,11 @@ private:
// TODO this is temporary
FlushBuffer::Iter fIter;
int fNumberOfDraws;
+ BatchToken fCurrentToken;
+    BatchToken fLastFlushedToken; // The last token flushed to the GPU
+ SkTArray<SkAutoTUnref<Uploader>, true> fAsapUploads;
+ SkTArray<SkAutoTUnref<Uploader>, true> fInlineUploads;
+ int fInlineUpdatesIndex;
};
#endif
diff --git a/src/gpu/GrBitmapTextContext.h b/src/gpu/GrBitmapTextContext.h
index 2c508736dc..e181fd2b69 100644
--- a/src/gpu/GrBitmapTextContext.h
+++ b/src/gpu/GrBitmapTextContext.h
@@ -10,7 +10,8 @@
#include "GrTextContext.h"
-class GrGeometryProcessor;
+#include "GrGeometryProcessor.h"
+
class GrTextStrike;
/*
diff --git a/src/gpu/GrDrawTarget.h b/src/gpu/GrDrawTarget.h
index 88207da599..471acea14f 100644
--- a/src/gpu/GrDrawTarget.h
+++ b/src/gpu/GrDrawTarget.h
@@ -31,6 +31,7 @@
class GrBatch;
class GrClip;
class GrDrawTargetCaps;
+class GrGeometryProcessor;
class GrPath;
class GrPathRange;
class GrPipeline;
diff --git a/src/gpu/GrPipelineBuilder.cpp b/src/gpu/GrPipelineBuilder.cpp
index 2a17c2863a..b3b602f469 100644
--- a/src/gpu/GrPipelineBuilder.cpp
+++ b/src/gpu/GrPipelineBuilder.cpp
@@ -7,6 +7,7 @@
#include "GrPipelineBuilder.h"
+#include "GrBatch.h"
#include "GrBlend.h"
#include "GrPaint.h"
#include "GrPipeline.h"
diff --git a/src/gpu/GrPipelineBuilder.h b/src/gpu/GrPipelineBuilder.h
index 85785723d4..ae9ca9b067 100644
--- a/src/gpu/GrPipelineBuilder.h
+++ b/src/gpu/GrPipelineBuilder.h
@@ -8,7 +8,6 @@
#ifndef GrPipelineBuilder_DEFINED
#define GrPipelineBuilder_DEFINED
-#include "GrBatch.h"
#include "GrBlend.h"
#include "GrClip.h"
#include "GrDrawTargetCaps.h"
@@ -24,6 +23,7 @@
#include "effects/GrPorterDuffXferProcessor.h"
#include "effects/GrSimpleTextureEffect.h"
+class GrBatch;
class GrDrawTargetCaps;
class GrPaint;
class GrTexture;
diff --git a/src/gpu/GrTargetCommands.h b/src/gpu/GrTargetCommands.h
index 035fbeb5bd..51acee0c46 100644
--- a/src/gpu/GrTargetCommands.h
+++ b/src/gpu/GrTargetCommands.h
@@ -8,6 +8,7 @@
#ifndef GrTargetCommands_DEFINED
#define GrTargetCommands_DEFINED
+#include "GrBatch.h"
#include "GrBatchTarget.h"
#include "GrDrawTarget.h"
#include "GrGpu.h"
diff --git a/src/gpu/gl/builders/GrGLVertexShaderBuilder.h b/src/gpu/gl/builders/GrGLVertexShaderBuilder.h
index 71a60a0804..7f7471d422 100644
--- a/src/gpu/gl/builders/GrGLVertexShaderBuilder.h
+++ b/src/gpu/gl/builders/GrGLVertexShaderBuilder.h
@@ -9,6 +9,7 @@
#define GrGLVertexShader_DEFINED
#include "GrGLShaderBuilder.h"
+#include "GrGeometryProcessor.h"
class GrGLVarying;