Diffstat (limited to 'src')
-rw-r--r--  src/gpu/GrBatchAtlas.cpp                              29
-rw-r--r--  src/gpu/GrBatchAtlas.h                                13
-rw-r--r--  src/gpu/GrResourceProvider.cpp                        13
-rw-r--r--  src/gpu/GrResourceProvider.h                           5
-rw-r--r--  src/gpu/batches/GrAADistanceFieldPathRenderer.cpp     30
-rw-r--r--  src/gpu/batches/GrAADistanceFieldPathRenderer.h        6
-rw-r--r--  src/gpu/text/GrBatchFontCache.cpp                     25
-rw-r--r--  src/gpu/text/GrBatchFontCache.h                        4
8 files changed, 54 insertions, 71 deletions
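Note: the hunks below convert manually managed pointers into smart pointers (sk_sp for the shared atlas texture, std::unique_ptr for owned atlases and arrays), which removes the hand-written destructors and delete loops. As a minimal standalone sketch of the pattern, using std::shared_ptr as a stand-in for sk_sp and placeholder types (Texture, Plot, Atlas) that are not Skia API:

    // Illustrative only: std::shared_ptr stands in for sk_sp; the types are placeholders.
    #include <memory>
    #include <utility>

    struct Texture { int width = 256; int height = 256; };
    struct Plot {};

    class Atlas {
    public:
        // Ownership is passed by value and moved into the member, so the
        // caller decides whether to keep a reference or give it up.
        Atlas(std::shared_ptr<Texture> texture, int numPlotsX, int numPlotsY)
            : fTexture(std::move(texture))
            , fPlotArray(new Plot[numPlotsX * numPlotsY]) {}

        // No user-written destructor: fTexture drops its reference and
        // fPlotArray calls delete[] automatically.

        Texture* texture() const { return fTexture.get(); }

    private:
        std::shared_ptr<Texture> fTexture;
        std::unique_ptr<Plot[]>  fPlotArray;
    };

    int main() {
        auto tex = std::make_shared<Texture>();
        Atlas atlas(std::move(tex), 4, 4);
        return atlas.texture()->width == 256 ? 0 : 1;
    }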
diff --git a/src/gpu/GrBatchAtlas.cpp b/src/gpu/GrBatchAtlas.cpp
index 72808c9143..684db22c12 100644
--- a/src/gpu/GrBatchAtlas.cpp
+++ b/src/gpu/GrBatchAtlas.cpp
@@ -120,30 +120,30 @@ void GrBatchAtlas::BatchPlot::resetRects() {
///////////////////////////////////////////////////////////////////////////////
-GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
- : fTexture(texture)
+GrBatchAtlas::GrBatchAtlas(sk_sp<GrTexture> texture, int numPlotsX, int numPlotsY)
+ : fTexture(std::move(texture))
, fAtlasGeneration(kInvalidAtlasGeneration + 1) {
- fPlotWidth = texture->width() / numPlotsX;
- fPlotHeight = texture->height() / numPlotsY;
+ fPlotWidth = fTexture->width() / numPlotsX;
+ fPlotHeight = fTexture->height() / numPlotsY;
SkASSERT(numPlotsX * numPlotsY <= BulkUseTokenUpdater::kMaxPlots);
- SkASSERT(fPlotWidth * numPlotsX == texture->width());
- SkASSERT(fPlotHeight * numPlotsY == texture->height());
+ SkASSERT(fPlotWidth * numPlotsX == fTexture->width());
+ SkASSERT(fPlotHeight * numPlotsY == fTexture->height());
SkDEBUGCODE(fNumPlots = numPlotsX * numPlotsY;)
// We currently do not support compressed atlases...
- SkASSERT(!GrPixelConfigIsCompressed(texture->desc().fConfig));
+ SkASSERT(!GrPixelConfigIsCompressed(fTexture->desc().fConfig));
// set up allocated plots
- fPlotArray = new sk_sp<BatchPlot>[numPlotsX * numPlotsY];
+ fPlotArray.reset(new sk_sp<BatchPlot>[numPlotsX * numPlotsY]);
- sk_sp<BatchPlot>* currPlot = fPlotArray;
+ sk_sp<BatchPlot>* currPlot = fPlotArray.get();
for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
uint32_t index = r * numPlotsX + c;
currPlot->reset(new BatchPlot(index, 1, x, y, fPlotWidth, fPlotHeight,
- texture->desc().fConfig));
+ fTexture->desc().fConfig));
// build LRU list
fPlotList.addToHead(currPlot->get());
@@ -152,11 +152,6 @@ GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
}
}
-GrBatchAtlas::~GrBatchAtlas() {
- SkSafeUnref(fTexture);
- delete[] fPlotArray;
-}
-
void GrBatchAtlas::processEviction(AtlasID id) {
for (int i = 0; i < fEvictionCallbacks.count(); i++) {
(*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData);
@@ -172,7 +167,7 @@ inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, B
if (target->hasDrawBeenFlushed(plot->lastUploadToken())) {
// With C++14 we could move sk_sp into the lambda to only ref once.
sk_sp<BatchPlot> plotsp(SkRef(plot));
- GrTexture* texture = fTexture;
+ GrTexture* texture = fTexture.get();
GrBatchDrawToken lastUploadToken = target->addAsapUpload(
[plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels) {
plotsp->uploadToTexture(writePixels, texture);
@@ -242,7 +237,7 @@ bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* target,
// one it displaced most likely was uploaded asap.
// With C++14 we could move sk_sp into the lambda to only ref once.
sk_sp<BatchPlot> plotsp(SkRef(newPlot.get()));
- GrTexture* texture = fTexture;
+ GrTexture* texture = fTexture.get();
GrBatchDrawToken lastUploadToken = target->addInlineUpload(
[plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels) {
plotsp->uploadToTexture(writePixels, texture);
diff --git a/src/gpu/GrBatchAtlas.h b/src/gpu/GrBatchAtlas.h
index d8856e8ef1..7cf15bd336 100644
--- a/src/gpu/GrBatchAtlas.h
+++ b/src/gpu/GrBatchAtlas.h
@@ -41,8 +41,7 @@ public:
// the eviction
typedef void (*EvictionFunc)(GrBatchAtlas::AtlasID, void*);
- GrBatchAtlas(GrTexture*, int numPlotsX, int numPlotsY);
- ~GrBatchAtlas();
+ GrBatchAtlas(sk_sp<GrTexture>, int numPlotsX, int numPlotsY);
// Adds a width x height subimage to the atlas. Upon success it returns
// the containing GrPlot and absolute location in the backing texture.
@@ -54,7 +53,7 @@ public:
bool addToAtlas(AtlasID*, GrDrawBatch::Target*, int width, int height, const void* image,
SkIPoint16* loc);
- GrTexture* getTexture() const { return fTexture; }
+ GrTexture* getTexture() const { return fTexture.get(); }
uint64_t atlasGeneration() const { return fAtlasGeneration; }
@@ -240,9 +239,9 @@ private:
inline void processEviction(AtlasID);
- GrTexture* fTexture;
- int fPlotWidth;
- int fPlotHeight;
+ sk_sp<GrTexture> fTexture;
+ int fPlotWidth;
+ int fPlotHeight;
SkDEBUGCODE(uint32_t fNumPlots;)
uint64_t fAtlasGeneration;
@@ -254,7 +253,7 @@ private:
SkTDArray<EvictionData> fEvictionCallbacks;
// allocated array of GrBatchPlots
- sk_sp<BatchPlot>* fPlotArray;
+ std::unique_ptr<sk_sp<BatchPlot>[]> fPlotArray;
// LRU list of GrPlots (MRU at head - LRU at tail)
GrBatchPlotList fPlotList;
};
diff --git a/src/gpu/GrResourceProvider.cpp b/src/gpu/GrResourceProvider.cpp
index a6958e5440..fbffa27422 100644
--- a/src/gpu/GrResourceProvider.cpp
+++ b/src/gpu/GrResourceProvider.cpp
@@ -135,10 +135,11 @@ GrBuffer* GrResourceProvider::createBuffer(size_t size, GrBufferType intendedTyp
return buffer;
}
-GrBatchAtlas* GrResourceProvider::createAtlas(GrPixelConfig config,
- int width, int height,
- int numPlotsX, int numPlotsY,
- GrBatchAtlas::EvictionFunc func, void* data) {
+std::unique_ptr<GrBatchAtlas> GrResourceProvider::makeAtlas(GrPixelConfig config,
+ int width, int height,
+ int numPlotsX, int numPlotsY,
+ GrBatchAtlas::EvictionFunc func,
+ void* data) {
GrSurfaceDesc desc;
desc.fFlags = kNone_GrSurfaceFlags;
desc.fWidth = width;
@@ -149,11 +150,11 @@ GrBatchAtlas* GrResourceProvider::createAtlas(GrPixelConfig config,
// guarantee we do not receive a texture with pending IO
// TODO: Determine how to avoid having to do this. (https://bug.skia.org/4156)
static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
- GrTexture* texture = this->createApproxTexture(desc, kFlags);
+ sk_sp<GrTexture> texture(this->createApproxTexture(desc, kFlags));
if (!texture) {
return nullptr;
}
- GrBatchAtlas* atlas = new GrBatchAtlas(texture, numPlotsX, numPlotsY);
+ std::unique_ptr<GrBatchAtlas> atlas(new GrBatchAtlas(std::move(texture), numPlotsX, numPlotsY));
atlas->registerEvictionCallback(func, data);
return atlas;
}
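The createAtlas -> makeAtlas rename follows the return-type change: the factory now states the ownership transfer in its signature instead of handing back a raw owning pointer. A hedged sketch of the same idea with placeholder names (Widget, makeWidget are illustrative, not Skia API):

    // Illustrative only: mirrors the createAtlas -> makeAtlas signature change.
    #include <memory>

    struct Widget { int size; };

    // Before: Widget* createWidget(int size);  // caller had to remember delete
    // After: the unique_ptr return type makes the ownership transfer explicit.
    std::unique_ptr<Widget> makeWidget(int size) {
        if (size <= 0) {
            return nullptr;            // creation failure still reads naturally
        }
        return std::unique_ptr<Widget>(new Widget{size});
    }

    int main() {
        std::unique_ptr<Widget> w = makeWidget(16);
        return (w && w->size == 16) ? 0 : 1;
    }   // w is destroyed here; no explicit delete at any call site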
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index c29a40a2b4..abcd699957 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -144,8 +144,9 @@ public:
*
* @return An initialized GrBatchAtlas, or nullptr if creation fails
*/
- GrBatchAtlas* createAtlas(GrPixelConfig, int width, int height, int numPlotsX, int numPlotsY,
- GrBatchAtlas::EvictionFunc func, void* data);
+ std::unique_ptr<GrBatchAtlas> makeAtlas(GrPixelConfig, int width, int height,
+ int numPlotsX, int numPlotsY,
+ GrBatchAtlas::EvictionFunc func, void* data);
/**
* If passed in render target already has a stencil buffer, return it. Otherwise attempt to
diff --git a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
index 2572ab821d..bea604d702 100644
--- a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
+++ b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
@@ -70,7 +70,6 @@ GrAADistanceFieldPathRenderer::~GrAADistanceFieldPathRenderer() {
iter.next();
delete shapeData;
}
- delete fAtlas;
#ifdef DF_PATH_TRACKING
SkDebugf("Cached shapes: %d, freed shapes: %d\n", g_NumCachedShapes, g_NumFreedShapes);
@@ -516,11 +515,11 @@ bool GrAADistanceFieldPathRenderer::onDrawPath(const DrawPathArgs& args) {
SkASSERT(!args.fShape->isEmpty());
SkASSERT(args.fShape->hasUnstyledKey());
if (!fAtlas) {
- fAtlas = args.fResourceProvider->createAtlas(kAlpha_8_GrPixelConfig,
- ATLAS_TEXTURE_WIDTH, ATLAS_TEXTURE_HEIGHT,
- NUM_PLOTS_X, NUM_PLOTS_Y,
- &GrAADistanceFieldPathRenderer::HandleEviction,
- (void*)this);
+ fAtlas = args.fResourceProvider->makeAtlas(kAlpha_8_GrPixelConfig,
+ ATLAS_TEXTURE_WIDTH, ATLAS_TEXTURE_HEIGHT,
+ NUM_PLOTS_X, NUM_PLOTS_Y,
+ &GrAADistanceFieldPathRenderer::HandleEviction,
+ (void*)this);
if (!fAtlas) {
return false;
}
@@ -529,9 +528,8 @@ bool GrAADistanceFieldPathRenderer::onDrawPath(const DrawPathArgs& args) {
sk_sp<GrDrawBatch> batch(new AADistanceFieldPathBatch(args.fPaint->getColor(),
*args.fShape,
args.fAntiAlias, *args.fViewMatrix,
- fAtlas, &fShapeCache, &fShapeList,
+ fAtlas.get(), &fShapeCache, &fShapeList,
args.fGammaCorrect));
-
GrPipelineBuilder pipelineBuilder(*args.fPaint);
pipelineBuilder.setUserStencil(args.fUserStencilSettings);
@@ -560,7 +558,7 @@ struct PathTestStruct {
fShapeList.remove(shapeData);
delete shapeData;
}
- delete fAtlas;
+ fAtlas = nullptr;
fShapeCache.reset();
}
@@ -581,7 +579,7 @@ struct PathTestStruct {
}
uint32_t fContextID;
- GrBatchAtlas* fAtlas;
+ std::unique_ptr<GrBatchAtlas> fAtlas;
ShapeCache fShapeCache;
ShapeDataList fShapeList;
};
@@ -593,11 +591,11 @@ DRAW_BATCH_TEST_DEFINE(AADistanceFieldPathBatch) {
gTestStruct.fContextID = context->uniqueID();
gTestStruct.reset();
gTestStruct.fAtlas =
- context->resourceProvider()->createAtlas(kAlpha_8_GrPixelConfig,
- ATLAS_TEXTURE_WIDTH, ATLAS_TEXTURE_HEIGHT,
- NUM_PLOTS_X, NUM_PLOTS_Y,
- &PathTestStruct::HandleEviction,
- (void*)&gTestStruct);
+ context->resourceProvider()->makeAtlas(kAlpha_8_GrPixelConfig,
+ ATLAS_TEXTURE_WIDTH, ATLAS_TEXTURE_HEIGHT,
+ NUM_PLOTS_X, NUM_PLOTS_Y,
+ &PathTestStruct::HandleEviction,
+ (void*)&gTestStruct);
}
SkMatrix viewMatrix = GrTest::TestMatrix(random);
@@ -612,7 +610,7 @@ DRAW_BATCH_TEST_DEFINE(AADistanceFieldPathBatch) {
shape,
antiAlias,
viewMatrix,
- gTestStruct.fAtlas,
+ gTestStruct.fAtlas.get(),
&gTestStruct.fShapeCache,
&gTestStruct.fShapeList,
gammaCorrect);
diff --git a/src/gpu/batches/GrAADistanceFieldPathRenderer.h b/src/gpu/batches/GrAADistanceFieldPathRenderer.h
index 985b2f1537..171108af91 100644
--- a/src/gpu/batches/GrAADistanceFieldPathRenderer.h
+++ b/src/gpu/batches/GrAADistanceFieldPathRenderer.h
@@ -90,9 +90,9 @@ private:
typedef SkTDynamicHash<ShapeData, ShapeData::Key> ShapeCache;
typedef SkTInternalLList<ShapeData> ShapeDataList;
- GrBatchAtlas* fAtlas;
- ShapeCache fShapeCache;
- ShapeDataList fShapeList;
+ std::unique_ptr<GrBatchAtlas> fAtlas;
+ ShapeCache fShapeCache;
+ ShapeDataList fShapeList;
typedef GrPathRenderer INHERITED;
diff --git a/src/gpu/text/GrBatchFontCache.cpp b/src/gpu/text/GrBatchFontCache.cpp
index 3e212cd171..baed514e38 100644
--- a/src/gpu/text/GrBatchFontCache.cpp
+++ b/src/gpu/text/GrBatchFontCache.cpp
@@ -24,11 +24,10 @@ bool GrBatchFontCache::initAtlas(GrMaskFormat format) {
int numPlotsX = fAtlasConfigs[index].numPlotsX();
int numPlotsY = fAtlasConfigs[index].numPlotsY();
- fAtlases[index] =
- fContext->resourceProvider()->createAtlas(config, width, height,
- numPlotsX, numPlotsY,
- &GrBatchFontCache::HandleEviction,
- (void*)this);
+ fAtlases[index] = fContext->resourceProvider()->makeAtlas(config, width, height,
+ numPlotsX, numPlotsY,
+ &GrBatchFontCache::HandleEviction,
+ (void*)this);
if (!fAtlases[index]) {
return false;
}
@@ -39,9 +38,6 @@ bool GrBatchFontCache::initAtlas(GrMaskFormat format) {
GrBatchFontCache::GrBatchFontCache(GrContext* context)
: fContext(context)
, fPreserveStrike(nullptr) {
- for (int i = 0; i < kMaskFormatCount; ++i) {
- fAtlases[i] = nullptr;
- }
// setup default atlas configs
fAtlasConfigs[kA8_GrMaskFormat].fWidth = 2048;
@@ -73,9 +69,6 @@ GrBatchFontCache::~GrBatchFontCache() {
(*iter).unref();
++iter;
}
- for (int i = 0; i < kMaskFormatCount; ++i) {
- delete fAtlases[i];
- }
}
void GrBatchFontCache::freeAll() {
@@ -87,7 +80,6 @@ void GrBatchFontCache::freeAll() {
}
fCache.rewind();
for (int i = 0; i < kMaskFormatCount; ++i) {
- delete fAtlases[i];
fAtlases[i] = nullptr;
}
}
@@ -130,13 +122,10 @@ void GrBatchFontCache::dump() const {
}
void GrBatchFontCache::setAtlasSizes_ForTesting(const GrBatchAtlasConfig configs[3]) {
- // delete any old atlases, this should be safe to do as long as we are not in the middle of a
- // flush
+ // Delete any old atlases.
+ // This should be safe to do as long as we are not in the middle of a flush.
for (int i = 0; i < kMaskFormatCount; i++) {
- if (fAtlases[i]) {
- delete fAtlases[i];
- fAtlases[i] = nullptr;
- }
+ fAtlases[i] = nullptr;
}
memcpy(fAtlasConfigs, configs, sizeof(fAtlasConfigs));
}
diff --git a/src/gpu/text/GrBatchFontCache.h b/src/gpu/text/GrBatchFontCache.h
index 9bb19f5274..d7503b1720 100644
--- a/src/gpu/text/GrBatchFontCache.h
+++ b/src/gpu/text/GrBatchFontCache.h
@@ -225,7 +225,7 @@ private:
GrBatchAtlas* getAtlas(GrMaskFormat format) const {
int atlasIndex = MaskFormatToAtlasIndex(format);
SkASSERT(fAtlases[atlasIndex]);
- return fAtlases[atlasIndex];
+ return fAtlases[atlasIndex].get();
}
static void HandleEviction(GrBatchAtlas::AtlasID, void*);
@@ -233,7 +233,7 @@ private:
using StrikeHash = SkTDynamicHash<GrBatchTextStrike, SkDescriptor>;
GrContext* fContext;
StrikeHash fCache;
- GrBatchAtlas* fAtlases[kMaskFormatCount];
+ std::unique_ptr<GrBatchAtlas> fAtlases[kMaskFormatCount];
GrBatchTextStrike* fPreserveStrike;
GrBatchAtlasConfig fAtlasConfigs[kMaskFormatCount];
};
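For GrBatchFontCache, holding the atlases as std::unique_ptr elements is what lets the constructor's nullptr loop and the destructor's and freeAll()'s delete loops go away; assigning nullptr to a slot now releases it. A minimal sketch with placeholder types (Cache, Atlas here are not Skia classes):

    // Illustrative only: shows why the delete loops disappear.
    #include <memory>

    struct Atlas { explicit Atlas(int id) : fId(id) {} int fId; };

    class Cache {
    public:
        void init(int i) { fAtlases[i] = std::unique_ptr<Atlas>(new Atlas(i)); }

        void freeAll() {
            for (auto& atlas : fAtlases) {
                atlas = nullptr;       // releases the old atlas, if any
            }
        }
        // Implicit destructor destroys whatever is still held; no delete loop.

    private:
        std::unique_ptr<Atlas> fAtlases[3];
    };

    int main() {
        Cache cache;
        cache.init(0);
        cache.init(2);
        cache.freeAll();               // safe even for the slot never initialized
        return 0;
    }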