author    bsalomon <bsalomon@google.com>          2015-01-14 10:42:08 -0800
committer Commit bot <commit-bot@chromium.org>    2015-01-14 10:42:08 -0800
commit    5236cf480daf82b2f36e42795abdbbc915533a59 (patch)
tree      b3e76998aabf4849c60d7bc0c701de47b2aaf4e9 /src/gpu
parent    028b98a08072bd1764936e47c54fa2da5cf92744 (diff)
Make uncached textures uncached from the get go.
This avoids the problem of a newly created uncached texture causing a purge of cached resources.

BUG=chromium:445885
Review URL: https://codereview.chromium.org/846303002
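The change passes the caller's budgeting decision into GrGpu::createTexture() and replaces the old isWrapped bool (plus a mutable budgeted flag) with a GrGpuResource::LifeCycle value fixed at construction. As a rough, self-contained C++ sketch of that pattern — the names below are simplified stand-ins, not the actual Skia classes — it shows how an uncached texture is born unbudgeted and therefore never forces budgeted resources out of the cache to make room for itself:

#include <cstddef>

class GpuResourceSketch {
public:
    // Stand-in for GrGpuResource::LifeCycle, which replaces the old
    // isWrapped/budgeted-flag pair; the value is fixed at construction.
    enum LifeCycle {
        kCached_LifeCycle,    // counts against the resource cache budget
        kUncached_LifeCycle,  // owned by the context but never budgeted
        kWrapped_LifeCycle,   // wraps an externally allocated GPU object
    };

    explicit GpuResourceSketch(LifeCycle lifeCycle) : fLifeCycle(lifeCycle) {}

    bool isBudgeted() const { return kCached_LifeCycle == fLifeCycle; }
    bool isWrapped() const { return kWrapped_LifeCycle == fLifeCycle; }

private:
    LifeCycle fLifeCycle;
};

class GpuSketch {
public:
    // Mirrors the new createTexture(): the budgeted decision arrives up front,
    // so the resource gets the right LifeCycle at creation time.
    GpuResourceSketch* createTexture(bool budgeted,
                                     const void* /*srcData*/,
                                     size_t /*rowBytes*/) {
        GpuResourceSketch::LifeCycle lifeCycle =
                budgeted ? GpuResourceSketch::kCached_LifeCycle
                         : GpuResourceSketch::kUncached_LifeCycle;
        return new GpuResourceSketch(lifeCycle);
    }
};

// Mirrors GrContext::createUncachedTexture() after this change: no
// post-creation setBudgeted(false) call, just an unbudgeted creation.
GpuResourceSketch* createUncachedTextureSketch(GpuSketch* gpu,
                                               const void* srcData,
                                               size_t rowBytes) {
    return gpu->createTexture(/*budgeted=*/false, srcData, rowBytes);
}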
Diffstat (limited to 'src/gpu')
-rw-r--r--  src/gpu/GrAtlas.cpp                     |  2
-rwxr-xr-x  src/gpu/GrContext.cpp                   | 19
-rwxr-xr-x  src/gpu/GrDistanceFieldTextContext.cpp  |  2
-rw-r--r--  src/gpu/GrGeometryBuffer.h              |  4
-rw-r--r--  src/gpu/GrGpu.cpp                       |  6
-rw-r--r--  src/gpu/GrGpu.h                         | 11
-rw-r--r--  src/gpu/GrGpuResource.cpp               | 28
-rw-r--r--  src/gpu/GrGpuResourceCacheAccess.h      | 13
-rw-r--r--  src/gpu/GrIndexBuffer.h                 |  4
-rw-r--r--  src/gpu/GrPath.h                        |  4
-rw-r--r--  src/gpu/GrPathRange.cpp                 |  4
-rw-r--r--  src/gpu/GrPathRange.h                   |  2
-rw-r--r--  src/gpu/GrResourceCache2.cpp            |  6
-rw-r--r--  src/gpu/GrStencilBuffer.h               |  4
-rw-r--r--  src/gpu/GrTest.cpp                      |  5
-rw-r--r--  src/gpu/GrTexture.cpp                   |  6
-rw-r--r--  src/gpu/GrVertexBuffer.h                |  4
-rw-r--r--  src/gpu/gl/GrGLBufferImpl.cpp           |  3
-rw-r--r--  src/gpu/gl/GrGLBufferImpl.h             |  1
-rw-r--r--  src/gpu/gl/GrGLGpu.cpp                  | 42
-rw-r--r--  src/gpu/gl/GrGLGpu.h                    |  8
-rw-r--r--  src/gpu/gl/GrGLIndexBuffer.cpp          |  2
-rw-r--r--  src/gpu/gl/GrGLPath.cpp                 |  4
-rw-r--r--  src/gpu/gl/GrGLRenderTarget.cpp         | 10
-rw-r--r--  src/gpu/gl/GrGLRenderTarget.h           |  8
-rw-r--r--  src/gpu/gl/GrGLStencilBuffer.h          |  3
-rw-r--r--  src/gpu/gl/GrGLTexture.cpp              | 10
-rw-r--r--  src/gpu/gl/GrGLTexture.h                |  4
-rw-r--r--  src/gpu/gl/GrGLTextureRenderTarget.h    |  2
-rw-r--r--  src/gpu/gl/GrGLVertexArray.cpp          |  2
-rw-r--r--  src/gpu/gl/GrGLVertexBuffer.cpp         |  2
31 files changed, 85 insertions, 140 deletions
diff --git a/src/gpu/GrAtlas.cpp b/src/gpu/GrAtlas.cpp
index c065dabd0b..7ebdf6eb12 100644
--- a/src/gpu/GrAtlas.cpp
+++ b/src/gpu/GrAtlas.cpp
@@ -225,7 +225,7 @@ GrPlot* GrAtlas::addToAtlas(ClientPlotUsage* usage,
desc.fHeight = fBackingTextureSize.height();
desc.fConfig = fPixelConfig;
- fTexture = fGpu->createTexture(desc, NULL, 0);
+ fTexture = fGpu->createTexture(desc, true, NULL, 0);
if (NULL == fTexture) {
return NULL;
}
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index e6fd3589ac..785691dbba 100755
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -298,7 +298,7 @@ GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc,
rtDesc.fWidth = GrNextPow2(desc.fWidth);
rtDesc.fHeight = GrNextPow2(desc.fHeight);
- GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
+ GrTexture* texture = fGpu->createTexture(rtDesc, true, NULL, 0);
if (texture) {
GrDrawState drawState;
@@ -347,7 +347,7 @@ GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc,
size_t stretchedRowBytes = rtDesc.fWidth * bpp;
- texture = fGpu->createTexture(rtDesc, stretchedPixels.get(), stretchedRowBytes);
+ texture = fGpu->createTexture(rtDesc, true, stretchedPixels.get(), stretchedRowBytes);
SkASSERT(texture);
}
@@ -371,7 +371,7 @@ GrTexture* GrContext::createTexture(const GrTextureParams* params,
srcData, rowBytes,
GrTexturePriv::NeedsBilerp(resourceKey));
} else {
- texture = fGpu->createTexture(desc, srcData, rowBytes);
+ texture = fGpu->createTexture(desc, true, srcData, rowBytes);
}
if (texture) {
@@ -445,7 +445,7 @@ GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& inDesc, ScratchTexM
desc.writable()->fFlags = origFlags;
}
- GrTexture* texture = fGpu->createTexture(*desc, NULL, 0);
+ GrTexture* texture = fGpu->createTexture(*desc, true, NULL, 0);
#ifdef SK_DEBUG
GrScratchKey key;
GrTexturePriv::ComputeScratchKey(*desc, &key);
@@ -464,17 +464,10 @@ void GrContext::OverBudgetCB(void* data) {
}
-GrTexture* GrContext::createUncachedTexture(const GrSurfaceDesc& descIn,
+GrTexture* GrContext::createUncachedTexture(const GrSurfaceDesc& desc,
void* srcData,
size_t rowBytes) {
- GrSurfaceDesc descCopy = descIn;
- GrTexture* texture = fGpu->createTexture(descCopy, srcData, rowBytes);
- if (texture) {
- // TODO: It'd be nice to be able to do this before creation so we don't boot something
- // out of the cache. We could temporarily boost the cache budget.
- texture->cacheAccess().setBudgeted(false);
- }
- return texture;
+ return fGpu->createTexture(desc, false, srcData, rowBytes);
}
void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
diff --git a/src/gpu/GrDistanceFieldTextContext.cpp b/src/gpu/GrDistanceFieldTextContext.cpp
index 69dcaa700c..14809756c8 100755
--- a/src/gpu/GrDistanceFieldTextContext.cpp
+++ b/src/gpu/GrDistanceFieldTextContext.cpp
@@ -188,7 +188,7 @@ static void setup_gamma_texture(GrContext* context, const SkGlyphCache* cache,
desc.fHeight = height;
desc.fConfig = kAlpha_8_GrPixelConfig;
- *gammaTexture = context->getGpu()->createTexture(desc, NULL, 0);
+ *gammaTexture = context->getGpu()->createTexture(desc, true, NULL, 0);
if (NULL == *gammaTexture) {
return;
}
diff --git a/src/gpu/GrGeometryBuffer.h b/src/gpu/GrGeometryBuffer.h
index d6761831b1..55d43d7c66 100644
--- a/src/gpu/GrGeometryBuffer.h
+++ b/src/gpu/GrGeometryBuffer.h
@@ -99,8 +99,8 @@ public:
}
protected:
- GrGeometryBuffer(GrGpu* gpu, bool isWrapped, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
- : INHERITED(gpu, isWrapped)
+ GrGeometryBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
+ : INHERITED(gpu, kCached_LifeCycle)
, fMapPtr(NULL)
, fGpuMemorySize(gpuMemorySize)
, fDynamic(dynamic)
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index 23a7fde9e9..0c60a020fe 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -34,7 +34,7 @@ void GrGpu::contextAbandoned() {}
////////////////////////////////////////////////////////////////////////////////
-GrTexture* GrGpu::createTexture(const GrSurfaceDesc& desc,
+GrTexture* GrGpu::createTexture(const GrSurfaceDesc& desc, bool budgeted,
const void* srcData, size_t rowBytes) {
if (!this->caps()->isConfigTexturable(desc.fConfig)) {
return NULL;
@@ -56,10 +56,10 @@ GrTexture* GrGpu::createTexture(const GrSurfaceDesc& desc,
}
this->handleDirtyContext();
- tex = this->onCreateCompressedTexture(desc, srcData);
+ tex = this->onCreateCompressedTexture(desc, budgeted, srcData);
} else {
this->handleDirtyContext();
- tex = this->onCreateTexture(desc, srcData, rowBytes);
+ tex = this->onCreateTexture(desc, budgeted, srcData, rowBytes);
if (tex &&
(kRenderTarget_GrSurfaceFlag & desc.fFlags) &&
!(kNoStencil_GrSurfaceFlag & desc.fFlags)) {
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index a657d17c5c..429c9c8439 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -90,6 +90,7 @@ public:
* or render targets can be checked using GrDrawTargetCaps.
*
* @param desc describes the texture to be created.
+ * @param budgeted does this texture count against the resource cache budget?
* @param srcData texel data to load texture. Begins with full-size
* palette data for paletted textures. For compressed
* formats it contains the compressed pixel data. Otherwise,
@@ -101,7 +102,8 @@ public:
*
* @return The texture object if successful, otherwise NULL.
*/
- GrTexture* createTexture(const GrSurfaceDesc& desc, const void* srcData, size_t rowBytes);
+ GrTexture* createTexture(const GrSurfaceDesc& desc, bool budgeted,
+ const void* srcData, size_t rowBytes);
/**
* Implements GrContext::wrapBackendTexture
@@ -423,10 +425,9 @@ private:
virtual void onResetContext(uint32_t resetBits) = 0;
// overridden by backend-specific derived class to create objects.
- virtual GrTexture* onCreateTexture(const GrSurfaceDesc& desc,
- const void* srcData,
- size_t rowBytes) = 0;
- virtual GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,
+ virtual GrTexture* onCreateTexture(const GrSurfaceDesc& desc, bool budgeted,
+ const void* srcData, size_t rowBytes) = 0;
+ virtual GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc, bool budgeted,
const void* srcData) = 0;
virtual GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&) = 0;
virtual GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&) = 0;
diff --git a/src/gpu/GrGpuResource.cpp b/src/gpu/GrGpuResource.cpp
index b2fad0b3f7..a2df7e13e8 100644
--- a/src/gpu/GrGpuResource.cpp
+++ b/src/gpu/GrGpuResource.cpp
@@ -18,16 +18,12 @@ static inline GrResourceCache2* get_resource_cache2(GrGpu* gpu) {
return gpu->getContext()->getResourceCache2();
}
-GrGpuResource::GrGpuResource(GrGpu* gpu, bool isWrapped)
+GrGpuResource::GrGpuResource(GrGpu* gpu, LifeCycle lifeCycle)
: fGpu(gpu)
, fGpuMemorySize(kInvalidGpuMemorySize)
+ , fFlags(0)
+ , fLifeCycle(lifeCycle)
, fUniqueID(CreateUniqueID()) {
- if (isWrapped) {
- fFlags = kWrapped_Flag;
- } else {
- // By default all non-wrapped resources are budgeted.
- fFlags = kBudgeted_Flag;
- }
}
void GrGpuResource::registerWithCache() {
@@ -146,21 +142,3 @@ uint32_t GrGpuResource::CreateUniqueID() {
} while (id == SK_InvalidUniqueID);
return id;
}
-
-void GrGpuResource::setBudgeted(bool countsAgainstBudget) {
- // Wrapped resources never count against the budget, nothing to do. No point in changing the
- // budgeting of destroyed resources.
- if (this->isWrapped() || this->wasDestroyed()) {
- return;
- }
-
- uint32_t oldFlags = fFlags;
- if (countsAgainstBudget) {
- fFlags |= kBudgeted_Flag;
- } else {
- fFlags &= ~kBudgeted_Flag;
- }
- if (fFlags != oldFlags) {
- get_resource_cache2(fGpu)->resourceAccess().didChangeBudgetStatus(this);
- }
-}
diff --git a/src/gpu/GrGpuResourceCacheAccess.h b/src/gpu/GrGpuResourceCacheAccess.h
index 475317ddb2..0aadb89d97 100644
--- a/src/gpu/GrGpuResourceCacheAccess.h
+++ b/src/gpu/GrGpuResourceCacheAccess.h
@@ -29,11 +29,6 @@ public:
}
/**
- * Changes whether the resource counts against the resource cache budget.
- */
- void setBudgeted(bool countsAgainstBudget) { fResource->setBudgeted(countsAgainstBudget); }
-
- /**
* Is the resource currently cached as scratch? This means it has a valid scratch key and does
* not have a content key.
*/
@@ -67,16 +62,12 @@ public:
/**
* Is the resource object wrapping an externally allocated GPU resource?
*/
- bool isWrapped() const { return fResource->isWrapped(); }
+ bool isWrapped() const { return GrGpuResource::kWrapped_LifeCycle == fResource->fLifeCycle; }
/**
* Does the resource count against the resource budget?
*/
- bool isBudgeted() const {
- bool ret = SkToBool(GrGpuResource::kBudgeted_Flag & fResource->fFlags);
- SkASSERT(!(ret && fResource->isWrapped()));
- return ret;
- }
+ bool isBudgeted() const { return GrGpuResource::kCached_LifeCycle == fResource->fLifeCycle; }
/**
* Called by the cache to delete the resource under normal circumstances.
diff --git a/src/gpu/GrIndexBuffer.h b/src/gpu/GrIndexBuffer.h
index 113b89d307..4dfd1c1b07 100644
--- a/src/gpu/GrIndexBuffer.h
+++ b/src/gpu/GrIndexBuffer.h
@@ -24,8 +24,8 @@ public:
return static_cast<int>(this->gpuMemorySize() / (sizeof(uint16_t) * 6));
}
protected:
- GrIndexBuffer(GrGpu* gpu, bool isWrapped, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
- : INHERITED(gpu, isWrapped, gpuMemorySize, dynamic, cpuBacked) {}
+ GrIndexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
+ : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {}
private:
typedef GrGeometryBuffer INHERITED;
};
diff --git a/src/gpu/GrPath.h b/src/gpu/GrPath.h
index 394db6f5c2..eaa3b40fdb 100644
--- a/src/gpu/GrPath.h
+++ b/src/gpu/GrPath.h
@@ -20,8 +20,8 @@ public:
/**
* Initialize to a path with a fixed stroke. Stroke must not be hairline.
*/
- GrPath(GrGpu* gpu, bool isWrapped, const SkPath& skPath, const SkStrokeRec& stroke)
- : INHERITED(gpu, isWrapped),
+ GrPath(GrGpu* gpu, const SkPath& skPath, const SkStrokeRec& stroke)
+ : INHERITED(gpu, kCached_LifeCycle),
fSkPath(skPath),
fStroke(stroke),
fBounds(skPath.getBounds()) {
diff --git a/src/gpu/GrPathRange.cpp b/src/gpu/GrPathRange.cpp
index cfe89feaf6..8bac750b44 100644
--- a/src/gpu/GrPathRange.cpp
+++ b/src/gpu/GrPathRange.cpp
@@ -15,7 +15,7 @@ enum {
GrPathRange::GrPathRange(GrGpu* gpu,
PathGenerator* pathGenerator,
const SkStrokeRec& stroke)
- : INHERITED(gpu, kIsWrapped),
+ : INHERITED(gpu, kCached_LifeCycle),
fPathGenerator(SkRef(pathGenerator)),
fNumPaths(fPathGenerator->getNumPaths()),
fStroke(stroke) {
@@ -27,7 +27,7 @@ GrPathRange::GrPathRange(GrGpu* gpu,
GrPathRange::GrPathRange(GrGpu* gpu,
int numPaths,
const SkStrokeRec& stroke)
- : INHERITED(gpu, kIsWrapped),
+ : INHERITED(gpu, kCached_LifeCycle),
fNumPaths(numPaths),
fStroke(stroke) {
}
diff --git a/src/gpu/GrPathRange.h b/src/gpu/GrPathRange.h
index 014b7ece74..86883e3c27 100644
--- a/src/gpu/GrPathRange.h
+++ b/src/gpu/GrPathRange.h
@@ -26,8 +26,6 @@ class GrPathRange : public GrGpuResource {
public:
SK_DECLARE_INST_COUNT(GrPathRange);
- static const bool kIsWrapped = false;
-
enum PathIndexType {
kU8_PathIndexType, //!< uint8_t
kU16_PathIndexType, //!< uint16_t
diff --git a/src/gpu/GrResourceCache2.cpp b/src/gpu/GrResourceCache2.cpp
index 1db96ec1cc..e23f9688ac 100644
--- a/src/gpu/GrResourceCache2.cpp
+++ b/src/gpu/GrResourceCache2.cpp
@@ -261,10 +261,9 @@ void GrResourceCache2::notifyPurgable(GrGpuResource* resource) {
bool noKey = !resource->cacheAccess().isScratch() &&
(NULL == resource->cacheAccess().getContentKey());
- // Wrapped resources should never have a key.
- SkASSERT(noKey || !resource->cacheAccess().isWrapped());
+ // Only cached resources should ever have a key.
+ SkASSERT(noKey || resource->cacheAccess().isBudgeted());
- // And purge if the resource is wrapped
if (overBudget || noKey) {
SkDEBUGCODE(int beforeCount = fCount;)
resource->cacheAccess().release();
@@ -316,7 +315,6 @@ void GrResourceCache2::didChangeBudgetStatus(GrGpuResource* resource) {
this->validate();
}
-
void GrResourceCache2::internalPurgeAsNeeded() {
SkASSERT(!fPurging);
SkASSERT(!fNewlyPurgableResourceWhilePurging);
diff --git a/src/gpu/GrStencilBuffer.h b/src/gpu/GrStencilBuffer.h
index 11eceddb4e..883a982ac4 100644
--- a/src/gpu/GrStencilBuffer.h
+++ b/src/gpu/GrStencilBuffer.h
@@ -50,8 +50,8 @@ public:
static void ComputeKey(int width, int height, int sampleCnt, GrScratchKey* key);
protected:
- GrStencilBuffer(GrGpu* gpu, bool isWrapped, int width, int height, int bits, int sampleCnt)
- : GrGpuResource(gpu, isWrapped)
+ GrStencilBuffer(GrGpu* gpu, int width, int height, int bits, int sampleCnt)
+ : GrGpuResource(gpu, kCached_LifeCycle)
, fWidth(width)
, fHeight(height)
, fBits(bits)
diff --git a/src/gpu/GrTest.cpp b/src/gpu/GrTest.cpp
index ec31307c1a..d7098533a0 100644
--- a/src/gpu/GrTest.cpp
+++ b/src/gpu/GrTest.cpp
@@ -87,13 +87,12 @@ public:
private:
void onResetContext(uint32_t resetBits) SK_OVERRIDE {}
- GrTexture* onCreateTexture(const GrSurfaceDesc& desc,
- const void* srcData,
+ GrTexture* onCreateTexture(const GrSurfaceDesc& desc, bool budgeted, const void* srcData,
size_t rowBytes) SK_OVERRIDE {
return NULL;
}
- GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,
+ GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc, bool budgeted,
const void* srcData) SK_OVERRIDE {
return NULL;
}
diff --git a/src/gpu/GrTexture.cpp b/src/gpu/GrTexture.cpp
index eb518f5635..378e9900f7 100644
--- a/src/gpu/GrTexture.cpp
+++ b/src/gpu/GrTexture.cpp
@@ -118,11 +118,11 @@ GrSurfaceOrigin resolve_origin(const GrSurfaceDesc& desc) {
}
//////////////////////////////////////////////////////////////////////////////
-GrTexture::GrTexture(GrGpu* gpu, bool isWrapped, const GrSurfaceDesc& desc)
- : INHERITED(gpu, isWrapped, desc)
+GrTexture::GrTexture(GrGpu* gpu, LifeCycle lifeCycle, const GrSurfaceDesc& desc)
+ : INHERITED(gpu, lifeCycle, desc)
, fMipMapsStatus(kNotAllocated_MipMapsStatus) {
- if (!isWrapped) {
+ if (kCached_LifeCycle == lifeCycle) {
GrScratchKey key;
GrTexturePriv::ComputeScratchKey(desc, &key);
this->setScratchKey(key);
diff --git a/src/gpu/GrVertexBuffer.h b/src/gpu/GrVertexBuffer.h
index c3cf534892..3f2ada2a2c 100644
--- a/src/gpu/GrVertexBuffer.h
+++ b/src/gpu/GrVertexBuffer.h
@@ -15,8 +15,8 @@
class GrVertexBuffer : public GrGeometryBuffer {
protected:
- GrVertexBuffer(GrGpu* gpu, bool isWrapped, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
- : INHERITED(gpu, isWrapped, gpuMemorySize, dynamic, cpuBacked) {}
+ GrVertexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
+ : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {}
private:
typedef GrGeometryBuffer INHERITED;
};
diff --git a/src/gpu/gl/GrGLBufferImpl.cpp b/src/gpu/gl/GrGLBufferImpl.cpp
index b65fe5b346..5d5ca88b2d 100644
--- a/src/gpu/gl/GrGLBufferImpl.cpp
+++ b/src/gpu/gl/GrGLBufferImpl.cpp
@@ -41,7 +41,7 @@ void GrGLBufferImpl::release(GrGLGpu* gpu) {
if (fCPUData) {
sk_free(fCPUData);
fCPUData = NULL;
- } else if (fDesc.fID && !fDesc.fIsWrapped) {
+ } else if (fDesc.fID) {
GL_CALL(gpu, DeleteBuffers(1, &fDesc.fID));
if (GR_GL_ARRAY_BUFFER == fBufferType) {
gpu->notifyVertexBufferDelete(fDesc.fID);
@@ -219,7 +219,6 @@ void GrGLBufferImpl::validate() const {
SkASSERT(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
// The following assert isn't valid when the buffer has been abandoned:
// SkASSERT((0 == fDesc.fID) == (fCPUData));
- SkASSERT(0 != fDesc.fID || !fDesc.fIsWrapped);
SkASSERT(NULL == fCPUData || 0 == fGLSizeInBytes);
SkASSERT(NULL == fMapPtr || fCPUData || fGLSizeInBytes == fDesc.fSizeInBytes);
SkASSERT(NULL == fCPUData || NULL == fMapPtr || fCPUData == fMapPtr);
diff --git a/src/gpu/gl/GrGLBufferImpl.h b/src/gpu/gl/GrGLBufferImpl.h
index 8617a897d4..5526ff93f1 100644
--- a/src/gpu/gl/GrGLBufferImpl.h
+++ b/src/gpu/gl/GrGLBufferImpl.h
@@ -20,7 +20,6 @@ class GrGLGpu;
class GrGLBufferImpl : SkNoncopyable {
public:
struct Desc {
- bool fIsWrapped;
GrGLuint fID; // set to 0 to indicate buffer is CPU-backed and not a VBO.
size_t fSizeInBytes;
bool fDynamic;
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index b99d30ed64..182d258708 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -377,7 +377,7 @@ GrTexture* GrGLGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc) {
GrSurfaceDesc surfDesc;
idDesc.fTextureID = static_cast<GrGLuint>(desc.fTextureHandle);
- idDesc.fIsWrapped = true;
+ idDesc.fLifeCycle = GrGpuResource::kWrapped_LifeCycle;
// next line relies on GrBackendTextureDesc's flags matching GrTexture's
surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
@@ -399,7 +399,7 @@ GrTexture* GrGLGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc) {
GrGLTexture* texture = NULL;
if (renderTarget) {
GrGLRenderTarget::IDDesc rtIDDesc;
- if (!this->createRenderTargetObjects(surfDesc, idDesc.fTextureID, &rtIDDesc)) {
+ if (!this->createRenderTargetObjects(surfDesc, false, idDesc.fTextureID, &rtIDDesc)) {
return NULL;
}
texture = SkNEW_ARGS(GrGLTextureRenderTarget, (this, surfDesc, idDesc, rtIDDesc));
@@ -418,7 +418,7 @@ GrRenderTarget* GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDe
idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle);
idDesc.fMSColorRenderbufferID = 0;
idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
- idDesc.fIsWrapped = true;
+ idDesc.fLifeCycle = GrGpuResource::kWrapped_LifeCycle;
GrSurfaceDesc desc;
desc.fConfig = wrapDesc.fConfig;
@@ -435,10 +435,8 @@ GrRenderTarget* GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDe
format.fPacked = false;
format.fStencilBits = wrapDesc.fStencilBits;
format.fTotalBits = wrapDesc.fStencilBits;
- static const bool kIsSBWrapped = false;
GrGLStencilBuffer* sb = SkNEW_ARGS(GrGLStencilBuffer,
(this,
- kIsSBWrapped,
0,
desc.fWidth,
desc.fHeight,
@@ -795,12 +793,13 @@ static bool renderbuffer_storage_msaa(GrGLContext& ctx,
return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));;
}
-bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc, GrGLuint texID,
+bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc, bool budgeted, GrGLuint texID,
GrGLRenderTarget::IDDesc* idDesc) {
idDesc->fMSColorRenderbufferID = 0;
idDesc->fRTFBOID = 0;
idDesc->fTexFBOID = 0;
- idDesc->fIsWrapped = false;
+ idDesc->fLifeCycle = budgeted ? GrGpuResource::kCached_LifeCycle :
+ GrGpuResource::kUncached_LifeCycle;
GrGLenum status;
@@ -913,12 +912,10 @@ static size_t as_size_t(int x) {
}
#endif
-GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& origDesc,
- const void* srcData,
- size_t rowBytes) {
+GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& origDesc, bool budgeted,
+ const void* srcData, size_t rowBytes) {
GrSurfaceDesc desc = origDesc;
- GrGLRenderTarget::IDDesc rtIDDesc;
// Attempt to catch un- or wrongly initialized sample counts;
SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64);
@@ -934,11 +931,6 @@ GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& origDesc,
desc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
desc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
- rtIDDesc.fMSColorRenderbufferID = 0;
- rtIDDesc.fRTFBOID = 0;
- rtIDDesc.fTexFBOID = 0;
- rtIDDesc.fIsWrapped = false;
-
if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
//SkDebugf("MSAA RT requested but not supported on this platform.");
return return_null_texture();
@@ -958,7 +950,8 @@ GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& origDesc,
GrGLTexture::IDDesc idDesc;
GL_CALL(GenTextures(1, &idDesc.fTextureID));
- idDesc.fIsWrapped = false;
+ idDesc.fLifeCycle = budgeted ? GrGpuResource::kCached_LifeCycle :
+ GrGpuResource::kUncached_LifeCycle;
if (!idDesc.fTextureID) {
return return_null_texture();
@@ -1007,8 +1000,9 @@ GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& origDesc,
if (renderTarget) {
// unbind the texture from the texture unit before binding it to the frame buffer
GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
+ GrGLRenderTarget::IDDesc rtIDDesc;
- if (!this->createRenderTargetObjects(desc, idDesc.fTextureID, &rtIDDesc)) {
+ if (!this->createRenderTargetObjects(desc, budgeted, idDesc.fTextureID, &rtIDDesc)) {
GL_CALL(DeleteTextures(1, &idDesc.fTextureID));
return return_null_texture();
}
@@ -1024,7 +1018,8 @@ GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& origDesc,
return tex;
}
-GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& origDesc, const void* srcData) {
+GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& origDesc, bool budgeted,
+ const void* srcData) {
if(SkToBool(origDesc.fFlags & kRenderTarget_GrSurfaceFlag) || origDesc.fSampleCnt > 0) {
return return_null_texture();
@@ -1045,7 +1040,8 @@ GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& origDesc, con
GrGLTexture::IDDesc idDesc;
GL_CALL(GenTextures(1, &idDesc.fTextureID));
- idDesc.fIsWrapped = false;
+ idDesc.fLifeCycle = budgeted ? GrGpuResource::kCached_LifeCycle :
+ GrGpuResource::kUncached_LifeCycle;
if (!idDesc.fTextureID) {
return return_null_texture();
@@ -1163,10 +1159,8 @@ bool GrGLGpu::createStencilBufferForRenderTarget(GrRenderTarget* rt, int width,
// whatever sizes GL gives us. In that case we query for the size.
GrGLStencilBuffer::Format format = sFmt;
get_stencil_rb_sizes(this->glInterface(), &format);
- static const bool kIsWrapped = false;
SkAutoTUnref<GrStencilBuffer> sb(SkNEW_ARGS(GrGLStencilBuffer,
- (this, kIsWrapped, sbID, width, height,
- samples, format)));
+ (this, sbID, width, height, samples, format)));
if (this->attachStencilBufferToRenderTarget(sb, rt)) {
fLastSuccessfulStencilFmtIdx = sIdx;
rt->setStencilBuffer(sb);
@@ -1252,7 +1246,6 @@ GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
GrGLVertexBuffer::Desc desc;
desc.fDynamic = dynamic;
desc.fSizeInBytes = size;
- desc.fIsWrapped = false;
if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
desc.fID = 0;
@@ -1285,7 +1278,6 @@ GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
GrGLIndexBuffer::Desc desc;
desc.fDynamic = dynamic;
desc.fSizeInBytes = size;
- desc.fIsWrapped = false;
if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
desc.fID = 0;
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index 3809dc7bdf..88cf6f6d53 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -114,10 +114,9 @@ private:
// GrGpu overrides
void onResetContext(uint32_t resetBits) SK_OVERRIDE;
- GrTexture* onCreateTexture(const GrSurfaceDesc& desc,
- const void* srcData,
+ GrTexture* onCreateTexture(const GrSurfaceDesc& desc, bool budgeted, const void* srcData,
size_t rowBytes) SK_OVERRIDE;
- GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,
+ GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc, bool budgeted,
const void* srcData) SK_OVERRIDE;
GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) SK_OVERRIDE;
GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) SK_OVERRIDE;
@@ -278,7 +277,8 @@ private:
int left = 0, int top = 0,
int width = -1, int height = -1);
- bool createRenderTargetObjects(const GrSurfaceDesc&, GrGLuint texID, GrGLRenderTarget::IDDesc*);
+ bool createRenderTargetObjects(const GrSurfaceDesc&, bool budgeted, GrGLuint texID,
+ GrGLRenderTarget::IDDesc*);
GrGLuint bindSurfaceAsFBO(GrSurface* surface, GrGLenum fboTarget, GrGLIRect* viewport);
diff --git a/src/gpu/gl/GrGLIndexBuffer.cpp b/src/gpu/gl/GrGLIndexBuffer.cpp
index 2d4b9a88a5..52cf5f87f7 100644
--- a/src/gpu/gl/GrGLIndexBuffer.cpp
+++ b/src/gpu/gl/GrGLIndexBuffer.cpp
@@ -9,7 +9,7 @@
#include "GrGLGpu.h"
GrGLIndexBuffer::GrGLIndexBuffer(GrGLGpu* gpu, const Desc& desc)
- : INHERITED(gpu, desc.fIsWrapped, desc.fSizeInBytes, desc.fDynamic, 0 == desc.fID)
+ : INHERITED(gpu, desc.fSizeInBytes, desc.fDynamic, 0 == desc.fID)
, fImpl(gpu, desc, GR_GL_ELEMENT_ARRAY_BUFFER) {
this->registerWithCache();
}
diff --git a/src/gpu/gl/GrGLPath.cpp b/src/gpu/gl/GrGLPath.cpp
index f5f66b8f8a..6417e684f9 100644
--- a/src/gpu/gl/GrGLPath.cpp
+++ b/src/gpu/gl/GrGLPath.cpp
@@ -88,8 +88,6 @@ inline void points_to_coords(const SkPoint points[], size_t first_point, size_t
}
}
-static const bool kIsWrapped = false; // The constructor creates the GL path object.
-
void GrGLPath::InitPathObject(GrGLGpu* gpu,
GrGLuint pathID,
const SkPath& skPath,
@@ -184,7 +182,7 @@ void GrGLPath::InitPathObject(GrGLGpu* gpu,
}
GrGLPath::GrGLPath(GrGLGpu* gpu, const SkPath& path, const SkStrokeRec& stroke)
- : INHERITED(gpu, kIsWrapped, path, stroke),
+ : INHERITED(gpu, path, stroke),
fPathID(gpu->glPathRendering()->genPaths(1)) {
InitPathObject(gpu, fPathID, fSkPath, stroke);
diff --git a/src/gpu/gl/GrGLRenderTarget.cpp b/src/gpu/gl/GrGLRenderTarget.cpp
index c513b65bf2..3eb2ae09fe 100644
--- a/src/gpu/gl/GrGLRenderTarget.cpp
+++ b/src/gpu/gl/GrGLRenderTarget.cpp
@@ -14,16 +14,16 @@
// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
GrGLRenderTarget::GrGLRenderTarget(GrGLGpu* gpu, const GrSurfaceDesc& desc, const IDDesc& idDesc)
- : GrSurface(gpu, idDesc.fIsWrapped, desc)
- , INHERITED(gpu, idDesc.fIsWrapped, desc) {
+ : GrSurface(gpu, idDesc.fLifeCycle, desc)
+ , INHERITED(gpu, idDesc.fLifeCycle, desc) {
this->init(desc, idDesc);
this->registerWithCache();
}
GrGLRenderTarget::GrGLRenderTarget(GrGLGpu* gpu, const GrSurfaceDesc& desc, const IDDesc& idDesc,
Derived)
- : GrSurface(gpu, idDesc.fIsWrapped, desc)
- , INHERITED(gpu, idDesc.fIsWrapped, desc) {
+ : GrSurface(gpu, idDesc.fLifeCycle, desc)
+ , INHERITED(gpu, idDesc.fLifeCycle, desc) {
this->init(desc, idDesc);
}
@@ -31,7 +31,7 @@ void GrGLRenderTarget::init(const GrSurfaceDesc& desc, const IDDesc& idDesc) {
fRTFBOID = idDesc.fRTFBOID;
fTexFBOID = idDesc.fTexFBOID;
fMSColorRenderbufferID = idDesc.fMSColorRenderbufferID;
- fIsWrapped = idDesc.fIsWrapped;
+ fIsWrapped = kWrapped_LifeCycle == idDesc.fLifeCycle;
fViewport.fLeft = 0;
fViewport.fBottom = 0;
diff --git a/src/gpu/gl/GrGLRenderTarget.h b/src/gpu/gl/GrGLRenderTarget.h
index 09de571b9d..ed1e2b35d8 100644
--- a/src/gpu/gl/GrGLRenderTarget.h
+++ b/src/gpu/gl/GrGLRenderTarget.h
@@ -22,10 +22,10 @@ public:
enum { kUnresolvableFBOID = 0 };
struct IDDesc {
- GrGLuint fRTFBOID;
- GrGLuint fTexFBOID;
- GrGLuint fMSColorRenderbufferID;
- bool fIsWrapped;
+ GrGLuint fRTFBOID;
+ GrGLuint fTexFBOID;
+ GrGLuint fMSColorRenderbufferID;
+ GrGpuResource::LifeCycle fLifeCycle;
};
GrGLRenderTarget(GrGLGpu*, const GrSurfaceDesc&, const IDDesc&);
diff --git a/src/gpu/gl/GrGLStencilBuffer.h b/src/gpu/gl/GrGLStencilBuffer.h
index c5540914b5..3bfbafcad6 100644
--- a/src/gpu/gl/GrGLStencilBuffer.h
+++ b/src/gpu/gl/GrGLStencilBuffer.h
@@ -24,12 +24,11 @@ public:
};
GrGLStencilBuffer(GrGpu* gpu,
- bool isWrapped,
GrGLint rbid,
int width, int height,
int sampleCnt,
const Format& format)
- : GrStencilBuffer(gpu, isWrapped, width, height, format.fStencilBits, sampleCnt)
+ : GrStencilBuffer(gpu, width, height, format.fStencilBits, sampleCnt)
, fFormat(format)
, fRenderbufferID(rbid) {
this->registerWithCache();
diff --git a/src/gpu/gl/GrGLTexture.cpp b/src/gpu/gl/GrGLTexture.cpp
index 2f4f49b0ce..faa9f35cd4 100644
--- a/src/gpu/gl/GrGLTexture.cpp
+++ b/src/gpu/gl/GrGLTexture.cpp
@@ -13,15 +13,15 @@
// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
GrGLTexture::GrGLTexture(GrGLGpu* gpu, const GrSurfaceDesc& desc, const IDDesc& idDesc)
- : GrSurface(gpu, idDesc.fIsWrapped, desc)
- , INHERITED(gpu, idDesc.fIsWrapped, desc) {
+ : GrSurface(gpu, idDesc.fLifeCycle, desc)
+ , INHERITED(gpu, idDesc.fLifeCycle, desc) {
this->init(desc, idDesc);
this->registerWithCache();
}
GrGLTexture::GrGLTexture(GrGLGpu* gpu, const GrSurfaceDesc& desc, const IDDesc& idDesc, Derived)
- : GrSurface(gpu, idDesc.fIsWrapped, desc)
- , INHERITED(gpu, idDesc.fIsWrapped, desc) {
+ : GrSurface(gpu, idDesc.fLifeCycle, desc)
+ , INHERITED(gpu, idDesc.fLifeCycle, desc) {
this->init(desc, idDesc);
}
@@ -30,7 +30,7 @@ void GrGLTexture::init(const GrSurfaceDesc& desc, const IDDesc& idDesc) {
fTexParams.invalidate();
fTexParamsTimestamp = GrGpu::kExpiredTimestamp;
fTextureID = idDesc.fTextureID;
- fIsWrapped = idDesc.fIsWrapped;
+ fIsWrapped = kWrapped_LifeCycle == idDesc.fLifeCycle;
}
void GrGLTexture::onRelease() {
diff --git a/src/gpu/gl/GrGLTexture.h b/src/gpu/gl/GrGLTexture.h
index cc8adeb7ce..c64865fcd7 100644
--- a/src/gpu/gl/GrGLTexture.h
+++ b/src/gpu/gl/GrGLTexture.h
@@ -27,8 +27,8 @@ public:
};
struct IDDesc {
- GrGLuint fTextureID;
- bool fIsWrapped;
+ GrGLuint fTextureID;
+ GrGpuResource::LifeCycle fLifeCycle;
};
GrGLTexture(GrGLGpu*, const GrSurfaceDesc&, const IDDesc&);
diff --git a/src/gpu/gl/GrGLTextureRenderTarget.h b/src/gpu/gl/GrGLTextureRenderTarget.h
index af0fde4f02..e2dd14c882 100644
--- a/src/gpu/gl/GrGLTextureRenderTarget.h
+++ b/src/gpu/gl/GrGLTextureRenderTarget.h
@@ -28,7 +28,7 @@ public:
const GrSurfaceDesc& desc,
const GrGLTexture::IDDesc& texIDDesc,
const GrGLRenderTarget::IDDesc& rtIDDesc)
- : GrSurface(gpu, texIDDesc.fIsWrapped, desc)
+ : GrSurface(gpu, texIDDesc.fLifeCycle, desc)
, GrGLTexture(gpu, desc, texIDDesc, GrGLTexture::kDerived)
, GrGLRenderTarget(gpu, desc, rtIDDesc, GrGLRenderTarget::kDerived) {
this->registerWithCache();
diff --git a/src/gpu/gl/GrGLVertexArray.cpp b/src/gpu/gl/GrGLVertexArray.cpp
index d6cc42ac54..bb409c6acd 100644
--- a/src/gpu/gl/GrGLVertexArray.cpp
+++ b/src/gpu/gl/GrGLVertexArray.cpp
@@ -69,7 +69,7 @@ void GrGLAttribArrayState::disableUnusedArrays(const GrGLGpu* gpu, uint64_t used
///////////////////////////////////////////////////////////////////////////////////////////////////
GrGLVertexArray::GrGLVertexArray(GrGLGpu* gpu, GrGLint id, int attribCount)
- : INHERITED(gpu, false)
+ : INHERITED(gpu, kCached_LifeCycle)
, fID(id)
, fAttribArrays(attribCount)
, fIndexBufferIDIsValid(false) {
diff --git a/src/gpu/gl/GrGLVertexBuffer.cpp b/src/gpu/gl/GrGLVertexBuffer.cpp
index 5691176125..61936f25bf 100644
--- a/src/gpu/gl/GrGLVertexBuffer.cpp
+++ b/src/gpu/gl/GrGLVertexBuffer.cpp
@@ -9,7 +9,7 @@
#include "GrGLGpu.h"
GrGLVertexBuffer::GrGLVertexBuffer(GrGLGpu* gpu, const Desc& desc)
- : INHERITED(gpu, desc.fIsWrapped, desc.fSizeInBytes, desc.fDynamic, 0 == desc.fID)
+ : INHERITED(gpu, desc.fSizeInBytes, desc.fDynamic, 0 == desc.fID)
, fImpl(gpu, desc, GR_GL_ARRAY_BUFFER) {
this->registerWithCache();
}