author    bsalomon <bsalomon@google.com>    2014-10-08 08:40:09 -0700
committer Commit bot <commit-bot@chromium.org>    2014-10-08 08:40:09 -0700
commit    bcf0a52d4f4221b158e68a06ba0c4cc4db011060 (patch)
tree      6ad69163908f2fa15a6f857b16c1df7c12216ed1
parent    b82b9d577e9d811d88de89594bbd6e697892616c (diff)
GrResourceCache2 manages scratch textures.
-rw-r--r-- bench/GrResourceCacheBench.cpp | 5
-rw-r--r-- gm/texturedomaineffect.cpp | 5
-rw-r--r-- gm/yuvtorgbeffect.cpp | 70
-rw-r--r-- include/gpu/GrContext.h | 57
-rw-r--r-- include/gpu/GrGpuResource.h | 80
-rw-r--r-- include/gpu/GrGpuResourceRef.h | 32
-rw-r--r-- include/gpu/GrTexture.h | 2
-rw-r--r-- include/gpu/GrTypesPriv.h | 9
-rw-r--r-- include/gpu/SkGr.h | 4
-rw-r--r-- include/gpu/SkGrPixelRef.h | 8
-rw-r--r-- src/core/SkBitmapProcShader.cpp | 5
-rw-r--r-- src/core/SkImageFilter.cpp | 5
-rw-r--r-- src/effects/SkPerlinNoiseShader.cpp | 19
-rw-r--r-- src/effects/SkTableColorFilter.cpp | 14
-rw-r--r-- src/effects/gradients/SkGradientShader.cpp | 7
-rw-r--r-- src/gpu/GrClipMaskCache.h | 4
-rwxr-xr-x src/gpu/GrContext.cpp | 209
-rw-r--r-- src/gpu/GrDrawState.h | 2
-rw-r--r-- src/gpu/GrGpuResource.cpp | 15
-rw-r--r-- src/gpu/GrGpuResourceRef.cpp | 22
-rw-r--r-- src/gpu/GrInOrderDrawBuffer.h | 16
-rw-r--r-- src/gpu/GrLayerCache.cpp | 1
-rw-r--r-- src/gpu/GrOptDrawState.cpp | 3
-rw-r--r-- src/gpu/GrResourceCache.cpp | 231
-rw-r--r-- src/gpu/GrResourceCache.h | 107
-rw-r--r-- src/gpu/GrResourceCache2.cpp | 32
-rw-r--r-- src/gpu/GrResourceCache2.h | 9
-rw-r--r-- src/gpu/GrTexture.cpp | 45
-rw-r--r-- src/gpu/GrTextureAccess.cpp | 8
-rw-r--r-- src/gpu/GrTexturePriv.h | 5
-rw-r--r-- src/gpu/SkGpuDevice.cpp | 63
-rw-r--r-- src/gpu/SkGpuDevice.h | 6
-rw-r--r-- src/gpu/SkGr.cpp | 13
-rw-r--r-- src/gpu/SkGrPixelRef.cpp | 11
-rw-r--r-- src/gpu/effects/GrTextureStripAtlas.cpp | 1
-rw-r--r-- src/image/SkSurface_Gpu.cpp | 14
-rw-r--r-- tests/ResourceCacheTest.cpp | 45
37 files changed, 413 insertions(+), 771 deletions(-)
diff --git a/bench/GrResourceCacheBench.cpp b/bench/GrResourceCacheBench.cpp
index 234a62624c..894ec14fca 100644
--- a/bench/GrResourceCacheBench.cpp
+++ b/bench/GrResourceCacheBench.cpp
@@ -11,6 +11,7 @@
#include "Benchmark.h"
#include "GrGpuResource.h"
#include "GrContext.h"
+#include "GrGpu.h"
#include "GrResourceCache.h"
#include "GrStencilBuffer.h"
#include "GrTexture.h"
@@ -185,7 +186,7 @@ protected:
GrGpu* gpu = canvas->getGrContext()->getGpu();
for (int i = 0; i < loops; ++i) {
- GrResourceCache cache(CACHE_SIZE_COUNT, CACHE_SIZE_BYTES);
+ GrResourceCache cache(gpu->caps(), CACHE_SIZE_COUNT, CACHE_SIZE_BYTES);
populate_cache(&cache, gpu, DUPLICATE_COUNT);
populate_cache(&cache, gpu, RESOURCE_COUNT);
@@ -219,7 +220,7 @@ protected:
virtual void onDraw(const int loops, SkCanvas* canvas) SK_OVERRIDE {
GrGpu* gpu = canvas->getGrContext()->getGpu();
- GrResourceCache cache(CACHE_SIZE_COUNT, CACHE_SIZE_BYTES);
+ GrResourceCache cache(gpu->caps(), CACHE_SIZE_COUNT, CACHE_SIZE_BYTES);
populate_cache(&cache, gpu, DUPLICATE_COUNT);
populate_cache(&cache, gpu, RESOURCE_COUNT);
diff --git a/gm/texturedomaineffect.cpp b/gm/texturedomaineffect.cpp
index 6534b0c1dd..acf039513d 100644
--- a/gm/texturedomaineffect.cpp
+++ b/gm/texturedomaineffect.cpp
@@ -92,8 +92,8 @@ protected:
GrDrawState* drawState = tt.target()->drawState();
- GrTexture* texture = GrLockAndRefCachedBitmapTexture(context, fBmp, NULL);
- if (NULL == texture) {
+ SkAutoTUnref<GrTexture> texture(GrRefCachedBitmapTexture(context, fBmp, NULL));
+ if (!texture) {
return;
}
@@ -144,7 +144,6 @@ protected:
y += renderRect.height() + kTestPad;
}
}
- GrUnlockAndUnrefCachedBitmapTexture(texture);
}
private:
diff --git a/gm/yuvtorgbeffect.cpp b/gm/yuvtorgbeffect.cpp
index 026823e8ee..b5c6b95314 100644
--- a/gm/yuvtorgbeffect.cpp
+++ b/gm/yuvtorgbeffect.cpp
@@ -83,11 +83,12 @@ protected:
GrDrawState* drawState = tt.target()->drawState();
- GrTexture* texture[3];
- texture[0] = GrLockAndRefCachedBitmapTexture(context, fBmp[0], NULL);
- texture[1] = GrLockAndRefCachedBitmapTexture(context, fBmp[1], NULL);
- texture[2] = GrLockAndRefCachedBitmapTexture(context, fBmp[2], NULL);
- if ((NULL == texture[0]) || (NULL == texture[1]) || (NULL == texture[2])) {
+ SkAutoTUnref<GrTexture> texture[3];
+ texture[0].reset(GrRefCachedBitmapTexture(context, fBmp[0], NULL));
+ texture[1].reset(GrRefCachedBitmapTexture(context, fBmp[1], NULL));
+ texture[2].reset(GrRefCachedBitmapTexture(context, fBmp[2], NULL));
+
+ if (!texture[0] || !texture[1] || !texture[2]) {
return;
}
@@ -97,38 +98,35 @@ protected:
for (int space = kJPEG_SkYUVColorSpace; space <= kLastEnum_SkYUVColorSpace;
++space) {
- SkRect renderRect = SkRect::MakeWH(SkIntToScalar(fBmp[0].width()),
- SkIntToScalar(fBmp[0].height()));
- renderRect.outset(kDrawPad, kDrawPad);
-
- SkScalar y = kDrawPad + kTestPad + space * kColorSpaceOffset;
- SkScalar x = kDrawPad + kTestPad;
-
- const int indices[6][3] = {{0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}};
-
- for (int i = 0; i < 6; ++i) {
- SkAutoTUnref<GrFragmentProcessor> fp(
- GrYUVtoRGBEffect::Create(texture[indices[i][0]],
- texture[indices[i][1]],
- texture[indices[i][2]],
- static_cast<SkYUVColorSpace>(space)));
- if (fp) {
- SkMatrix viewMatrix;
- viewMatrix.setTranslate(x, y);
- drawState->reset(viewMatrix);
- drawState->setRenderTarget(rt);
- drawState->setColor(0xffffffff);
- drawState->addColorProcessor(fp);
- tt.target()->drawSimpleRect(renderRect);
- }
- x += renderRect.width() + kTestPad;
- }
+ SkRect renderRect = SkRect::MakeWH(SkIntToScalar(fBmp[0].width()),
+ SkIntToScalar(fBmp[0].height()));
+ renderRect.outset(kDrawPad, kDrawPad);
+
+ SkScalar y = kDrawPad + kTestPad + space * kColorSpaceOffset;
+ SkScalar x = kDrawPad + kTestPad;
+
+ const int indices[6][3] = {{0, 1, 2}, {0, 2, 1}, {1, 0, 2},
+ {1, 2, 0}, {2, 0, 1}, {2, 1, 0}};
+
+ for (int i = 0; i < 6; ++i) {
+ SkAutoTUnref<GrFragmentProcessor> fp(
+ GrYUVtoRGBEffect::Create(texture[indices[i][0]],
+ texture[indices[i][1]],
+ texture[indices[i][2]],
+ static_cast<SkYUVColorSpace>(space)));
+ if (fp) {
+ SkMatrix viewMatrix;
+ viewMatrix.setTranslate(x, y);
+ drawState->reset(viewMatrix);
+ drawState->setRenderTarget(rt);
+ drawState->setColor(0xffffffff);
+ drawState->addColorProcessor(fp);
+ tt.target()->drawSimpleRect(renderRect);
+ }
+ x += renderRect.width() + kTestPad;
+ }
}
-
- GrUnlockAndUnrefCachedBitmapTexture(texture[0]);
- GrUnlockAndUnrefCachedBitmapTexture(texture[1]);
- GrUnlockAndUnrefCachedBitmapTexture(texture[2]);
- }
+ }
private:
SkBitmap fBmp[3];
diff --git a/include/gpu/GrContext.h b/include/gpu/GrContext.h
index 4500936b82..6d89d4668a 100644
--- a/include/gpu/GrContext.h
+++ b/include/gpu/GrContext.h
@@ -290,15 +290,13 @@ public:
* tiling non-power-of-two textures on APIs that don't support this (e.g.
* unextended GLES2). Tiling a NPOT texture created by lockScratchTexture on
* such an API will create gaps in the tiling pattern. This includes clamp
- * mode. (This may be addressed in a future update.)
- */
- GrTexture* lockAndRefScratchTexture(const GrTextureDesc&, ScratchTexMatch match);
-
- /**
- * When done with an entry, call unlockScratchTexture(entry) on it, which returns
- * it to the cache, where it may be purged. This does not unref the texture.
+ * mode. (This may be addressed in a future update.)
+ *
+ * internalFlag is a temporary workaround until changes in the internal
+ * architecture are complete. Use the default value.
*/
- void unlockScratchTexture(GrTexture* texture);
+ GrTexture* lockAndRefScratchTexture(const GrTextureDesc&, ScratchTexMatch match,
+ bool internalFlag = false);
/**
* Creates a texture that is outside the cache. Does not count against
@@ -951,6 +949,7 @@ public:
GrDrawTarget* getTextTarget();
const GrIndexBuffer* getQuadIndexBuffer() const;
GrAARectRenderer* getAARectRenderer() { return fAARectRenderer; }
+ GrResourceCache* getResourceCache() { return fResourceCache; }
GrResourceCache2* getResourceCache2() { return fResourceCache2; }
// Called by tests that draw directly to the context via GrDrawTarget
@@ -1075,15 +1074,7 @@ private:
size_t rowBytes,
bool filter);
- // Needed so GrTexture's returnToCache helper function can call
- // addExistingTextureToCache
- friend class GrTexture;
- friend class GrStencilAndCoverPathRenderer;
- friend class GrStencilAndCoverTextContext;
-
- // Add an existing texture to the texture cache. This is intended solely
- // for use with textures released from an GrAutoScratchTexture.
- void addExistingTextureToCache(GrTexture* texture);
+ GrTexture* createNewScratchTexture(const GrTextureDesc& desc);
/**
* These functions create premul <-> unpremul effects if it is possible to generate a pair
@@ -1103,8 +1094,7 @@ private:
};
/**
- * Gets and locks a scratch texture from a descriptor using either exact or approximate criteria.
- * Unlocks texture in the destructor.
+ * This is deprecated. Don't use it.
*/
class SK_API GrAutoScratchTexture : public ::SkNoncopyable {
public:
@@ -1115,10 +1105,11 @@ public:
GrAutoScratchTexture(GrContext* context,
const GrTextureDesc& desc,
- GrContext::ScratchTexMatch match = GrContext::kApprox_ScratchTexMatch)
+ GrContext::ScratchTexMatch match = GrContext::kApprox_ScratchTexMatch,
+ bool internalFlag = false)
: fContext(NULL)
, fTexture(NULL) {
- this->set(context, desc, match);
+ this->set(context, desc, match, internalFlag);
}
~GrAutoScratchTexture() {
@@ -1127,34 +1118,26 @@ public:
void reset() {
if (fContext && fTexture) {
- fContext->unlockScratchTexture(fTexture);
fTexture->unref();
fTexture = NULL;
}
}
- /*
- * When detaching a texture we do not unlock it in the texture cache but
- * we do set the returnToCache flag. In this way the texture remains
- * "locked" in the texture cache until it is freed and recycled in
- * GrTexture::internal_dispose. In reality, the texture has been removed
- * from the cache (because this is in AutoScratchTexture) and by not
- * calling unlockScratchTexture we simply don't re-add it. It will be
- * reattached in GrTexture::internal_dispose.
- *
- * Note that the caller is assumed to accept and manage the ref to the
- * returned texture.
- */
- GrTexture* detach();
+ GrTexture* detach() {
+ GrTexture* texture = fTexture;
+ fTexture = NULL;
+ return texture;
+ }
GrTexture* set(GrContext* context,
const GrTextureDesc& desc,
- GrContext::ScratchTexMatch match = GrContext::kApprox_ScratchTexMatch) {
+ GrContext::ScratchTexMatch match = GrContext::kApprox_ScratchTexMatch,
+ bool internalFlag = false) {
this->reset();
fContext = context;
if (fContext) {
- fTexture = fContext->lockAndRefScratchTexture(desc, match);
+ fTexture = fContext->lockAndRefScratchTexture(desc, match, internalFlag);
if (NULL == fTexture) {
fContext = NULL;
}
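
With unlockScratchTexture() gone, detach() in the header above reduces to handing the caller the one outstanding ref; the cache keeps its own bookkeeping and simply sees the texture as purgable again when that ref is dropped. A minimal caller-side sketch of the (now deprecated) helper under the new scheme — the function name is illustrative, not part of this patch:

#include "GrContext.h"

// Illustrative only: acquire an approx-match scratch texture and take ownership of it.
static GrTexture* acquire_scratch(GrContext* context, const GrTextureDesc& desc) {
    GrAutoScratchTexture ast(context, desc, GrContext::kApprox_ScratchTexMatch);
    // detach() just transfers the single caller ref (NULL if acquisition failed).
    // Once the caller unrefs the returned texture it becomes purgable and the
    // resource cache decides whether to recycle or delete it.
    return ast.detach();
}
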
diff --git a/include/gpu/GrGpuResource.h b/include/gpu/GrGpuResource.h
index 61849e7232..0d523249e9 100644
--- a/include/gpu/GrGpuResource.h
+++ b/include/gpu/GrGpuResource.h
@@ -8,9 +8,10 @@
#ifndef GrGpuResource_DEFINED
#define GrGpuResource_DEFINED
+#include "GrResourceKey.h"
+#include "GrTypesPriv.h"
#include "SkInstCnt.h"
#include "SkTInternalLList.h"
-#include "GrResourceKey.h"
class GrResourceCacheEntry;
class GrResourceCache2;
@@ -26,50 +27,39 @@ class GrContext;
* 1) Normal ref (+ by ref(), - by unref()): These are used by code that is issuing draw calls
* that read and write the resource via GrDrawTarget and by any object that must own a
* GrGpuResource and is itself owned (directly or indirectly) by Skia-client code.
- * 2) Pending read (+ by addPendingRead(), - by readCompleted()): GrContext has scheduled a read
+ * 2) Pending read (+ by addPendingRead(), - by completedRead()): GrContext has scheduled a read
* of the resource by the GPU as a result of a skia API call but hasn't executed it yet.
- * 3) Pending write (+ by addPendingWrite(), - by writeCompleted()): GrContext has scheduled a
+ * 3) Pending write (+ by addPendingWrite(), - by completedWrite()): GrContext has scheduled a
* write to the resource by the GPU as a result of a skia API call but hasn't executed it yet.
*
* The latter two ref types are private and intended only for Gr core code.
+ *
+ * When an item is purgable, DERIVED::notifyIsPurgable() will be called (static polymorphism using
+ * CRTP). GrIORef and GrGpuResource are separate classes for organizational reasons and to be
+ * able to give access via friendship only to the functions related to pending IO operations.
*/
-class GrIORef : public SkNoncopyable {
+template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
SK_DECLARE_INST_COUNT_ROOT(GrIORef)
-
- enum IOType {
- kRead_IOType,
- kWrite_IOType,
- kRW_IOType
- };
-
virtual ~GrIORef();
// Some of the signatures are written to mirror SkRefCnt so that GrGpuResource can work with
// templated helper classes (e.g. SkAutoTUnref). However, we have different categories of
// refs (e.g. pending reads). We also don't require thread safety as GrCacheable objects are
// not intended to cross thread boundaries.
- // internal_dispose() exists because of GrTexture's reliance on it. It will be removed
- // soon.
void ref() const {
- ++fRefCnt;
- // pre-validate once internal_dispose is removed (and therefore 0 ref cnt is not allowed).
this->validate();
+ ++fRefCnt;
}
void unref() const {
this->validate();
--fRefCnt;
- if (0 == fRefCnt && 0 == fPendingReads && 0 == fPendingWrites) {
- this->internal_dispose();
- }
+ this->didUnref();
}
- virtual void internal_dispose() const { SkDELETE(this); }
-
- /** This is exists to service the old mechanism for recycling scratch textures. It will
- be removed soon. */
- bool unique() const { return 1 == (fRefCnt + fPendingReads + fPendingWrites); }
+ bool isPurgable() const { return this->reffedOnlyByCache() && !this->internalHasPendingIO(); }
+ bool reffedOnlyByCache() const { return 1 == fRefCnt; }
void validate() const {
#ifdef SK_DEBUG
@@ -80,9 +70,8 @@ public:
#endif
}
-
protected:
- GrIORef() : fRefCnt(1), fPendingReads(0), fPendingWrites(0) {}
+ GrIORef() : fRefCnt(1), fPendingReads(0), fPendingWrites(0), fIsScratch(kNo_IsScratch) { }
bool internalHasPendingRead() const { return SkToBool(fPendingReads); }
bool internalHasPendingWrite() const { return SkToBool(fPendingWrites); }
@@ -97,9 +86,7 @@ private:
void completedRead() const {
this->validate();
--fPendingReads;
- if (0 == fRefCnt && 0 == fPendingReads && 0 == fPendingWrites) {
- this->internal_dispose();
- }
+ this->didUnref();
}
void addPendingWrite() const {
@@ -110,25 +97,45 @@ private:
void completedWrite() const {
this->validate();
--fPendingWrites;
- if (0 == fRefCnt && 0 == fPendingReads && 0 == fPendingWrites) {
- this->internal_dispose();
- }
+ this->didUnref();
}
private:
+ void didUnref() const {
+ if (0 == fPendingReads && 0 == fPendingWrites) {
+ if (0 == fRefCnt) {
+ SkDELETE(this);
+ } else if (1 == fRefCnt) {
+ // The one ref is the cache's
+ static_cast<const DERIVED*>(this)->notifyIsPurgable();
+ }
+ }
+ }
+
mutable int32_t fRefCnt;
mutable int32_t fPendingReads;
mutable int32_t fPendingWrites;
// This class is used to manage conversion of refs to pending reads/writes.
friend class GrGpuResourceRef;
- template <typename, IOType> friend class GrPendingIOResource;
+
+ // This is temporary until GrResourceCache is fully replaced by GrResourceCache2.
+ enum IsScratch {
+ kNo_IsScratch,
+ kYes_IsScratch
+ } fIsScratch;
+
+ friend class GrContext; // to set the above field.
+ friend class GrResourceCache; // to check the above field.
+ friend class GrResourceCache2; // to check the above field.
+
+ template <typename, GrIOType> friend class GrPendingIOResource;
};
/**
* Base class for objects that can be kept in the GrResourceCache.
*/
-class GrGpuResource : public GrIORef {
+class GrGpuResource : public GrIORef<GrGpuResource> {
public:
SK_DECLARE_INST_COUNT(GrGpuResource)
@@ -175,7 +182,7 @@ public:
virtual size_t gpuMemorySize() const = 0;
void setCacheEntry(GrResourceCacheEntry* cacheEntry) { fCacheEntry = cacheEntry; }
- GrResourceCacheEntry* getCacheEntry() { return fCacheEntry; }
+ GrResourceCacheEntry* getCacheEntry() const { return fCacheEntry; }
/**
* If this resource can be used as a scratch resource this returns a valid
@@ -224,6 +231,8 @@ protected:
void setScratchKey(const GrResourceKey& scratchKey);
private:
+ void notifyIsPurgable() const;
+
#ifdef SK_DEBUG
friend class GrGpu; // for assert in GrGpu to access getGpu
#endif
@@ -253,7 +262,8 @@ private:
GrResourceKey fScratchKey;
- typedef GrIORef INHERITED;
+ typedef GrIORef<GrGpuResource> INHERITED;
+ friend class GrIORef<GrGpuResource>; // to access notifyIsPurgable.
};
#endif
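
The GrIORef change above hinges on the curiously recurring template pattern: the base class does the ref and pending-IO counting, then statically downcasts to the derived type to deliver the "now purgable" notification without a virtual call. A stripped-down, self-contained sketch of that mechanism (these are toy classes, not the Skia ones, and the pending-IO counts are omitted):

#include <cstdio>

// Base class counts refs; when only the cache's ref remains it notifies the
// derived class via static dispatch.
template <typename DERIVED> class IORef {
public:
    void ref() const { ++fRefCnt; }
    void unref() const {
        if (0 == --fRefCnt) {
            delete static_cast<const DERIVED*>(this);
        } else if (1 == fRefCnt) {
            // The one remaining ref is assumed to belong to the cache.
            static_cast<const DERIVED*>(this)->notifyIsPurgable();
        }
    }
protected:
    IORef() : fRefCnt(1) {}
    ~IORef() {}
private:
    mutable int fRefCnt;
};

class Resource : public IORef<Resource> {
public:
    void notifyIsPurgable() const { std::printf("resource is now purgable\n"); }
};

int main() {
    Resource* r = new Resource;   // refCnt == 1 (treat it as the cache's ref)
    r->ref();                     // a client takes a ref -> 2
    r->unref();                   // back to 1 -> notifyIsPurgable() fires
    r->unref();                   // 0 -> deleted
    return 0;
}
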
diff --git a/include/gpu/GrGpuResourceRef.h b/include/gpu/GrGpuResourceRef.h
index 3320dc069f..0223f18977 100644
--- a/include/gpu/GrGpuResourceRef.h
+++ b/include/gpu/GrGpuResourceRef.h
@@ -52,11 +52,11 @@ protected:
/** Adopts a ref from the caller. ioType expresses what type of IO operations will be marked as
pending on the resource when markPendingIO is called. */
- GrGpuResourceRef(GrGpuResource*, GrIORef::IOType);
+ GrGpuResourceRef(GrGpuResource*, GrIOType);
/** Adopts a ref from the caller. ioType expresses what type of IO operations will be marked as
pending on the resource when markPendingIO is called. */
- void setResource(GrGpuResource*, GrIORef::IOType);
+ void setResource(GrGpuResource*, GrIOType);
private:
/** Called by owning GrProgramElement when the program element is first scheduled for
@@ -79,10 +79,10 @@ private:
friend class GrOptDrawState;
friend class GrProgramElement;
- GrGpuResource* fResource;
- mutable bool fOwnRef;
- mutable bool fPendingIO;
- GrIORef::IOType fIOType;
+ GrGpuResource* fResource;
+ mutable bool fOwnRef;
+ mutable bool fPendingIO;
+ GrIOType fIOType;
typedef SkNoncopyable INHERITED;
};
@@ -96,13 +96,13 @@ public:
/** Adopts a ref from the caller. ioType expresses what type of IO operations will be marked as
pending on the resource when markPendingIO is called. */
- GrTGpuResourceRef(T* resource, GrIORef::IOType ioType) : INHERITED(resource, ioType) {}
+ GrTGpuResourceRef(T* resource, GrIOType ioType) : INHERITED(resource, ioType) {}
T* get() const { return static_cast<T*>(this->getResource()); }
/** Adopts a ref from the caller. ioType expresses what type of IO operations will be marked as
pending on the resource when markPendingIO is called. */
- void set(T* resource, GrIORef::IOType ioType) { this->setResource(resource, ioType); }
+ void set(T* resource, GrIOType ioType) { this->setResource(resource, ioType); }
private:
typedef GrGpuResourceRef INHERITED;
@@ -112,18 +112,18 @@ private:
* This is similar to GrTGpuResourceRef but can only be in the pending IO state. It never owns a
* ref.
*/
-template <typename T, GrIORef::IOType IO_TYPE> class GrPendingIOResource : SkNoncopyable {
+template <typename T, GrIOType IO_TYPE> class GrPendingIOResource : SkNoncopyable {
public:
GrPendingIOResource(T* resource) : fResource(resource) {
if (NULL != fResource) {
switch (IO_TYPE) {
- case GrIORef::kRead_IOType:
+ case kRead_GrIOType:
fResource->addPendingRead();
break;
- case GrIORef::kWrite_IOType:
+ case kWrite_GrIOType:
fResource->addPendingWrite();
break;
- case GrIORef::kRW_IOType:
+ case kRW_GrIOType:
fResource->addPendingRead();
fResource->addPendingWrite();
break;
@@ -134,13 +134,13 @@ public:
~GrPendingIOResource() {
if (NULL != fResource) {
switch (IO_TYPE) {
- case GrIORef::kRead_IOType:
+ case kRead_GrIOType:
fResource->completedRead();
break;
- case GrIORef::kWrite_IOType:
+ case kWrite_GrIOType:
fResource->completedWrite();
break;
- case GrIORef::kRW_IOType:
+ case kRW_GrIOType:
fResource->completedRead();
fResource->completedWrite();
break;
@@ -151,6 +151,6 @@ public:
T* get() const { return fResource; }
private:
- T* fResource;
+ T* fResource;
};
#endif
diff --git a/include/gpu/GrTexture.h b/include/gpu/GrTexture.h
index 8bdff34df5..43b69eade2 100644
--- a/include/gpu/GrTexture.h
+++ b/include/gpu/GrTexture.h
@@ -99,8 +99,6 @@ protected:
void validateDesc() const;
private:
- void abandonReleaseCommon();
- virtual void internal_dispose() const SK_OVERRIDE;
void dirtyMipMaps(bool mipMapsDirty);
enum MipMapsStatus {
diff --git a/include/gpu/GrTypesPriv.h b/include/gpu/GrTypesPriv.h
index 94ec1d7cfa..0bae612db1 100644
--- a/include/gpu/GrTypesPriv.h
+++ b/include/gpu/GrTypesPriv.h
@@ -217,4 +217,13 @@ static inline GrPrimitiveEdgeType GrInvertProcessorEdgeType(const GrPrimitiveEdg
return kFillAA_GrProcessorEdgeType; // suppress warning.
}
+/**
+ * Indicates the type of pending IO operations that can be recorded for gpu resources.
+ */
+enum GrIOType {
+ kRead_GrIOType,
+ kWrite_GrIOType,
+ kRW_GrIOType
+};
+
#endif
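
GrIOType moves out of GrIORef here so templates such as GrPendingIOResource (see the GrGpuResourceRef.h hunks above) can be parameterized on it without naming GrIORef. A minimal sketch of how a recorded draw command might pin a resource for a pending read, modeled on the GrInOrderDrawBuffer.h changes further down; the struct name is illustrative and the includes are approximate:

#include "GrGpuResourceRef.h"   // GrPendingIOResource
#include "GrTexture.h"
#include "GrTypesPriv.h"        // GrIOType

// Illustrative command record: holds a texture the GPU will read when the
// buffered draw is flushed. The pending-read count keeps the resource from
// being considered purgable until the IO completes.
struct SampleTextureCmd {
    explicit SampleTextureCmd(const GrTexture* texture) : fTexture(texture) {}
    const GrTexture* texture() const { return fTexture.get(); }
private:
    GrPendingIOResource<const GrTexture, kRead_GrIOType> fTexture;
};
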
diff --git a/include/gpu/SkGr.h b/include/gpu/SkGr.h
index df2ae5a320..026525be5d 100644
--- a/include/gpu/SkGr.h
+++ b/include/gpu/SkGr.h
@@ -70,9 +70,7 @@ static inline GrColor SkColor2GrColorJustAlpha(SkColor c) {
bool GrIsBitmapInCache(const GrContext*, const SkBitmap&, const GrTextureParams*);
-GrTexture* GrLockAndRefCachedBitmapTexture(GrContext*, const SkBitmap&, const GrTextureParams*);
-
-void GrUnlockAndUnrefCachedBitmapTexture(GrTexture*);
+GrTexture* GrRefCachedBitmapTexture(GrContext*, const SkBitmap&, const GrTextureParams*);
////////////////////////////////////////////////////////////////////////////////
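
SkGr.h is where the old lock/unlock pair collapses into a single, purely ref-counted lookup. A minimal call-site sketch of the new pattern, mirroring the gm/ changes earlier in this patch; the helper name is illustrative and the includes are approximate:

#include "SkGr.h"       // GrRefCachedBitmapTexture
#include "SkRefCnt.h"   // SkAutoTUnref

// Illustrative only: look up (or upload) a texture for a bitmap and use it.
static bool use_bitmap_texture(GrContext* context, const SkBitmap& bitmap) {
    // The texture comes back already reffed for the caller and there is nothing
    // to "unlock"; the resource cache recycles it once all refs and pending IO
    // are gone.
    SkAutoTUnref<GrTexture> texture(GrRefCachedBitmapTexture(context, bitmap, NULL));
    if (!texture) {
        return false;   // could not convert the bitmap to a texture
    }
    // ... create effects / draw with the texture ...
    return true;
}
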
diff --git a/include/gpu/SkGrPixelRef.h b/include/gpu/SkGrPixelRef.h
index 7e6a9d02ed..9a81be67de 100644
--- a/include/gpu/SkGrPixelRef.h
+++ b/include/gpu/SkGrPixelRef.h
@@ -41,11 +41,9 @@ class SK_API SkGrPixelRef : public SkROLockPixelsPixelRef {
public:
SK_DECLARE_INST_COUNT(SkGrPixelRef)
/**
- * Constructs a pixel ref around a GrSurface. If the caller has locked the GrSurface in the
- * cache and would like the pixel ref to unlock it in its destructor then transferCacheLock
- * should be set to true.
+ * Constructs a pixel ref around a GrSurface.
*/
- SkGrPixelRef(const SkImageInfo&, GrSurface*, bool transferCacheLock = false);
+ SkGrPixelRef(const SkImageInfo&, GrSurface*);
virtual ~SkGrPixelRef();
// override from SkPixelRef
@@ -58,8 +56,6 @@ protected:
private:
GrSurface* fSurface;
- bool fUnlock; // if true the pixel ref owns a texture cache lock on fSurface
-
typedef SkROLockPixelsPixelRef INHERITED;
};
diff --git a/src/core/SkBitmapProcShader.cpp b/src/core/SkBitmapProcShader.cpp
index 9c900d35f8..a9798b8e24 100644
--- a/src/core/SkBitmapProcShader.cpp
+++ b/src/core/SkBitmapProcShader.cpp
@@ -459,9 +459,9 @@ bool SkBitmapProcShader::asFragmentProcessor(GrContext* context, const SkPaint&
}
GrTextureParams params(tm, textureFilterMode);
- GrTexture* texture = GrLockAndRefCachedBitmapTexture(context, fRawBitmap, &params);
+ SkAutoTUnref<GrTexture> texture(GrRefCachedBitmapTexture(context, fRawBitmap, &params));
- if (NULL == texture) {
+ if (!texture) {
SkErrorInternals::SetError( kInternalError_SkError,
"Couldn't convert bitmap to texture.");
return false;
@@ -476,7 +476,6 @@ bool SkBitmapProcShader::asFragmentProcessor(GrContext* context, const SkPaint&
} else {
*fp = GrSimpleTextureEffect::Create(texture, matrix, params);
}
- GrUnlockAndUnrefCachedBitmapTexture(texture);
return true;
}
diff --git a/src/core/SkImageFilter.cpp b/src/core/SkImageFilter.cpp
index 56c310d37e..6f7762ac46 100644
--- a/src/core/SkImageFilter.cpp
+++ b/src/core/SkImageFilter.cpp
@@ -399,9 +399,8 @@ bool SkImageFilter::getInputResultGPU(SkImageFilter::Proxy* proxy,
if (kUnknown_SkColorType == info.colorType()) {
return false;
}
- GrTexture* resultTex = GrLockAndRefCachedBitmapTexture(context, *result, NULL);
- result->setPixelRef(new SkGrPixelRef(info, resultTex))->unref();
- GrUnlockAndUnrefCachedBitmapTexture(resultTex);
+ SkAutoTUnref<GrTexture> resultTex(GrRefCachedBitmapTexture(context, *result, NULL));
+ result->setPixelRef(SkNEW_ARGS(SkGrPixelRef, (info, resultTex)))->unref();
}
return true;
} else {
diff --git a/src/effects/SkPerlinNoiseShader.cpp b/src/effects/SkPerlinNoiseShader.cpp
index 43197dedf2..861e829237 100644
--- a/src/effects/SkPerlinNoiseShader.cpp
+++ b/src/effects/SkPerlinNoiseShader.cpp
@@ -997,10 +997,10 @@ bool SkPerlinNoiseShader::asFragmentProcessor(GrContext* context, const SkPaint&
SkPerlinNoiseShader::PaintingData* paintingData =
SkNEW_ARGS(PaintingData, (fTileSize, fSeed, fBaseFrequencyX, fBaseFrequencyY, matrix));
- GrTexture* permutationsTexture = GrLockAndRefCachedBitmapTexture(
- context, paintingData->getPermutationsBitmap(), NULL);
- GrTexture* noiseTexture = GrLockAndRefCachedBitmapTexture(
- context, paintingData->getNoiseBitmap(), NULL);
+ SkAutoTUnref<GrTexture> permutationsTexture(
+ GrRefCachedBitmapTexture(context, paintingData->getPermutationsBitmap(), NULL));
+ SkAutoTUnref<GrTexture> noiseTexture(
+ GrRefCachedBitmapTexture(context, paintingData->getNoiseBitmap(), NULL));
SkMatrix m = context->getMatrix();
m.setTranslateX(-localMatrix.getTranslateX() + SK_Scalar1);
@@ -1016,17 +1016,6 @@ bool SkPerlinNoiseShader::asFragmentProcessor(GrContext* context, const SkPaint&
SkDELETE(paintingData);
*fp = NULL;
}
-
- // Unlock immediately, this is not great, but we don't have a way of
- // knowing when else to unlock it currently. TODO: Remove this when
- // unref becomes the unlock replacement for all types of textures.
- if (permutationsTexture) {
- GrUnlockAndUnrefCachedBitmapTexture(permutationsTexture);
- }
- if (noiseTexture) {
- GrUnlockAndUnrefCachedBitmapTexture(noiseTexture);
- }
-
return true;
}
diff --git a/src/effects/SkTableColorFilter.cpp b/src/effects/SkTableColorFilter.cpp
index f6726ca078..8f385ce1b3 100644
--- a/src/effects/SkTableColorFilter.cpp
+++ b/src/effects/SkTableColorFilter.cpp
@@ -419,7 +419,6 @@ void ColorTableEffect::onComputeInvariantOutput(InvariantOutput* inout) const {
inout->fIsSingleComponent = false;
}
-
///////////////////////////////////////////////////////////////////////////////
GR_DEFINE_FRAGMENT_PROCESSOR_TEST(ColorTableEffect);
@@ -435,19 +434,10 @@ GrFragmentProcessor* ColorTableEffect::TestCreate(SkRandom* random,
GrFragmentProcessor* SkTable_ColorFilter::asFragmentProcessor(GrContext* context) const {
SkBitmap bitmap;
- GrFragmentProcessor* fp = NULL;
this->asComponentTable(&bitmap);
// passing NULL because this effect does no tiling or filtering.
- GrTexture* texture = GrLockAndRefCachedBitmapTexture(context, bitmap, NULL);
- if (texture) {
- fp = ColorTableEffect::Create(texture, fFlags);
-
- // Unlock immediately, this is not great, but we don't have a way of
- // knowing when else to unlock it currently. TODO: Remove this when
- // unref becomes the unlock replacement for all types of textures.
- GrUnlockAndUnrefCachedBitmapTexture(texture);
- }
- return fp;
+ SkAutoTUnref<GrTexture> texture(GrRefCachedBitmapTexture(context, bitmap, NULL));
+ return texture ? ColorTableEffect::Create(texture, fFlags) : NULL;
}
#endif // SK_SUPPORT_GPU
diff --git a/src/effects/gradients/SkGradientShader.cpp b/src/effects/gradients/SkGradientShader.cpp
index 89b323a701..cb9cfff43f 100644
--- a/src/effects/gradients/SkGradientShader.cpp
+++ b/src/effects/gradients/SkGradientShader.cpp
@@ -1164,15 +1164,10 @@ GrGradientEffect::GrGradientEffect(GrContext* ctx,
fCoordTransform.reset(kCoordSet, matrix, fAtlas->getTexture());
fTextureAccess.reset(fAtlas->getTexture(), params);
} else {
- GrTexture* texture = GrLockAndRefCachedBitmapTexture(ctx, bitmap, &params);
+ SkAutoTUnref<GrTexture> texture(GrRefCachedBitmapTexture(ctx, bitmap, &params));
fCoordTransform.reset(kCoordSet, matrix, texture);
fTextureAccess.reset(texture, params);
fYCoord = SK_ScalarHalf;
-
- // Unlock immediately, this is not great, but we don't have a way of
- // knowing when else to unlock it currently, so it may get purged from
- // the cache, but it'll still be ref'd until it's no longer being used.
- GrUnlockAndUnrefCachedBitmapTexture(texture);
}
this->addTextureAccess(&fTextureAccess);
}
diff --git a/src/gpu/GrClipMaskCache.h b/src/gpu/GrClipMaskCache.h
index 6b484e8af8..a2495f1566 100644
--- a/src/gpu/GrClipMaskCache.h
+++ b/src/gpu/GrClipMaskCache.h
@@ -204,7 +204,9 @@ private:
fLastClipGenID = clipGenID;
- fLastMask.set(context, desc);
+ // HACK: set the last param to true to indicate that this request is at
+ // flush time and therefore we require a scratch texture with no pending IO operations.
+ fLastMask.set(context, desc, GrContext::kApprox_ScratchTexMatch, /*flushing=*/true);
fLastBound = bound;
}
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index d0f3cc5671..a722eed88a 100755
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -70,25 +70,6 @@ static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
-GrTexture* GrAutoScratchTexture::detach() {
- if (NULL == fTexture) {
- return NULL;
- }
- GrTexture* texture = fTexture;
- fTexture = NULL;
-
- // This GrAutoScratchTexture has a ref from lockAndRefScratchTexture, which we give up now.
- // The cache also has a ref which we are lending to the caller of detach(). When the caller
- // lets go of the ref and the ref count goes to 0 internal_dispose will see this flag is
- // set and re-ref the texture, thereby restoring the cache's ref.
- SkASSERT(!texture->unique());
- texture->texturePriv().setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
- texture->unref();
- SkASSERT(texture->getCacheEntry());
-
- return texture;
-}
-
// Glorified typedef to avoid including GrDrawState.h in GrContext.h
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
@@ -153,7 +134,8 @@ bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
fDrawState = SkNEW(GrDrawState);
fGpu->setDrawState(fDrawState);
- fResourceCache = SkNEW_ARGS(GrResourceCache, (MAX_RESOURCE_CACHE_COUNT,
+ fResourceCache = SkNEW_ARGS(GrResourceCache, (fGpu->caps(),
+ MAX_RESOURCE_CACHE_COUNT,
MAX_RESOURCE_CACHE_BYTES));
fResourceCache->setOverbudgetCallback(OverbudgetCB, this);
fResourceCache2 = SkNEW(GrResourceCache2);
@@ -446,9 +428,6 @@ GrTexture* GrContext::createTexture(const GrTextureParams* params,
}
if (texture) {
- // Adding a resource could put us overbudget. Try to free up the
- // necessary space before adding it.
- fResourceCache->purgeAsNeeded(1, texture->gpuMemorySize());
fResourceCache->addResource(resourceKey, texture);
if (cacheKey) {
@@ -459,157 +438,71 @@ GrTexture* GrContext::createTexture(const GrTextureParams* params,
return texture;
}
-static GrTexture* create_scratch_texture(GrGpu* gpu,
- GrResourceCache* resourceCache,
- const GrTextureDesc& desc) {
- GrTexture* texture = gpu->createTexture(desc, NULL, 0);
- if (texture) {
- GrResourceKey key = GrTexturePriv::ComputeScratchKey(texture->desc());
- // Adding a resource could put us overbudget. Try to free up the
- // necessary space before adding it.
- resourceCache->purgeAsNeeded(1, texture->gpuMemorySize());
- // Make the resource exclusive so future 'find' calls don't return it
- resourceCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
+GrTexture* GrContext::createNewScratchTexture(const GrTextureDesc& desc) {
+ GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
+ if (!texture) {
+ return NULL;
}
+ fResourceCache->addResource(texture->getScratchKey(), texture);
+ texture->fIsScratch = GrGpuResource::kYes_IsScratch;
return texture;
}
-GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
+GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match,
+ bool calledDuringFlush) {
+ // kNoStencil has no meaning if kRT isn't set.
SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
!(inDesc.fFlags & kNoStencil_GrTextureFlagBit));
- // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
- SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
- !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
- (inDesc.fConfig != kAlpha_8_GrPixelConfig));
+ // Make sure caller has checked for renderability if kRT is set.
+ SkASSERT(!(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
+ this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0));
- if (!fGpu->caps()->reuseScratchTextures() &&
- !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit)) {
- // If we're never recycling this texture we can always make it the right size
- return create_scratch_texture(fGpu, fResourceCache, inDesc);
- }
-
- GrTextureDesc desc = inDesc;
+ SkTCopyOnFirstWrite<GrTextureDesc> desc(inDesc);
- if (kApprox_ScratchTexMatch == match) {
- // bin by pow2 with a reasonable min
- static const int MIN_SIZE = 16;
- desc.fWidth = SkTMax(MIN_SIZE, GrNextPow2(desc.fWidth));
- desc.fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc.fHeight));
- }
-
- GrGpuResource* resource = NULL;
- int origWidth = desc.fWidth;
- int origHeight = desc.fHeight;
-
- do {
- GrResourceKey key = GrTexturePriv::ComputeScratchKey(desc);
- // Ensure we have exclusive access to the texture so future 'find' calls don't return it
- resource = fResourceCache->find(key, GrResourceCache::kHide_OwnershipFlag);
- if (resource) {
- resource->ref();
- break;
- }
- if (kExact_ScratchTexMatch == match) {
- break;
- }
- // We had a cache miss and we are in approx mode, relax the fit of the flags.
-
- // We no longer try to reuse textures that were previously used as render targets in
- // situations where no RT is needed; doing otherwise can confuse the video driver and
- // cause significant performance problems in some cases.
- if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
- desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
- } else {
- break;
+ if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrTextureFlagBit)) {
+ GrTextureFlags origFlags = desc->fFlags;
+ if (kApprox_ScratchTexMatch == match) {
+ // bin by pow2 with a reasonable min
+ static const int MIN_SIZE = 16;
+ GrTextureDesc* wdesc = desc.writable();
+ wdesc->fWidth = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth));
+ wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight));
}
- } while (true);
-
- if (NULL == resource) {
- desc.fFlags = inDesc.fFlags;
- desc.fWidth = origWidth;
- desc.fHeight = origHeight;
- resource = create_scratch_texture(fGpu, fResourceCache, desc);
- }
-
- return static_cast<GrTexture*>(resource);
-}
-
-void GrContext::addExistingTextureToCache(GrTexture* texture) {
-
- if (NULL == texture) {
- return;
- }
-
- // This texture should already have a cache entry since it was once
- // attached
- SkASSERT(texture->getCacheEntry());
-
- // Conceptually, the cache entry is going to assume responsibility
- // for the creation ref. Assert refcnt == 1.
- // Except that this also gets called when the texture is prematurely
- // abandoned. In that case the ref count may be > 1.
- // SkASSERT(texture->unique());
+ do {
+ GrResourceKey key = GrTexturePriv::ComputeScratchKey(*desc);
+ GrGpuResource* resource = fResourceCache2->findAndRefScratchResource(key,
+ calledDuringFlush);
+ if (resource) {
+ fResourceCache->makeResourceMRU(resource);
+ return static_cast<GrTexture*>(resource);
+ }
- if (fGpu->caps()->reuseScratchTextures() || texture->asRenderTarget()) {
- // Since this texture came from an AutoScratchTexture it should
- // still be in the exclusive pile. Recycle it.
- fResourceCache->makeNonExclusive(texture->getCacheEntry());
- this->purgeCache();
- } else {
- // When we aren't reusing textures we know this scratch texture
- // will never be reused and would be just wasting time in the cache
- fResourceCache->makeNonExclusive(texture->getCacheEntry());
- fResourceCache->deleteResource(texture->getCacheEntry());
- }
-}
+ if (kExact_ScratchTexMatch == match) {
+ break;
+ }
+ // We had a cache miss and we are in approx mode, relax the fit of the flags.
+
+ // We no longer try to reuse textures that were previously used as render targets in
+ // situations where no RT is needed; doing otherwise can confuse the video driver and
+ // cause significant performance problems in some cases.
+ if (desc->fFlags & kNoStencil_GrTextureFlagBit) {
+ desc.writable()->fFlags = desc->fFlags & ~kNoStencil_GrTextureFlagBit;
+ } else {
+ break;
+ }
-void GrContext::unlockScratchTexture(GrTexture* texture) {
- if (texture->wasDestroyed()) {
- if (texture->getCacheEntry()->key().isScratch()) {
- // This texture was detached from the cache but the cache still had a ref to it but
- // not a pointer to it. This will unref the texture and delete its resource cache
- // entry.
- delete texture->getCacheEntry();
- }
- return;
- }
+ } while (true);
- ASSERT_OWNED_RESOURCE(texture);
- SkASSERT(texture->getCacheEntry());
-
- // If this is a scratch texture we detached it from the cache
- // while it was locked (to avoid two callers simultaneously getting
- // the same texture).
- if (texture->getCacheEntry()->key().isScratch()) {
- if (fGpu->caps()->reuseScratchTextures() || texture->asRenderTarget()) {
- fResourceCache->makeNonExclusive(texture->getCacheEntry());
- this->purgeCache();
- } else if (texture->unique()) {
- // Only the cache now knows about this texture. Since we're never
- // reusing scratch textures (in this code path) it would just be
- // wasting time sitting in the cache.
- fResourceCache->makeNonExclusive(texture->getCacheEntry());
- fResourceCache->deleteResource(texture->getCacheEntry());
- } else {
- // In this case (there is still a non-cache ref) but we don't really
- // want to readd it to the cache (since it will never be reused).
- // Instead, give up the cache's ref and leave the decision up to
- // addExistingTextureToCache once its ref count reaches 0. For
- // this to work we need to leave it in the exclusive list.
- texture->texturePriv().setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
- // Give up the cache's ref to the texture
- texture->unref();
- }
+ desc.writable()->fFlags = origFlags;
}
-}
-void GrContext::purgeCache() {
- if (fResourceCache) {
- fResourceCache->purgeAsNeeded();
- }
+ GrTexture* texture = this->createNewScratchTexture(*desc);
+ SkASSERT(NULL == texture ||
+ texture->getScratchKey() == GrTexturePriv::ComputeScratchKey(*desc));
+ return texture;
}
bool GrContext::OverbudgetCB(void* data) {
@@ -1349,6 +1242,7 @@ void GrContext::flush(int flagsBitfield) {
} else {
fDrawBuffer->flush();
}
+ fResourceCache->purgeAsNeeded();
fFlushToReduceCacheSize = false;
}
@@ -1941,7 +1835,6 @@ const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
}
void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrGpuResource* resource) {
- fResourceCache->purgeAsNeeded(1, resource->gpuMemorySize());
fResourceCache->addResource(resourceKey, resource);
}
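
After the rewrite above, a scratch texture is handed to the caller with a plain ref and there is no unlockScratchTexture() counterpart. Dropping the ref makes the texture purgable; the cache then either keeps it for reuse (when reuseScratchTextures() is true or it is a render target) or deletes it outright (see GrResourceCache::notifyPurgable further down). A hedged caller-side sketch — the helper name is illustrative and the descriptor values are only an example:

#include "GrContext.h"
#include "SkRefCnt.h"   // SkAutoTUnref

// Illustrative only: draw into a temporary render-target texture and release it.
static void draw_to_temp(GrContext* context, int width, int height) {
    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = kRGBA_8888_GrPixelConfig;

    // Returns a reffed texture (possibly larger than requested in approx mode).
    SkAutoTUnref<GrTexture> temp(
        context->lockAndRefScratchTexture(desc, GrContext::kApprox_ScratchTexMatch));
    if (!temp) {
        return;
    }
    // ... render using the texture's render target ...
    // When SkAutoTUnref drops the last external ref, the cache sees the texture
    // as purgable and can hand it out again via findAndRefScratchResource().
}
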
diff --git a/src/gpu/GrDrawState.h b/src/gpu/GrDrawState.h
index 9d653e6f5c..5575b12966 100644
--- a/src/gpu/GrDrawState.h
+++ b/src/gpu/GrDrawState.h
@@ -510,7 +510,7 @@ public:
* @param target The render target to set.
*/
void setRenderTarget(GrRenderTarget* target) {
- fRenderTarget.set(SkSafeRef(target), GrIORef::kWrite_IOType);
+ fRenderTarget.set(SkSafeRef(target), kWrite_GrIOType);
this->invalidateOptState();
}
diff --git a/src/gpu/GrGpuResource.cpp b/src/gpu/GrGpuResource.cpp
index bba934e605..a074d7c29e 100644
--- a/src/gpu/GrGpuResource.cpp
+++ b/src/gpu/GrGpuResource.cpp
@@ -11,7 +11,7 @@
#include "GrResourceCache2.h"
#include "GrGpu.h"
-GrIORef::~GrIORef() {
+template<typename D> GrIORef<D>::~GrIORef() {
SkASSERT(0 == fRefCnt);
SkASSERT(0 == fPendingReads);
SkASSERT(0 == fPendingWrites);
@@ -28,6 +28,13 @@ static inline GrResourceCache2* get_resource_cache2(GrGpu* gpu) {
return gpu->getContext()->getResourceCache2();
}
+static inline GrResourceCache* get_resource_cache(GrGpu* gpu) {
+ SkASSERT(gpu);
+ SkASSERT(gpu->getContext());
+ SkASSERT(gpu->getContext()->getResourceCache());
+ return gpu->getContext()->getResourceCache();
+}
+
GrGpuResource::GrGpuResource(GrGpu* gpu, bool isWrapped)
: fGpu(gpu)
, fCacheEntry(NULL)
@@ -81,6 +88,12 @@ GrContext* GrGpuResource::getContext() {
}
}
+void GrGpuResource::notifyIsPurgable() const {
+ if (fCacheEntry && !this->wasDestroyed()) {
+ get_resource_cache(fGpu)->notifyPurgable(this);
+ }
+}
+
void GrGpuResource::setScratchKey(const GrResourceKey& scratchKey) {
SkASSERT(fScratchKey.isNullScratch());
SkASSERT(scratchKey.isScratch());
diff --git a/src/gpu/GrGpuResourceRef.cpp b/src/gpu/GrGpuResourceRef.cpp
index 7c521df462..9e0e6f8e50 100644
--- a/src/gpu/GrGpuResourceRef.cpp
+++ b/src/gpu/GrGpuResourceRef.cpp
@@ -13,7 +13,7 @@ GrGpuResourceRef::GrGpuResourceRef() {
fPendingIO = false;
}
-GrGpuResourceRef::GrGpuResourceRef(GrGpuResource* resource, GrIORef::IOType ioType) {
+GrGpuResourceRef::GrGpuResourceRef(GrGpuResource* resource, GrIOType ioType) {
fResource = NULL;
fOwnRef = false;
fPendingIO = false;
@@ -27,13 +27,13 @@ GrGpuResourceRef::~GrGpuResourceRef() {
}
if (fPendingIO) {
switch (fIOType) {
- case GrIORef::kRead_IOType:
+ case kRead_GrIOType:
fResource->completedRead();
break;
- case GrIORef::kWrite_IOType:
+ case kWrite_GrIOType:
fResource->completedWrite();
break;
- case GrIORef::kRW_IOType:
+ case kRW_GrIOType:
fResource->completedRead();
fResource->completedWrite();
break;
@@ -51,7 +51,7 @@ void GrGpuResourceRef::reset() {
}
}
-void GrGpuResourceRef::setResource(GrGpuResource* resource, GrIORef::IOType ioType) {
+void GrGpuResourceRef::setResource(GrGpuResource* resource, GrIOType ioType) {
SkASSERT(!fPendingIO);
SkASSERT(SkToBool(fResource) == fOwnRef);
SkSafeUnref(fResource);
@@ -72,13 +72,13 @@ void GrGpuResourceRef::markPendingIO() const {
SkASSERT(fResource);
fPendingIO = true;
switch (fIOType) {
- case GrIORef::kRead_IOType:
+ case kRead_GrIOType:
fResource->addPendingRead();
break;
- case GrIORef::kWrite_IOType:
+ case kWrite_GrIOType:
fResource->addPendingWrite();
break;
- case GrIORef::kRW_IOType:
+ case kRW_GrIOType:
fResource->addPendingRead();
fResource->addPendingWrite();
break;
@@ -91,13 +91,13 @@ void GrGpuResourceRef::pendingIOComplete() const {
SkASSERT(fOwnRef);
SkASSERT(fPendingIO);
switch (fIOType) {
- case GrIORef::kRead_IOType:
+ case kRead_GrIOType:
fResource->completedRead();
break;
- case GrIORef::kWrite_IOType:
+ case kWrite_GrIOType:
fResource->completedWrite();
break;
- case GrIORef::kRW_IOType:
+ case kRW_GrIOType:
fResource->completedRead();
fResource->completedWrite();
break;
diff --git a/src/gpu/GrInOrderDrawBuffer.h b/src/gpu/GrInOrderDrawBuffer.h
index e6ed06eadb..485de07db5 100644
--- a/src/gpu/GrInOrderDrawBuffer.h
+++ b/src/gpu/GrInOrderDrawBuffer.h
@@ -109,8 +109,8 @@ private:
const GrIndexBuffer* indexBuffer() const { return fIndexBuffer.get(); }
private:
- GrPendingIOResource<const GrVertexBuffer, GrIORef::kRead_IOType> fVertexBuffer;
- GrPendingIOResource<const GrIndexBuffer, GrIORef::kRead_IOType> fIndexBuffer;
+ GrPendingIOResource<const GrVertexBuffer, kRead_GrIOType> fVertexBuffer;
+ GrPendingIOResource<const GrIndexBuffer, kRead_GrIOType> fIndexBuffer;
};
struct StencilPath : public ::SkNoncopyable {
@@ -121,7 +121,7 @@ private:
SkPath::FillType fFill;
private:
- GrPendingIOResource<const GrPath, GrIORef::kRead_IOType> fPath;
+ GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
};
struct DrawPath : public ::SkNoncopyable {
@@ -133,7 +133,7 @@ private:
GrDeviceCoordTexture fDstCopy;
private:
- GrPendingIOResource<const GrPath, GrIORef::kRead_IOType> fPath;
+ GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
};
struct DrawPaths : public ::SkNoncopyable {
@@ -159,7 +159,7 @@ private:
GrDeviceCoordTexture fDstCopy;
private:
- GrPendingIOResource<const GrPathRange, GrIORef::kRead_IOType> fPathRange;
+ GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange;
};
// This is also used to record a discard by setting the color to GrColor_ILLEGAL
@@ -173,7 +173,7 @@ private:
bool fCanIgnoreRect;
private:
- GrPendingIOResource<GrRenderTarget, GrIORef::kWrite_IOType> fRenderTarget;
+ GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
};
struct CopySurface : public ::SkNoncopyable {
@@ -186,8 +186,8 @@ private:
SkIRect fSrcRect;
private:
- GrPendingIOResource<GrSurface, GrIORef::kWrite_IOType> fDst;
- GrPendingIOResource<GrSurface, GrIORef::kRead_IOType> fSrc;
+ GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst;
+ GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc;
};
struct Clip : public ::SkNoncopyable {
diff --git a/src/gpu/GrLayerCache.cpp b/src/gpu/GrLayerCache.cpp
index f90ab55a5e..0481d144e5 100644
--- a/src/gpu/GrLayerCache.cpp
+++ b/src/gpu/GrLayerCache.cpp
@@ -245,7 +245,6 @@ void GrLayerCache::unlock(GrCachedLayer* layer) {
#endif
} else {
- fContext->unlockScratchTexture(layer->texture());
layer->setTexture(NULL, GrIRect16::MakeEmpty());
}
diff --git a/src/gpu/GrOptDrawState.cpp b/src/gpu/GrOptDrawState.cpp
index b60a4cef8a..c8218d7a90 100644
--- a/src/gpu/GrOptDrawState.cpp
+++ b/src/gpu/GrOptDrawState.cpp
@@ -15,8 +15,7 @@ GrOptDrawState::GrOptDrawState(const GrDrawState& drawState,
GrBlendCoeff optSrcCoeff,
GrBlendCoeff optDstCoeff,
const GrDrawTargetCaps& caps) {
- fRenderTarget.set(SkSafeRef(drawState.getRenderTarget()),
- GrIORef::kWrite_IOType);
+ fRenderTarget.set(SkSafeRef(drawState.getRenderTarget()), kWrite_GrIOType);
fColor = drawState.getColor();
fCoverage = drawState.getCoverage();
fViewMatrix = drawState.getViewMatrix();
diff --git a/src/gpu/GrResourceCache.cpp b/src/gpu/GrResourceCache.cpp
index e9be509569..c683b5bcfc 100644
--- a/src/gpu/GrResourceCache.cpp
+++ b/src/gpu/GrResourceCache.cpp
@@ -10,6 +10,8 @@
#include "GrResourceCache.h"
#include "GrGpuResource.h"
+#include "GrTexturePriv.h"
+
DECLARE_SKMESSAGEBUS_MESSAGE(GrResourceInvalidatedMessage);
@@ -76,20 +78,17 @@ void GrResourceCacheEntry::didChangeResourceSize() {
///////////////////////////////////////////////////////////////////////////////
-GrResourceCache::GrResourceCache(int maxCount, size_t maxBytes) :
- fMaxCount(maxCount),
- fMaxBytes(maxBytes) {
+GrResourceCache::GrResourceCache(const GrDrawTargetCaps* caps, int maxCount, size_t maxBytes)
+ : fMaxCount(maxCount)
+ , fMaxBytes(maxBytes)
+ , fCaps(SkRef(caps)) {
#if GR_CACHE_STATS
fHighWaterEntryCount = 0;
fHighWaterEntryBytes = 0;
- fHighWaterClientDetachedCount = 0;
- fHighWaterClientDetachedBytes = 0;
#endif
fEntryCount = 0;
fEntryBytes = 0;
- fClientDetachedCount = 0;
- fClientDetachedBytes = 0;
fPurging = false;
@@ -136,55 +135,26 @@ void GrResourceCache::setLimits(int maxResources, size_t maxResourceBytes) {
}
}
-void GrResourceCache::internalDetach(GrResourceCacheEntry* entry,
- BudgetBehaviors behavior) {
+void GrResourceCache::internalDetach(GrResourceCacheEntry* entry) {
fList.remove(entry);
-
- // update our stats
- if (kIgnore_BudgetBehavior == behavior) {
- fClientDetachedCount += 1;
- fClientDetachedBytes += entry->fCachedSize;
-
-#if GR_CACHE_STATS
- if (fHighWaterClientDetachedCount < fClientDetachedCount) {
- fHighWaterClientDetachedCount = fClientDetachedCount;
- }
- if (fHighWaterClientDetachedBytes < fClientDetachedBytes) {
- fHighWaterClientDetachedBytes = fClientDetachedBytes;
- }
-#endif
-
- } else {
- SkASSERT(kAccountFor_BudgetBehavior == behavior);
-
- fEntryCount -= 1;
- fEntryBytes -= entry->fCachedSize;
- }
+ fEntryCount -= 1;
+ fEntryBytes -= entry->fCachedSize;
}
-void GrResourceCache::attachToHead(GrResourceCacheEntry* entry,
- BudgetBehaviors behavior) {
+void GrResourceCache::attachToHead(GrResourceCacheEntry* entry) {
fList.addToHead(entry);
- // update our stats
- if (kIgnore_BudgetBehavior == behavior) {
- fClientDetachedCount -= 1;
- fClientDetachedBytes -= entry->fCachedSize;
- } else {
- SkASSERT(kAccountFor_BudgetBehavior == behavior);
-
- fEntryCount += 1;
- fEntryBytes += entry->fCachedSize;
+ fEntryCount += 1;
+ fEntryBytes += entry->fCachedSize;
#if GR_CACHE_STATS
- if (fHighWaterEntryCount < fEntryCount) {
- fHighWaterEntryCount = fEntryCount;
- }
- if (fHighWaterEntryBytes < fEntryBytes) {
- fHighWaterEntryBytes = fEntryBytes;
- }
-#endif
+ if (fHighWaterEntryCount < fEntryCount) {
+ fHighWaterEntryCount = fEntryCount;
}
+ if (fHighWaterEntryBytes < fEntryBytes) {
+ fHighWaterEntryBytes = fEntryBytes;
+ }
+#endif
}
// This functor just searches for an entry with only a single ref (from
@@ -193,41 +163,53 @@ void GrResourceCache::attachToHead(GrResourceCacheEntry* entry,
class GrTFindUnreffedFunctor {
public:
bool operator()(const GrResourceCacheEntry* entry) const {
- return entry->resource()->unique();
+ return entry->resource()->isPurgable();
}
};
-GrGpuResource* GrResourceCache::find(const GrResourceKey& key, uint32_t ownershipFlags) {
+
+void GrResourceCache::makeResourceMRU(GrGpuResource* resource) {
+ GrResourceCacheEntry* entry = resource->getCacheEntry();
+ if (entry) {
+ this->internalDetach(entry);
+ this->attachToHead(entry);
+ }
+}
+
+void GrResourceCache::notifyPurgable(const GrGpuResource* resource) {
+ // Remove scratch textures from the cache the moment they become purgeable if
+ // scratch texture reuse is turned off.
+ SkASSERT(resource->getCacheEntry());
+ if (resource->getCacheEntry()->key().getResourceType() == GrTexturePriv::ResourceType() &&
+ resource->fIsScratch &&
+ !fCaps->reuseScratchTextures() &&
+ !(static_cast<const GrTexture*>(resource)->desc().fFlags &
+ kRenderTarget_GrTextureFlagBit)) {
+ this->deleteResource(resource->getCacheEntry());
+ }
+}
+
+GrGpuResource* GrResourceCache::find(const GrResourceKey& key) {
GrAutoResourceCacheValidate atcv(this);
GrResourceCacheEntry* entry = NULL;
- if (ownershipFlags & kNoOtherOwners_OwnershipFlag) {
- GrTFindUnreffedFunctor functor;
-
- entry = fCache.find<GrTFindUnreffedFunctor>(key, functor);
- } else {
- entry = fCache.find(key);
- }
+ entry = fCache.find(key);
if (NULL == entry) {
return NULL;
}
- if (ownershipFlags & kHide_OwnershipFlag) {
- this->makeExclusive(entry);
- } else {
- // Make this resource MRU
- this->internalDetach(entry);
- this->attachToHead(entry);
- }
+ // Make this resource MRU
+ this->internalDetach(entry);
+ this->attachToHead(entry);
+ // GrResourceCache2 is responsible for scratch resources.
+ SkASSERT(GrGpuResource::kNo_IsScratch == entry->resource()->fIsScratch);
return entry->fResource;
}
-void GrResourceCache::addResource(const GrResourceKey& key,
- GrGpuResource* resource,
- uint32_t ownershipFlags) {
+void GrResourceCache::addResource(const GrResourceKey& key, GrGpuResource* resource) {
SkASSERT(NULL == resource->getCacheEntry());
// we don't expect to create new resources during a purge. In theory
// this could cause purgeAsNeeded() into an infinite loop (e.g.
@@ -242,76 +224,16 @@ void GrResourceCache::addResource(const GrResourceKey& key,
this->attachToHead(entry);
fCache.insert(key, entry);
- if (ownershipFlags & kHide_OwnershipFlag) {
- this->makeExclusive(entry);
- }
-
-}
-
-void GrResourceCache::makeExclusive(GrResourceCacheEntry* entry) {
- GrAutoResourceCacheValidate atcv(this);
-
- SkASSERT(!entry->fIsExclusive);
- entry->fIsExclusive = true;
-
- // When scratch textures are detached (to hide them from future finds) they
- // still count against the resource budget
- this->internalDetach(entry, kIgnore_BudgetBehavior);
- fCache.remove(entry->key(), entry);
-
-#ifdef SK_DEBUG
- fExclusiveList.addToHead(entry);
-#endif
-}
-
-void GrResourceCache::removeInvalidResource(GrResourceCacheEntry* entry) {
- // If the resource went invalid while it was detached then purge it
- // This can happen when a 3D context was lost,
- // the client called GrContext::abandonContext() to notify Gr,
- // and then later an SkGpuDevice's destructor releases its backing
- // texture (which was invalidated at contextDestroyed time).
- // TODO: Safely delete the GrResourceCacheEntry as well.
- fClientDetachedCount -= 1;
- fEntryCount -= 1;
- fClientDetachedBytes -= entry->fCachedSize;
- fEntryBytes -= entry->fCachedSize;
- entry->fCachedSize = 0;
-}
-
-void GrResourceCache::makeNonExclusive(GrResourceCacheEntry* entry) {
- GrAutoResourceCacheValidate atcv(this);
-
-#ifdef SK_DEBUG
- fExclusiveList.remove(entry);
-#endif
-
- if (!entry->resource()->wasDestroyed()) {
- // Since scratch textures still count against the cache budget even
- // when they have been removed from the cache, re-adding them doesn't
- // alter the budget information.
- attachToHead(entry, kIgnore_BudgetBehavior);
- fCache.insert(entry->key(), entry);
-
- SkASSERT(entry->fIsExclusive);
- entry->fIsExclusive = false;
- } else {
- this->removeInvalidResource(entry);
- }
+ this->purgeAsNeeded();
}
void GrResourceCache::didIncreaseResourceSize(const GrResourceCacheEntry* entry, size_t amountInc) {
fEntryBytes += amountInc;
- if (entry->fIsExclusive) {
- fClientDetachedBytes += amountInc;
- }
this->purgeAsNeeded();
}
void GrResourceCache::didDecreaseResourceSize(const GrResourceCacheEntry* entry, size_t amountDec) {
fEntryBytes -= amountDec;
- if (entry->fIsExclusive) {
- fClientDetachedBytes -= amountDec;
- }
#ifdef SK_DEBUG
this->validate();
#endif
@@ -359,13 +281,6 @@ void GrResourceCache::purgeInvalidated() {
fInvalidationInbox.poll(&invalidated);
for (int i = 0; i < invalidated.count(); i++) {
- // We're somewhat missing an opportunity here. We could use the
- // default find functor that gives us back resources whether we own
- // them exclusively or not, and when they're not exclusively owned mark
- // them for purging later when they do become exclusively owned.
- //
- // This is complicated and confusing. May try this in the future. For
- // now, these resources are just LRU'd as if we never got the message.
while (GrResourceCacheEntry* entry = fCache.find(invalidated[i].key, GrTFindUnreffedFunctor())) {
this->deleteResource(entry);
}
@@ -373,7 +288,7 @@ void GrResourceCache::purgeInvalidated() {
}
void GrResourceCache::deleteResource(GrResourceCacheEntry* entry) {
- SkASSERT(entry->fResource->unique());
+ SkASSERT(entry->fResource->isPurgable());
// remove from our cache
fCache.remove(entry->key(), entry);
@@ -412,7 +327,7 @@ void GrResourceCache::internalPurge(int extraCount, size_t extraBytes) {
}
GrResourceCacheEntry* prev = iter.prev();
- if (entry->fResource->unique()) {
+ if (entry->fResource->isPurgable()) {
changed = true;
this->deleteResource(entry);
}
@@ -435,14 +350,7 @@ void GrResourceCache::purgeAllUnlocked() {
this->purgeAsNeeded();
#ifdef SK_DEBUG
- SkASSERT(fExclusiveList.countEntries() == fClientDetachedCount);
- SkASSERT(countBytes(fExclusiveList) == fClientDetachedBytes);
if (!fCache.count()) {
- // Items may have been detached from the cache (such as the backing
- // texture for an SkGpuDevice). The above purge would not have removed
- // them.
- SkASSERT(fEntryCount == fClientDetachedCount);
- SkASSERT(fEntryBytes == fClientDetachedBytes);
SkASSERT(fList.isEmpty());
}
#endif
@@ -474,25 +382,14 @@ static bool both_zero_or_nonzero(int count, size_t bytes) {
void GrResourceCache::validate() const {
fList.validate();
- fExclusiveList.validate();
SkASSERT(both_zero_or_nonzero(fEntryCount, fEntryBytes));
- SkASSERT(both_zero_or_nonzero(fClientDetachedCount, fClientDetachedBytes));
- SkASSERT(fClientDetachedBytes <= fEntryBytes);
- SkASSERT(fClientDetachedCount <= fEntryCount);
- SkASSERT((fEntryCount - fClientDetachedCount) == fCache.count());
+ SkASSERT(fEntryCount == fCache.count());
EntryList::Iter iter;
- // check that the exclusively held entries are okay
- const GrResourceCacheEntry* entry = iter.init(const_cast<EntryList&>(fExclusiveList),
- EntryList::Iter::kHead_IterStart);
-
- for ( ; entry; entry = iter.next()) {
- entry->validate();
- }
-
// check that the shareable entries are okay
- entry = iter.init(const_cast<EntryList&>(fList), EntryList::Iter::kHead_IterStart);
+ const GrResourceCacheEntry* entry = iter.init(const_cast<EntryList&>(fList),
+ EntryList::Iter::kHead_IterStart);
int count = 0;
for ( ; entry; entry = iter.next()) {
@@ -500,17 +397,11 @@ void GrResourceCache::validate() const {
SkASSERT(fCache.find(entry->key()));
count += 1;
}
- SkASSERT(count == fEntryCount - fClientDetachedCount);
-
- size_t bytes = countBytes(fList);
- SkASSERT(bytes == fEntryBytes - fClientDetachedBytes);
-
- bytes = countBytes(fExclusiveList);
- SkASSERT(bytes == fClientDetachedBytes);
-
- SkASSERT(fList.countEntries() == fEntryCount - fClientDetachedCount);
+ SkASSERT(count == fEntryCount);
- SkASSERT(fExclusiveList.countEntries() == fClientDetachedCount);
+ size_t bytes = this->countBytes(fList);
+ SkASSERT(bytes == fEntryBytes);
+ SkASSERT(fList.countEntries() == fEntryCount);
}
#endif // SK_DEBUG
@@ -534,10 +425,6 @@ void GrResourceCache::printStats() {
fEntryCount, locked, fHighWaterEntryCount);
SkDebugf("\t\tEntry Bytes: current %d high %d\n",
fEntryBytes, fHighWaterEntryBytes);
- SkDebugf("\t\tDetached Entry Count: current %d high %d\n",
- fClientDetachedCount, fHighWaterClientDetachedCount);
- SkDebugf("\t\tDetached Bytes: current %d high %d\n",
- fClientDetachedBytes, fHighWaterClientDetachedBytes);
}
#endif
diff --git a/src/gpu/GrResourceCache.h b/src/gpu/GrResourceCache.h
index 83337805a4..f6d064af39 100644
--- a/src/gpu/GrResourceCache.h
+++ b/src/gpu/GrResourceCache.h
@@ -6,11 +6,10 @@
* found in the LICENSE file.
*/
-
-
#ifndef GrResourceCache_DEFINED
#define GrResourceCache_DEFINED
+#include "GrDrawTargetCaps.h"
#include "GrResourceKey.h"
#include "SkTMultiMap.h"
#include "SkMessageBus.h"
@@ -88,7 +87,7 @@ private:
*/
class GrResourceCache {
public:
- GrResourceCache(int maxCount, size_t maxBytes);
+ GrResourceCache(const GrDrawTargetCaps*, int maxCount, size_t maxBytes);
~GrResourceCache();
/**
@@ -141,26 +140,16 @@ public:
*/
int getCachedResourceCount() const { return fEntryCount; }
- // For a found or added resource to be completely exclusive to the caller
- // both the kNoOtherOwners and kHide flags need to be specified
- enum OwnershipFlags {
- kNoOtherOwners_OwnershipFlag = 0x1, // found/added resource has no other owners
- kHide_OwnershipFlag = 0x2 // found/added resource is hidden from future 'find's
- };
-
/**
* Search for an entry with the same Key. If found, return it.
* If not found, return null.
- * If ownershipFlags includes kNoOtherOwners and a resource is returned
- * then that resource has no other refs to it.
- * If ownershipFlags includes kHide and a resource is returned then that
- * resource will not be returned from future 'find' calls until it is
- * 'freed' (and recycled) or makeNonExclusive is called.
- * For a resource to be completely exclusive to a caller both kNoOtherOwners
- * and kHide must be specified.
*/
- GrGpuResource* find(const GrResourceKey& key,
- uint32_t ownershipFlags = 0);
+ GrGpuResource* find(const GrResourceKey& key);
+
+ void makeResourceMRU(GrGpuResource*);
+
+ /** Called by GrGpuResources when they detect that they are newly purgable. */
+ void notifyPurgable(const GrGpuResource*);
/**
* Add the new resource to the cache (by creating a new cache entry based
@@ -168,14 +157,8 @@ public:
*
* Ownership of the resource is transferred to the resource cache,
* which will unref() it when it is purged or deleted.
- *
- * If ownershipFlags includes kHide, subsequent calls to 'find' will not
- * return 'resource' until it is 'freed' (and recycled) or makeNonExclusive
- * is called.
*/
- void addResource(const GrResourceKey& key,
- GrGpuResource* resource,
- uint32_t ownershipFlags = 0);
+ void addResource(const GrResourceKey& key, GrGpuResource* resource);
/**
* Determines if the cache contains an entry matching a key. If a matching
@@ -184,20 +167,6 @@ public:
bool hasKey(const GrResourceKey& key) const { return SkToBool(fCache.find(key)); }
/**
- * Hide 'entry' so that future searches will not find it. Such
- * hidden entries will not be purged. The entry still counts against
- * the cache's budget and should be made non-exclusive when exclusive access
- * is no longer needed.
- */
- void makeExclusive(GrResourceCacheEntry* entry);
-
- /**
- * Restore 'entry' so that it can be found by future searches. 'entry'
- * will also be purgeable (provided its lock count is now 0.)
- */
- void makeNonExclusive(GrResourceCacheEntry* entry);
-
- /**
* Notify the cache that the size of a resource has changed.
*/
void didIncreaseResourceSize(const GrResourceCacheEntry*, size_t amountInc);
@@ -237,59 +206,45 @@ public:
#endif
private:
- enum BudgetBehaviors {
- kAccountFor_BudgetBehavior,
- kIgnore_BudgetBehavior
- };
-
- void internalDetach(GrResourceCacheEntry*, BudgetBehaviors behavior = kAccountFor_BudgetBehavior);
- void attachToHead(GrResourceCacheEntry*, BudgetBehaviors behavior = kAccountFor_BudgetBehavior);
-
- void removeInvalidResource(GrResourceCacheEntry* entry);
+ void internalDetach(GrResourceCacheEntry*);
+ void attachToHead(GrResourceCacheEntry*);
+ void purgeInvalidated();
+ void internalPurge(int extraCount, size_t extraBytes);
+#ifdef SK_DEBUG
+ static size_t countBytes(const SkTInternalLList<GrResourceCacheEntry>& list);
+#endif
- SkTMultiMap<GrResourceCacheEntry, GrResourceKey> fCache;
+ typedef SkTMultiMap<GrResourceCacheEntry, GrResourceKey> CacheMap;
+ CacheMap fCache;
// We're an internal doubly linked list
typedef SkTInternalLList<GrResourceCacheEntry> EntryList;
- EntryList fList;
-
-#ifdef SK_DEBUG
- // These objects cannot be returned by a search
- EntryList fExclusiveList;
-#endif
+ EntryList fList;
// our budget, used in purgeAsNeeded()
- int fMaxCount;
- size_t fMaxBytes;
+ int fMaxCount;
+ size_t fMaxBytes;
// our current stats, related to our budget
#if GR_CACHE_STATS
- int fHighWaterEntryCount;
- size_t fHighWaterEntryBytes;
- int fHighWaterClientDetachedCount;
- size_t fHighWaterClientDetachedBytes;
+ int fHighWaterEntryCount;
+ size_t fHighWaterEntryBytes;
#endif
- int fEntryCount;
- size_t fEntryBytes;
- int fClientDetachedCount;
- size_t fClientDetachedBytes;
+ int fEntryCount;
+ size_t fEntryBytes;
// prevents recursive purging
- bool fPurging;
+ bool fPurging;
- PFOverbudgetCB fOverbudgetCB;
- void* fOverbudgetData;
+ PFOverbudgetCB fOverbudgetCB;
+ void* fOverbudgetData;
- void internalPurge(int extraCount, size_t extraBytes);
+ SkAutoTUnref<const GrDrawTargetCaps> fCaps;
// Listen for messages that a resource has been invalidated and purge cached junk proactively.
- SkMessageBus<GrResourceInvalidatedMessage>::Inbox fInvalidationInbox;
- void purgeInvalidated();
-
-#ifdef SK_DEBUG
- static size_t countBytes(const SkTInternalLList<GrResourceCacheEntry>& list);
-#endif
+ typedef SkMessageBus<GrResourceInvalidatedMessage>::Inbox Inbox;
+ Inbox fInvalidationInbox;
};
///////////////////////////////////////////////////////////////////////////////
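The header change above reduces the cache's public surface to a keyed find/add pair plus MRU and purgability notifications. A minimal sketch of the resulting call pattern follows; it is not part of the patch, and `gpu`, `key`, and `resource` are hypothetical stand-ins for values a real caller already has:

    // Construct the cache with the gpu's caps and count/byte budgets,
    // matching the new constructor signature.
    GrResourceCache cache(gpu->caps(), 2048, 96 * 1024 * 1024);

    // Lookups now take only the key; the old ownership flags are gone.
    if (GrGpuResource* found = cache.find(key)) {
        // Use 'found' directly; exclusive/hidden entries no longer exist.
    } else {
        // Hand the resource to the cache, which unref()s it when it is
        // purged or deleted, then drop the caller's ref as the tests do.
        cache.addResource(key, resource);
        resource->unref();
    }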
diff --git a/src/gpu/GrResourceCache2.cpp b/src/gpu/GrResourceCache2.cpp
index e0ba26ae83..85e66a7642 100644
--- a/src/gpu/GrResourceCache2.cpp
+++ b/src/gpu/GrResourceCache2.cpp
@@ -8,7 +8,8 @@
#include "GrResourceCache2.h"
-#include "GrGpuResource.h"
+#include "GrGpuResource.h"
+#include "SkRefCnt.h"
GrResourceCache2::~GrResourceCache2() {
this->releaseAll();
@@ -55,3 +56,32 @@ void GrResourceCache2::releaseAll() {
SkASSERT(!fScratchMap.count());
SkASSERT(!fCount);
}
+
+class GrResourceCache2::AvailableForScratchUse {
+public:
+ AvailableForScratchUse(bool calledDuringFlush) : fFlushing(calledDuringFlush) { }
+
+ bool operator()(const GrGpuResource* resource) const {
+ if (fFlushing) {
+ // If this request is coming during draw buffer flush then no refs are allowed
+ // either by drawing code or for pending io operations.
+ // This will be removed when flush no longer creates resources.
+ return resource->reffedOnlyByCache() && !resource->internalHasPendingIO() &&
+ GrGpuResource::kYes_IsScratch == resource->fIsScratch;
+ } else {
+ // Because duties are currently shared between GrResourceCache and GrResourceCache2, the
+ // current interpretation of this rule is that the only ref is held by GrResourceCache
+ // and the resource has been marked as a scratch resource.
+ return resource->reffedOnlyByCache() &&
+ GrGpuResource::kYes_IsScratch == resource->fIsScratch;
+ }
+ }
+private:
+ bool fFlushing;
+};
+
+GrGpuResource* GrResourceCache2::findAndRefScratchResource(const GrResourceKey& scratchKey,
+ bool calledDuringFlush) {
+ SkASSERT(scratchKey.isScratch());
+ return SkSafeRef(fScratchMap.find(scratchKey, AvailableForScratchUse(calledDuringFlush)));
+}
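The functor and lookup added above move scratch-texture reuse into GrResourceCache2. A rough sketch of how a caller such as GrContext could drive it; the surrounding variables (`rc2`, `desc`) and the fallback path are assumptions, not code from this patch:

    // Compute the scratch key for the requested texture description and ask
    // GrResourceCache2 for a matching resource that only the cache still refs.
    // findAndRefScratchResource already ref'ed the result via SkSafeRef.
    GrResourceKey scratchKey = GrTexturePriv::ComputeScratchKey(desc);
    GrGpuResource* resource =
            rc2->findAndRefScratchResource(scratchKey, false /* calledDuringFlush */);
    if (resource) {
        return static_cast<GrTexture*>(resource);   // reuse the scratch texture
    }
    // Otherwise create a new texture and register it as scratch with the cache.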
diff --git a/src/gpu/GrResourceCache2.h b/src/gpu/GrResourceCache2.h
index e05efd744e..d48ca0bf6d 100644
--- a/src/gpu/GrResourceCache2.h
+++ b/src/gpu/GrResourceCache2.h
@@ -24,14 +24,17 @@ public:
GrResourceCache2() : fCount(0) {};
~GrResourceCache2();
- void insertResource(GrGpuResource* resource);
+ void insertResource(GrGpuResource*);
- void removeResource(GrGpuResource* resource);
+ void removeResource(GrGpuResource*);
void abandonAll();
void releaseAll();
+ GrGpuResource* findAndRefScratchResource(const GrResourceKey& scratchKey,
+ bool calledDuringFlush);
+
private:
#ifdef SK_DEBUG
bool isInCache(const GrGpuResource* r) const {
@@ -39,8 +42,8 @@ private:
}
#endif
+ class AvailableForScratchUse;
- void removeScratch(const GrGpuResource* resource);
struct ScratchMapTraits {
static const GrResourceKey& GetKey(const GrGpuResource& r) {
return r.getScratchKey();
diff --git a/src/gpu/GrTexture.cpp b/src/gpu/GrTexture.cpp
index e1188f961b..aac91c5858 100644
--- a/src/gpu/GrTexture.cpp
+++ b/src/gpu/GrTexture.cpp
@@ -21,27 +21,6 @@ GrTexture::~GrTexture() {
}
}
-/**
- * This method allows us to interrupt the normal deletion process and place
- * textures back in the texture cache when their ref count goes to zero.
- */
-void GrTexture::internal_dispose() const {
- if (this->texturePriv().isSetFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit) &&
- this->INHERITED::getContext()) {
- GrTexture* nonConstThis = const_cast<GrTexture *>(this);
- this->ref(); // restore ref count to initial setting
-
- nonConstThis->texturePriv().resetFlag((GrTextureFlags) kReturnToCache_FlagBit);
- nonConstThis->INHERITED::getContext()->addExistingTextureToCache(nonConstThis);
-
- // Note: "this" texture might be freed inside addExistingTextureToCache
- // if it is purged.
- return;
- }
-
- this->INHERITED::internal_dispose();
-}
-
void GrTexture::dirtyMipMaps(bool mipMapsDirty) {
if (mipMapsDirty) {
if (kValid_MipMapsStatus == fMipMapsStatus) {
@@ -102,27 +81,12 @@ void GrTexture::writePixels(int left, int top, int width, int height,
pixelOpsFlags);
}
-void GrTexture::abandonReleaseCommon() {
- // In debug builds the resource cache tracks removed/exclusive textures and has an unref'ed ptr.
- // After abandon() or release() the resource cache will be unreachable (getContext() == NULL).
- // So we readd the texture to the cache here so that it is removed from the exclusive list and
- // there is no longer an unref'ed ptr to the texture in the cache.
- if (this->texturePriv().isSetFlag((GrTextureFlags)kReturnToCache_FlagBit)) {
- SkASSERT(!this->wasDestroyed());
- this->ref(); // restores the ref the resource cache gave up when it marked this exclusive.
- this->texturePriv().resetFlag((GrTextureFlags) kReturnToCache_FlagBit);
- this->getContext()->addExistingTextureToCache(this);
- }
-}
-
void GrTexture::onRelease() {
- this->abandonReleaseCommon();
SkASSERT(!this->texturePriv().isSetFlag((GrTextureFlags) kReturnToCache_FlagBit));
INHERITED::onRelease();
}
void GrTexture::onAbandon() {
- this->abandonReleaseCommon();
if (fRenderTarget.get()) {
fRenderTarget->abandon();
}
@@ -187,11 +151,6 @@ GrResourceKey::ResourceFlags get_texture_flags(const GrGpu* gpu,
return flags;
}
-GrResourceKey::ResourceType texture_resource_type() {
- static const GrResourceKey::ResourceType gType = GrResourceKey::GenerateResourceType();
- return gType;
-}
-
// FIXME: This should be refactored with the code in gl/GrGpuGL.cpp.
GrSurfaceOrigin resolve_origin(const GrTextureDesc& desc) {
// By default, GrRenderTargets are GL's normal orientation so that they
@@ -222,7 +181,7 @@ GrResourceKey GrTexturePriv::ComputeKey(const GrGpu* gpu,
const GrTextureDesc& desc,
const GrCacheID& cacheID) {
GrResourceKey::ResourceFlags flags = get_texture_flags(gpu, params, desc);
- return GrResourceKey(cacheID, texture_resource_type(), flags);
+ return GrResourceKey(cacheID, ResourceType(), flags);
}
GrResourceKey GrTexturePriv::ComputeScratchKey(const GrTextureDesc& desc) {
@@ -241,7 +200,7 @@ GrResourceKey GrTexturePriv::ComputeScratchKey(const GrTextureDesc& desc) {
memset(idKey.fData8 + 16, 0, kPadSize);
GrCacheID cacheID(GrResourceKey::ScratchDomain(), idKey);
- return GrResourceKey(cacheID, texture_resource_type(), 0);
+ return GrResourceKey(cacheID, ResourceType(), 0);
}
bool GrTexturePriv::NeedsResizing(const GrResourceKey& key) {
diff --git a/src/gpu/GrTextureAccess.cpp b/src/gpu/GrTextureAccess.cpp
index 662ccd65f8..7e1eda6211 100644
--- a/src/gpu/GrTextureAccess.cpp
+++ b/src/gpu/GrTextureAccess.cpp
@@ -46,7 +46,7 @@ void GrTextureAccess::reset(GrTexture* texture,
SkASSERT(strlen(swizzle) >= 1 && strlen(swizzle) <= 4);
fParams = params;
- fTexture.set(SkRef(texture), GrIORef::kRead_IOType);
+ fTexture.set(SkRef(texture), kRead_GrIOType);
this->setSwizzle(swizzle);
}
@@ -58,14 +58,14 @@ void GrTextureAccess::reset(GrTexture* texture,
SkASSERT(strlen(swizzle) >= 1 && strlen(swizzle) <= 4);
fParams.reset(tileXAndY, filterMode);
- fTexture.set(SkRef(texture), GrIORef::kRead_IOType);
+ fTexture.set(SkRef(texture), kRead_GrIOType);
this->setSwizzle(swizzle);
}
void GrTextureAccess::reset(GrTexture* texture,
const GrTextureParams& params) {
SkASSERT(texture);
- fTexture.set(SkRef(texture), GrIORef::kRead_IOType);
+ fTexture.set(SkRef(texture), kRead_GrIOType);
fParams = params;
memcpy(fSwizzle, "rgba", 5);
fSwizzleMask = kRGBA_GrColorComponentFlags;
@@ -75,7 +75,7 @@ void GrTextureAccess::reset(GrTexture* texture,
GrTextureParams::FilterMode filterMode,
SkShader::TileMode tileXAndY) {
SkASSERT(texture);
- fTexture.set(SkRef(texture), GrIORef::kRead_IOType);
+ fTexture.set(SkRef(texture), kRead_GrIOType);
fParams.reset(tileXAndY, filterMode);
memcpy(fSwizzle, "rgba", 5);
fSwizzleMask = kRGBA_GrColorComponentFlags;
diff --git a/src/gpu/GrTexturePriv.h b/src/gpu/GrTexturePriv.h
index 9a3e0e215b..a2da946987 100644
--- a/src/gpu/GrTexturePriv.h
+++ b/src/gpu/GrTexturePriv.h
@@ -39,6 +39,11 @@ public:
return GrTexture::kNotAllocated_MipMapsStatus != fTexture->fMipMapsStatus;
}
+ static GrResourceKey::ResourceType ResourceType() {
+ static const GrResourceKey::ResourceType gType = GrResourceKey::GenerateResourceType();
+ return gType;
+ }
+
static GrResourceKey ComputeKey(const GrGpu* gpu,
const GrTextureParams* params,
const GrTextureDesc& desc,
diff --git a/src/gpu/SkGpuDevice.cpp b/src/gpu/SkGpuDevice.cpp
index ad01a363f0..6f4d1a6f99 100644
--- a/src/gpu/SkGpuDevice.cpp
+++ b/src/gpu/SkGpuDevice.cpp
@@ -77,50 +77,37 @@ enum { kDefaultImageFilterCacheSize = 32 * 1024 * 1024 };
///////////////////////////////////////////////////////////////////////////////
-
-class SkGpuDevice::SkAutoCachedTexture : public ::SkNoncopyable {
+// Helper for turning a bitmap into a texture. If the bitmap is GrTexture backed this
+// just accesses the backing GrTexture. Otherwise, it creates a cached texture
+// representation and releases it in the destructor.
+class AutoBitmapTexture : public SkNoncopyable {
public:
- SkAutoCachedTexture()
- : fDevice(NULL)
- , fTexture(NULL) {
- }
+ AutoBitmapTexture() {}
- SkAutoCachedTexture(SkGpuDevice* device,
- const SkBitmap& bitmap,
- const GrTextureParams* params,
- GrTexture** texture)
- : fDevice(NULL)
- , fTexture(NULL) {
+ AutoBitmapTexture(GrContext* context,
+ const SkBitmap& bitmap,
+ const GrTextureParams* params,
+ GrTexture** texture) {
SkASSERT(texture);
- *texture = this->set(device, bitmap, params);
+ *texture = this->set(context, bitmap, params);
}
- ~SkAutoCachedTexture() {
- if (fTexture) {
- GrUnlockAndUnrefCachedBitmapTexture(fTexture);
- }
- }
-
- GrTexture* set(SkGpuDevice* device,
+ GrTexture* set(GrContext* context,
const SkBitmap& bitmap,
const GrTextureParams* params) {
- if (fTexture) {
- GrUnlockAndUnrefCachedBitmapTexture(fTexture);
- fTexture = NULL;
- }
- fDevice = device;
- GrTexture* result = (GrTexture*)bitmap.getTexture();
- if (NULL == result) {
- // Cannot return the native texture so look it up in our cache
- fTexture = GrLockAndRefCachedBitmapTexture(device->context(), bitmap, params);
- result = fTexture;
+ // Either get the texture directly from the bitmap, or else use the cache and
+ // remember to unref it.
+ if (GrTexture* bmpTexture = bitmap.getTexture()) {
+ fTexture.reset(NULL);
+ return bmpTexture;
+ } else {
+ fTexture.reset(GrRefCachedBitmapTexture(context, bitmap, params));
+ return fTexture.get();
}
- return result;
}
private:
- SkGpuDevice* fDevice;
- GrTexture* fTexture;
+ SkAutoTUnref<GrTexture> fTexture;
};
///////////////////////////////////////////////////////////////////////////////
@@ -153,8 +140,7 @@ SkGpuDevice::SkGpuDevice(GrSurface* surface, const SkSurfaceProps& props, unsign
fRenderTarget = SkRef(surface->asRenderTarget());
SkImageInfo info = surface->surfacePriv().info();
- SkPixelRef* pr = SkNEW_ARGS(SkGrPixelRef,
- (info, surface, SkToBool(flags & kCached_Flag)));
+ SkPixelRef* pr = SkNEW_ARGS(SkGrPixelRef, (info, surface));
fLegacyBitmap.setInfo(info);
fLegacyBitmap.setPixelRef(pr)->unref();
@@ -1299,7 +1285,7 @@ void SkGpuDevice::internalDrawBitmap(const SkBitmap& bitmap,
bitmap.height() <= fContext->getMaxTextureSize());
GrTexture* texture;
- SkAutoCachedTexture act(this, bitmap, &params, &texture);
+ AutoBitmapTexture abt(fContext, bitmap, &params, &texture);
if (NULL == texture) {
return;
}
@@ -1394,7 +1380,7 @@ void SkGpuDevice::drawSprite(const SkDraw& draw, const SkBitmap& bitmap,
GrTexture* texture;
// draw sprite uses the default texture params
- SkAutoCachedTexture act(this, bitmap, NULL, &texture);
+ AutoBitmapTexture abt(fContext, bitmap, NULL, &texture);
SkImageFilter* filter = paint.getImageFilter();
// This bitmap will own the filtered result as a texture.
@@ -1571,7 +1557,7 @@ bool SkGpuDevice::filterImage(const SkImageFilter* filter, const SkBitmap& src,
GrTexture* texture;
// We assume here that the filter will not attempt to tile the src. Otherwise, this cache lookup
// must be pushed upstack.
- SkAutoCachedTexture act(this, src, NULL, &texture);
+ AutoBitmapTexture abt(fContext, src, NULL, &texture);
return filter_texture(this, fContext, texture, filter, src.width(), src.height(), ctx,
result, offset);
@@ -1802,7 +1788,6 @@ SkBaseDevice* SkGpuDevice::onCreateDevice(const SkImageInfo& info, Usage usage)
#if CACHE_COMPATIBLE_DEVICE_TEXTURES
// layers are never drawn in repeat modes, so we can request an approx
// match and ignore any padding.
- flags |= kCached_Flag;
const GrContext::ScratchTexMatch match = (kSaveLayer_Usage == usage) ?
GrContext::kApprox_ScratchTexMatch :
GrContext::kExact_ScratchTexMatch;
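For reference, a hypothetical call site using the new stack-scoped helper above; the function name and arguments here are illustrative only, but the pattern matches how internalDrawBitmap and drawSprite use it in this patch:

    void draw_with_bitmap_texture(GrContext* context, const SkBitmap& bitmap,
                                  const GrTextureParams* params) {
        GrTexture* texture;
        AutoBitmapTexture abt(context, bitmap, params, &texture);
        if (NULL == texture) {
            return;  // no backing texture and the cache lookup/upload failed
        }
        // ... draw using 'texture'. The SkAutoTUnref inside abt releases the
        // cached texture (if one was created) when abt goes out of scope.
    }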
diff --git a/src/gpu/SkGpuDevice.h b/src/gpu/SkGpuDevice.h
index 285a73745c..fe03e32f73 100644
--- a/src/gpu/SkGpuDevice.h
+++ b/src/gpu/SkGpuDevice.h
@@ -34,8 +34,7 @@ class SK_API SkGpuDevice : public SkBaseDevice {
public:
enum Flags {
kNeedClear_Flag = 1 << 0, //!< Surface requires an initial clear
- kCached_Flag = 1 << 1, //!< Surface is cached and needs to be unlocked when released
- kDFFonts_Flag = 1 << 2, //!< Surface should render fonts using signed distance fields
+ kDFFonts_Flag = 1 << 1, //!< Surface should render fonts using signed distance fields
};
/**
@@ -117,9 +116,6 @@ public:
const SkImageFilter::Context&,
SkBitmap*, SkIPoint*) SK_OVERRIDE;
- class SkAutoCachedTexture; // used internally
-
-
protected:
virtual bool onReadPixels(const SkImageInfo&, void*, size_t, int, int) SK_OVERRIDE;
virtual bool onWritePixels(const SkImageInfo&, const void*, size_t, int, int) SK_OVERRIDE;
diff --git a/src/gpu/SkGr.cpp b/src/gpu/SkGr.cpp
index e81abdbae4..d9435459f1 100644
--- a/src/gpu/SkGr.cpp
+++ b/src/gpu/SkGr.cpp
@@ -369,9 +369,9 @@ bool GrIsBitmapInCache(const GrContext* ctx,
return ctx->isTextureInCache(desc, cacheID, params);
}
-GrTexture* GrLockAndRefCachedBitmapTexture(GrContext* ctx,
- const SkBitmap& bitmap,
- const GrTextureParams* params) {
+GrTexture* GrRefCachedBitmapTexture(GrContext* ctx,
+ const SkBitmap& bitmap,
+ const GrTextureParams* params) {
GrTexture* result = NULL;
bool cache = !bitmap.isVolatile();
@@ -397,13 +397,6 @@ GrTexture* GrLockAndRefCachedBitmapTexture(GrContext* ctx,
return result;
}
-void GrUnlockAndUnrefCachedBitmapTexture(GrTexture* texture) {
- SkASSERT(texture->getContext());
-
- texture->getContext()->unlockScratchTexture(texture);
- texture->unref();
-}
-
///////////////////////////////////////////////////////////////////////////////
// alphatype is ignored for now, but if GrPixelConfig is expanded to encompass
diff --git a/src/gpu/SkGrPixelRef.cpp b/src/gpu/SkGrPixelRef.cpp
index 489a418ce6..448f2d3931 100644
--- a/src/gpu/SkGrPixelRef.cpp
+++ b/src/gpu/SkGrPixelRef.cpp
@@ -99,8 +99,7 @@ static SkGrPixelRef* copy_to_new_texture_pixelref(GrTexture* texture, SkColorTyp
///////////////////////////////////////////////////////////////////////////////
-SkGrPixelRef::SkGrPixelRef(const SkImageInfo& info, GrSurface* surface,
- bool transferCacheLock) : INHERITED(info) {
+SkGrPixelRef::SkGrPixelRef(const SkImageInfo& info, GrSurface* surface) : INHERITED(info) {
// For surfaces that are both textures and render targets, the texture owns the
// render target but not vice versa. So we ref the texture to keep both alive for
// the lifetime of this pixel ref.
@@ -108,7 +107,6 @@ SkGrPixelRef::SkGrPixelRef(const SkImageInfo& info, GrSurface* surface,
if (NULL == fSurface) {
fSurface = SkSafeRef(surface);
}
- fUnlock = transferCacheLock;
if (fSurface) {
SkASSERT(info.width() <= fSurface->width());
@@ -117,13 +115,6 @@ SkGrPixelRef::SkGrPixelRef(const SkImageInfo& info, GrSurface* surface,
}
SkGrPixelRef::~SkGrPixelRef() {
- if (fUnlock) {
- GrContext* context = fSurface->getContext();
- GrTexture* texture = fSurface->asTexture();
- if (context && texture) {
- context->unlockScratchTexture(texture);
- }
- }
SkSafeUnref(fSurface);
}
diff --git a/src/gpu/effects/GrTextureStripAtlas.cpp b/src/gpu/effects/GrTextureStripAtlas.cpp
index 91df897eb8..9755ccd2af 100644
--- a/src/gpu/effects/GrTextureStripAtlas.cpp
+++ b/src/gpu/effects/GrTextureStripAtlas.cpp
@@ -216,7 +216,6 @@ void GrTextureStripAtlas::unlockTexture() {
SkASSERT(fTexture && 0 == fLockedRows);
fTexture->unref();
fTexture = NULL;
- fDesc.fContext->purgeCache();
}
void GrTextureStripAtlas::initLRU() {
diff --git a/src/image/SkSurface_Gpu.cpp b/src/image/SkSurface_Gpu.cpp
index fb087ea24b..e6fd080235 100644
--- a/src/image/SkSurface_Gpu.cpp
+++ b/src/image/SkSurface_Gpu.cpp
@@ -14,7 +14,7 @@ class SkSurface_Gpu : public SkSurface_Base {
public:
SK_DECLARE_INST_COUNT(SkSurface_Gpu)
- SkSurface_Gpu(GrRenderTarget*, bool cached, const SkSurfaceProps*, bool doClear);
+ SkSurface_Gpu(GrRenderTarget*, const SkSurfaceProps*, bool doClear);
virtual ~SkSurface_Gpu();
virtual SkCanvas* onNewCanvas() SK_OVERRIDE;
@@ -33,12 +33,10 @@ private:
///////////////////////////////////////////////////////////////////////////////
-SkSurface_Gpu::SkSurface_Gpu(GrRenderTarget* renderTarget, bool cached, const SkSurfaceProps* props,
+SkSurface_Gpu::SkSurface_Gpu(GrRenderTarget* renderTarget, const SkSurfaceProps* props,
bool doClear)
- : INHERITED(renderTarget->width(), renderTarget->height(), props)
-{
+ : INHERITED(renderTarget->width(), renderTarget->height(), props) {
int deviceFlags = 0;
- deviceFlags |= cached ? SkGpuDevice::kCached_Flag : 0;
deviceFlags |= this->props().isUseDistanceFieldFonts() ? SkGpuDevice::kDFFonts_Flag : 0;
fDevice = SkGpuDevice::Create(renderTarget, this->props(), deviceFlags);
@@ -111,7 +109,7 @@ SkSurface* SkSurface::NewRenderTargetDirect(GrRenderTarget* target, const SkSurf
if (NULL == target) {
return NULL;
}
- return SkNEW_ARGS(SkSurface_Gpu, (target, false, props, false));
+ return SkNEW_ARGS(SkSurface_Gpu, (target, props, false));
}
SkSurface* SkSurface::NewRenderTarget(GrContext* ctx, const SkImageInfo& info, int sampleCount,
@@ -132,7 +130,7 @@ SkSurface* SkSurface::NewRenderTarget(GrContext* ctx, const SkImageInfo& info, i
return NULL;
}
- return SkNEW_ARGS(SkSurface_Gpu, (tex->asRenderTarget(), false, props, true));
+ return SkNEW_ARGS(SkSurface_Gpu, (tex->asRenderTarget(), props, true));
}
SkSurface* SkSurface::NewScratchRenderTarget(GrContext* ctx, const SkImageInfo& info,
@@ -154,5 +152,5 @@ SkSurface* SkSurface::NewScratchRenderTarget(GrContext* ctx, const SkImageInfo&
return NULL;
}
- return SkNEW_ARGS(SkSurface_Gpu, (tex->asRenderTarget(), true, props, true));
+ return SkNEW_ARGS(SkSurface_Gpu, (tex->asRenderTarget(), props, true));
}
diff --git a/tests/ResourceCacheTest.cpp b/tests/ResourceCacheTest.cpp
index f9f94d1b27..00f4e85000 100644
--- a/tests/ResourceCacheTest.cpp
+++ b/tests/ResourceCacheTest.cpp
@@ -7,9 +7,11 @@
#if SK_SUPPORT_GPU
-#include "SkCanvas.h"
+#include "GrContext.h"
#include "GrContextFactory.h"
+#include "GrGpu.h"
#include "GrResourceCache.h"
+#include "SkCanvas.h"
#include "SkSurface.h"
#include "Test.h"
@@ -115,7 +117,7 @@ static void test_purge_invalidated(skiatest::Reporter* reporter, GrContext* cont
GrResourceKey::ResourceType t = GrResourceKey::GenerateResourceType();
GrResourceKey key(GrCacheID(domain, keyData), t, 0);
- GrResourceCache cache(5, 30000);
+ GrResourceCache cache(context->getGpu()->caps(), 5, 30000);
// Add two resources with the same key that delete each other from the cache when destroyed.
TestResource* a = new TestResource(context->getGpu());
@@ -153,7 +155,7 @@ static void test_cache_delete_on_destruction(skiatest::Reporter* reporter,
{
{
- GrResourceCache cache(3, 30000);
+ GrResourceCache cache(context->getGpu()->caps(), 3, 30000);
TestResource* a = new TestResource(context->getGpu());
TestResource* b = new TestResource(context->getGpu());
cache.addResource(key, a);
@@ -169,7 +171,7 @@ static void test_cache_delete_on_destruction(skiatest::Reporter* reporter,
REPORTER_ASSERT(reporter, 0 == TestResource::alive());
}
{
- GrResourceCache cache(3, 30000);
+ GrResourceCache cache(context->getGpu()->caps(), 3, 30000);
TestResource* a = new TestResource(context->getGpu());
TestResource* b = new TestResource(context->getGpu());
cache.addResource(key, a);
@@ -204,7 +206,7 @@ static void test_resource_size_changed(skiatest::Reporter* reporter,
// Test changing resources sizes (both increase & decrease).
{
- GrResourceCache cache(2, 300);
+ GrResourceCache cache(context->getGpu()->caps(), 2, 300);
TestResource* a = new TestResource(context->getGpu());
a->setSize(100); // Test didChangeGpuMemorySize() when not in the cache.
@@ -228,7 +230,7 @@ static void test_resource_size_changed(skiatest::Reporter* reporter,
// Test increasing a resources size beyond the cache budget.
{
- GrResourceCache cache(2, 300);
+ GrResourceCache cache(context->getGpu()->caps(), 2, 300);
TestResource* a = new TestResource(context->getGpu(), 100);
cache.addResource(key1, a);
@@ -247,37 +249,6 @@ static void test_resource_size_changed(skiatest::Reporter* reporter,
REPORTER_ASSERT(reporter, 201 == cache.getCachedResourceBytes());
REPORTER_ASSERT(reporter, 1 == cache.getCachedResourceCount());
}
-
- // Test changing the size of an exclusively-held resource.
- {
- GrResourceCache cache(2, 300);
-
- TestResource* a = new TestResource(context->getGpu(), 100);
- cache.addResource(key1, a);
- cache.makeExclusive(a->getCacheEntry());
-
- TestResource* b = new TestResource(context->getGpu(), 100);
- cache.addResource(key2, b);
- b->unref();
-
- REPORTER_ASSERT(reporter, 200 == cache.getCachedResourceBytes());
- REPORTER_ASSERT(reporter, 2 == cache.getCachedResourceCount());
- REPORTER_ASSERT(reporter, NULL == cache.find(key1));
-
- a->setSize(200);
-
- REPORTER_ASSERT(reporter, 300 == cache.getCachedResourceBytes());
- REPORTER_ASSERT(reporter, 2 == cache.getCachedResourceCount());
- // Internal resource cache validation will test the detached size (debug mode only).
-
- cache.makeNonExclusive(a->getCacheEntry());
- a->unref();
-
- REPORTER_ASSERT(reporter, 300 == cache.getCachedResourceBytes());
- REPORTER_ASSERT(reporter, 2 == cache.getCachedResourceCount());
- REPORTER_ASSERT(reporter, cache.find(key1));
- // Internal resource cache validation will test the detached size (debug mode only).
- }
}
////////////////////////////////////////////////////////////////////////////////