diff options
author | bsalomon@google.com <bsalomon@google.com@2bbb7eff-a529-9590-31e7-b0007b416f81> | 2012-12-20 14:18:10 +0000 |
---|---|---|
committer | bsalomon@google.com <bsalomon@google.com@2bbb7eff-a529-9590-31e7-b0007b416f81> | 2012-12-20 14:18:10 +0000 |
commit | 4b86e3428b115202e82d49a0914ea8ab6dc25940 (patch) | |
tree | d519732030f8abec0f9e8a31b4ce87c1035cfe3e /src | |
parent | 9532953aa11289aea7c1fbd1438adca61a34bb24 (diff) |
Simplify cache IDs and keys.
R=robertphillips@google.com
Review URL: https://codereview.appspot.com/6954047
git-svn-id: http://skia.googlecode.com/svn/trunk@6914 2bbb7eff-a529-9590-31e7-b0007b416f81
Diffstat (limited to 'src')
-rw-r--r-- | src/gpu/GrBinHashKey.h | 37 | ||||
-rw-r--r-- | src/gpu/GrCacheID.cpp | 39 | ||||
-rw-r--r-- | src/gpu/GrClipMaskManager.cpp | 3 | ||||
-rw-r--r-- | src/gpu/GrClipMaskManager.h | 2 | ||||
-rw-r--r-- | src/gpu/GrContext.cpp | 45 | ||||
-rw-r--r-- | src/gpu/GrResourceCache.cpp | 44 | ||||
-rw-r--r-- | src/gpu/GrResourceCache.h | 176 | ||||
-rw-r--r-- | src/gpu/GrStencilBuffer.cpp | 39 | ||||
-rw-r--r-- | src/gpu/GrStencilBuffer.h | 2 | ||||
-rw-r--r-- | src/gpu/GrTHashCache.h | 7 | ||||
-rw-r--r-- | src/gpu/GrTexture.cpp | 115 | ||||
-rw-r--r-- | src/gpu/SkGpuDevice.cpp | 26 | ||||
-rw-r--r-- | src/gpu/SkGr.cpp | 94 | ||||
-rw-r--r-- | src/gpu/effects/GrTextureStripAtlas.cpp | 18 | ||||
-rw-r--r-- | src/gpu/effects/GrTextureStripAtlas.h | 4 |
15 files changed, 313 insertions, 338 deletions
diff --git a/src/gpu/GrBinHashKey.h b/src/gpu/GrBinHashKey.h index d2194e9471..8fa53ef68a 100644 --- a/src/gpu/GrBinHashKey.h +++ b/src/gpu/GrBinHashKey.h @@ -16,25 +16,27 @@ * Hash function class that can take a data chunk of any predetermined length. The hash function * used is the One-at-a-Time Hash (http://burtleburtle.net/bob/hash/doobs.html). * - * Keys are computed from Entry objects. Entry must be fully ordered by a member: - * int compare(const GrTBinHashKey<Entry, ..>& k); - * which returns negative if the Entry < k, 0 if it equals k, and positive if k < the Entry. - * Additionally, Entry must be flattenable into the key using setKeyData. + * Keys are computed from ENTRY objects. ENTRY must be fully ordered by a member: + * int compare(const GrTBinHashKey<ENTRY, ..>& k); + * which returns negative if the ENTRY < k, 0 if it equals k, and positive if k < the ENTRY. + * Additionally, ENTRY must be flattenable into the key using setKeyData. * * This class satisfies the requirements to be a key for a GrTHashTable. 
*/ -template<typename Entry, size_t KeySize> +template<typename ENTRY, size_t KEY_SIZE> class GrTBinHashKey { public: + enum { kKeySize = KEY_SIZE }; + GrTBinHashKey() { this->reset(); } - GrTBinHashKey(const GrTBinHashKey<Entry, KeySize>& other) { + GrTBinHashKey(const GrTBinHashKey<ENTRY, KEY_SIZE>& other) { *this = other; } - GrTBinHashKey<Entry, KeySize>& operator=(const GrTBinHashKey<Entry, KeySize>& other) { + GrTBinHashKey<ENTRY, KEY_SIZE>& operator=(const GrTBinHashKey<ENTRY, KEY_SIZE>& other) { memcpy(this, &other, sizeof(*this)); return *this; } @@ -50,11 +52,11 @@ public: } void setKeyData(const uint32_t* SK_RESTRICT data) { - GrAssert(GrIsALIGN4(KeySize)); - memcpy(&fData, data, KeySize); + GrAssert(GrIsALIGN4(KEY_SIZE)); + memcpy(&fData, data, KEY_SIZE); uint32_t hash = 0; - size_t len = KeySize; + size_t len = KEY_SIZE; while (len >= 4) { hash += *data++; hash += (fHash << 10); @@ -70,17 +72,17 @@ public: fHash = hash; } - int compare(const GrTBinHashKey<Entry, KeySize>& key) const { + int compare(const GrTBinHashKey<ENTRY, KEY_SIZE>& key) const { GrAssert(fIsValid && key.fIsValid); - return memcmp(fData, key.fData, KeySize); + return memcmp(fData, key.fData, KEY_SIZE); } - static bool EQ(const Entry& entry, const GrTBinHashKey<Entry, KeySize>& key) { + static bool EQ(const ENTRY& entry, const GrTBinHashKey<ENTRY, KEY_SIZE>& key) { GrAssert(key.fIsValid); return 0 == entry.compare(key); } - static bool LT(const Entry& entry, const GrTBinHashKey<Entry, KeySize>& key) { + static bool LT(const ENTRY& entry, const GrTBinHashKey<ENTRY, KEY_SIZE>& key) { GrAssert(key.fIsValid); return entry.compare(key) < 0; } @@ -90,9 +92,14 @@ public: return fHash; } + const uint8_t* getData() const { + GrAssert(fIsValid); + return fData; + } + private: uint32_t fHash; - uint8_t fData[KeySize]; // Buffer for key storage + uint8_t fData[KEY_SIZE]; // Buffer for key storage #if GR_DEBUG public: diff --git a/src/gpu/GrCacheID.cpp b/src/gpu/GrCacheID.cpp index 
4c6dd492c3..ce47af3e3f 100644 --- a/src/gpu/GrCacheID.cpp +++ b/src/gpu/GrCacheID.cpp @@ -5,40 +5,21 @@ * found in the LICENSE file. */ -#include "GrCacheID.h" +#include "GrTypes.h" #include "SkThread.h" // for sk_atomic_inc -uint8_t GrCacheID::GetNextDomain() { - // 0 reserved for kUnrestricted_ResourceDomain - static int32_t gNextDomain = 1; +static const GrCacheID::Key kAssertKey; +GR_STATIC_ASSERT(sizeof(kAssertKey.fData8) == sizeof(kAssertKey.fData32)); +GR_STATIC_ASSERT(sizeof(kAssertKey.fData8) == sizeof(kAssertKey.fData64)); +GR_STATIC_ASSERT(sizeof(kAssertKey.fData8) == sizeof(kAssertKey)); + +GrCacheID::Domain GrCacheID::GenerateDomain() { + static int32_t gNextDomain = kInvalid_Domain + 1; int32_t domain = sk_atomic_inc(&gNextDomain); - if (domain >= 256) { + if (domain >= 1 << (8 * sizeof(Domain))) { GrCrash("Too many Cache Domains"); } - return (uint8_t) domain; -} - -uint8_t GrCacheID::GetNextResourceType() { - // 0 reserved for kInvalid_ResourceType - static int32_t gNextResourceType = 1; - - int32_t type = sk_atomic_inc(&gNextResourceType); - if (type >= 256) { - GrCrash("Too many Cache Resource Types"); - } - - return (uint8_t) type; -} - -void GrCacheID::toRaw(uint32_t v[4]) { - GrAssert(4*sizeof(uint32_t) == sizeof(GrCacheID)); - - v[0] = (uint32_t) (fPublicID & 0xffffffffUL); - v[1] = (uint32_t) ((fPublicID >> 32) & 0xffffffffUL); - v[2] = fResourceSpecific32; - v[3] = fDomain << 24 | - fResourceType << 16 | - fResourceSpecific16; + return static_cast<Domain>(domain); } diff --git a/src/gpu/GrClipMaskManager.cpp b/src/gpu/GrClipMaskManager.cpp index a596088ccf..7eab0ce68f 100644 --- a/src/gpu/GrClipMaskManager.cpp +++ b/src/gpu/GrClipMaskManager.cpp @@ -18,12 +18,9 @@ #include "GrAAConvexPathRenderer.h" #include "GrAAHairLinePathRenderer.h" #include "GrSWMaskHelper.h" -#include "GrCacheID.h" #include "SkTLazy.h" -GR_DEFINE_RESOURCE_CACHE_DOMAIN(GrClipMaskManager, GetAlphaMaskDomain) - #define GR_AA_CLIP 1 typedef SkClipStack::Element Element; 
diff --git a/src/gpu/GrClipMaskManager.h b/src/gpu/GrClipMaskManager.h index 48d17e1fdc..534689a9bb 100644 --- a/src/gpu/GrClipMaskManager.h +++ b/src/gpu/GrClipMaskManager.h @@ -41,8 +41,6 @@ class GrDrawState; */ class GrClipMaskManager : public GrNoncopyable { public: - GR_DECLARE_RESOURCE_CACHE_DOMAIN(GetAlphaMaskDomain) - GrClipMaskManager() : fGpu(NULL) , fCurrClipMaskType(kNone_ClipMaskType) { diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp index d00e062fae..12fbeb6e31 100644 --- a/src/gpu/GrContext.cpp +++ b/src/gpu/GrContext.cpp @@ -209,18 +209,20 @@ void convolve_gaussian(GrDrawTarget* target, } +//////////////////////////////////////////////////////////////////////////////// + GrTexture* GrContext::findTexture(const GrTextureDesc& desc, - const GrCacheData& cacheData, + const GrCacheID& cacheID, const GrTextureParams* params) { - GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false); + GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID); GrResource* resource = fTextureCache->find(resourceKey); return static_cast<GrTexture*>(resource); } bool GrContext::isTextureInCache(const GrTextureDesc& desc, - const GrCacheData& cacheData, + const GrCacheID& cacheID, const GrTextureParams* params) const { - GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false); + GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID); return fTextureCache->hasKey(resourceKey); } @@ -272,13 +274,13 @@ static void stretchImage(void* dst, // The desired texture is NPOT and tiled but that isn't supported by // the current hardware. 
Resize the texture to be a POT GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc, - const GrCacheData& cacheData, + const GrCacheID& cacheID, void* srcData, size_t rowBytes, bool needsFiltering) { - GrTexture* clampedTexture = this->findTexture(desc, cacheData, NULL); + GrTexture* clampedTexture = this->findTexture(desc, cacheID, NULL); if (NULL == clampedTexture) { - clampedTexture = this->createTexture(NULL, desc, cacheData, srcData, rowBytes); + clampedTexture = this->createTexture(NULL, desc, cacheID, srcData, rowBytes); if (NULL == clampedTexture) { return NULL; @@ -350,22 +352,22 @@ GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc, GrTexture* GrContext::createTexture( const GrTextureParams* params, const GrTextureDesc& desc, - const GrCacheData& cacheData, + const GrCacheID& cacheID, void* srcData, size_t rowBytes) { - SK_TRACE_EVENT0("GrContext::createAndLockTexture"); + SK_TRACE_EVENT0("GrContext::createTexture"); #if GR_DUMP_TEXTURE_UPLOAD - GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight); + GrPrintf("GrContext::createTexture[%d %d]\n", desc.fWidth, desc.fHeight); #endif - GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false); + GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID); SkAutoTUnref<GrTexture> texture; if (GrTexture::NeedsResizing(resourceKey)) { - texture.reset(this->createResizedTexture(desc, cacheData, - srcData, rowBytes, - GrTexture::NeedsFiltering(resourceKey))); + texture.reset(this->createResizedTexture(desc, cacheID, + srcData, rowBytes, + GrTexture::NeedsFiltering(resourceKey))); } else { texture.reset(fGpu->createTexture(desc, srcData, rowBytes)); } @@ -377,15 +379,13 @@ GrTexture* GrContext::createTexture( return texture; } -GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc, - ScratchTexMatch match) { +GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc, 
ScratchTexMatch match) { GrTextureDesc desc = inDesc; - GrCacheData cacheData(GrCacheData::kScratch_CacheID); GrAssert((desc.fFlags & kRenderTarget_GrTextureFlagBit) || !(desc.fFlags & kNoStencil_GrTextureFlagBit)); - if (kExact_ScratchTexMatch != match) { + if (kApprox_ScratchTexMatch == match) { // bin by pow2 with a reasonable min static const int MIN_SIZE = 256; desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth)); @@ -399,7 +399,7 @@ GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc, bool doubledH = false; do { - GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, desc, cacheData, true); + GrResourceKey key = GrTexture::ComputeScratchKey(desc); // Ensure we have exclusive access to the texture so future 'find' calls don't return it resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag); // if we miss, relax the fit of the flags... @@ -433,10 +433,7 @@ GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc, desc.fHeight = origHeight; SkAutoTUnref<GrTexture> texture(fGpu->createTexture(desc, NULL, 0)); if (NULL != texture) { - GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, - texture->desc(), - cacheData, - true); + GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc()); // Make the resource exclusive so future 'find' calls don't return it fTextureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag); resource = texture; @@ -475,7 +472,7 @@ void GrContext::unlockScratchTexture(GrTexture* texture) { // If this is a scratch texture we detached it from the cache // while it was locked (to avoid two callers simultaneously getting // the same texture). 
- if (GrTexture::IsScratchTexture(texture->getCacheEntry()->key())) { + if (texture->getCacheEntry()->key().isScratch()) { fTextureCache->makeNonExclusive(texture->getCacheEntry()); } diff --git a/src/gpu/GrResourceCache.cpp b/src/gpu/GrResourceCache.cpp index a1f1d794e5..45b999f3d0 100644 --- a/src/gpu/GrResourceCache.cpp +++ b/src/gpu/GrResourceCache.cpp @@ -11,6 +11,20 @@ #include "GrResourceCache.h" #include "GrResource.h" + +GrResourceKey::ResourceType GrResourceKey::GenerateResourceType() { + static int32_t gNextType = 0; + + int32_t type = sk_atomic_inc(&gNextType); + if (type >= (1 << 8 * sizeof(ResourceType))) { + GrCrash("Too many Resource Types"); + } + + return static_cast<ResourceType>(type); +} + +/////////////////////////////////////////////////////////////////////////////// + GrResourceEntry::GrResourceEntry(const GrResourceKey& key, GrResource* resource) : fKey(key), fResource(resource) { // we assume ownership of the resource, and will unref it when we die @@ -33,36 +47,6 @@ void GrResourceEntry::validate() const { /////////////////////////////////////////////////////////////////////////////// -class GrResourceCache::Key { - typedef GrResourceEntry T; - - const GrResourceKey& fKey; -public: - Key(const GrResourceKey& key) : fKey(key) {} - - uint32_t getHash() const { return fKey.hashIndex(); } - - static bool LT(const T& entry, const Key& key) { - return entry.key() < key.fKey; - } - static bool EQ(const T& entry, const Key& key) { - return entry.key() == key.fKey; - } -#if GR_DEBUG - static uint32_t GetHash(const T& entry) { - return entry.key().hashIndex(); - } - static bool LT(const T& a, const T& b) { - return a.key() < b.key(); - } - static bool EQ(const T& a, const T& b) { - return a.key() == b.key(); - } -#endif -}; - -/////////////////////////////////////////////////////////////////////////////// - GrResourceCache::GrResourceCache(int maxCount, size_t maxBytes) : fMaxCount(maxCount), fMaxBytes(maxBytes) { diff --git 
a/src/gpu/GrResourceCache.h b/src/gpu/GrResourceCache.h index e1207a204b..b4c9df7ad2 100644 --- a/src/gpu/GrResourceCache.h +++ b/src/gpu/GrResourceCache.h @@ -14,27 +14,12 @@ #include "GrConfig.h" #include "GrTypes.h" #include "GrTHashCache.h" +#include "GrBinHashKey.h" #include "SkTInternalLList.h" class GrResource; +class GrResourceEntry; -// return true if a<b, or false if b<a -// -#define RET_IF_LT_OR_GT(a, b) \ - do { \ - if ((a) < (b)) { \ - return true; \ - } \ - if ((b) < (a)) { \ - return false; \ - } \ - } while (0) - -/** - * Helper class for GrResourceCache, the Key is used to identify src data for - * a resource. It is identified by 2 32bit data fields which can hold any - * data (uninterpreted by the cache) and a width/height. - */ class GrResourceKey { public: enum { @@ -43,82 +28,118 @@ public: kHashMask = kHashCount - 1 }; - GrResourceKey(uint32_t p0, uint32_t p1, uint32_t p2, uint32_t p3) { - fP[0] = p0; - fP[1] = p1; - fP[2] = p2; - fP[3] = p3; - this->computeHashIndex(); + static GrCacheID::Domain ScratchDomain() { + static const GrCacheID::Domain gDomain = GrCacheID::GenerateDomain(); + return gDomain; } - GrResourceKey(uint32_t v[4]) { - memcpy(fP, v, 4 * sizeof(uint32_t)); - this->computeHashIndex(); - } + /** Uniquely identifies the GrResource subclass in the key to avoid collisions + across resource types. */ + typedef uint8_t ResourceType; + + /** Flags set by the GrResource subclass. 
*/ + typedef uint8_t ResourceFlags; + + /** Generate a unique ResourceType */ + static ResourceType GenerateResourceType(); + + /** Creates a key for resource */ + GrResourceKey(const GrCacheID& id, ResourceType type, ResourceFlags flags) { + this->init(id.getDomain(), id.getKey(), type, flags); + }; GrResourceKey(const GrResourceKey& src) { - memcpy(fP, src.fP, 4 * sizeof(uint32_t)); -#if GR_DEBUG - this->computeHashIndex(); - GrAssert(fHashIndex == src.fHashIndex); -#endif - fHashIndex = src.fHashIndex; + fKey = src.fKey; } - //!< returns hash value [0..kHashMask] for the key - int hashIndex() const { return fHashIndex; } + GrResourceKey() { + fKey.fHashedKey.reset(); + } - friend bool operator==(const GrResourceKey& a, const GrResourceKey& b) { - GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex); - return 0 == memcmp(a.fP, b.fP, 4 * sizeof(uint32_t)); + void reset(const GrCacheID& id, ResourceType type, ResourceFlags flags) { + this->init(id.getDomain(), id.getKey(), type, flags); } - friend bool operator!=(const GrResourceKey& a, const GrResourceKey& b) { - GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex); - return !(a == b); + //!< returns hash value [0..kHashMask] for the key + int getHash() const { + return fKey.fHashedKey.getHash() & kHashMask; } - friend bool operator<(const GrResourceKey& a, const GrResourceKey& b) { - RET_IF_LT_OR_GT(a.fP[0], b.fP[0]); - RET_IF_LT_OR_GT(a.fP[1], b.fP[1]); - RET_IF_LT_OR_GT(a.fP[2], b.fP[2]); - return a.fP[3] < b.fP[3]; + bool isScratch() const { + return ScratchDomain() == + *reinterpret_cast<const GrCacheID::Domain*>(fKey.fHashedKey.getData() + + kCacheIDDomainOffset); } - uint32_t getValue32(int i) const { - GrAssert(i >=0 && i < 4); - return fP[i]; + ResourceType getResourceType() const { + return *reinterpret_cast<const ResourceType*>(fKey.fHashedKey.getData() + + kResourceTypeOffset); + } + + ResourceFlags getResourceFlags() const { + return *reinterpret_cast<const 
ResourceFlags*>(fKey.fHashedKey.getData() + + kResourceFlagsOffset); } -private: - static uint32_t rol(uint32_t x) { - return (x >> 24) | (x << 8); + int compare(const GrResourceKey& other) const { + return fKey.fHashedKey.compare(other.fKey.fHashedKey); } - static uint32_t ror(uint32_t x) { - return (x >> 8) | (x << 24); + + static bool LT(const GrResourceKey& a, const GrResourceKey& b) { + return a.compare(b) < 0; } - static uint32_t rohalf(uint32_t x) { - return (x >> 16) | (x << 16); + + static bool EQ(const GrResourceKey& a, const GrResourceKey& b) { + return 0 == a.compare(b); } - void computeHashIndex() { - uint32_t hash = fP[0] ^ rol(fP[1]) ^ ror(fP[2]) ^ rohalf(fP[3]); - // this way to mix and reduce hash to its index may have to change - // depending on how many bits we allocate to the index - hash ^= hash >> 16; - hash ^= hash >> 8; - fHashIndex = hash & kHashMask; + inline static bool LT(const GrResourceEntry& entry, const GrResourceKey& key); + inline static bool EQ(const GrResourceEntry& entry, const GrResourceKey& key); + inline static bool LT(const GrResourceEntry& a, const GrResourceEntry& b); + inline static bool EQ(const GrResourceEntry& a, const GrResourceEntry& b); + +private: + enum { + kCacheIDKeyOffset = 0, + kCacheIDDomainOffset = kCacheIDKeyOffset + sizeof(GrCacheID::Key), + kResourceTypeOffset = kCacheIDDomainOffset + sizeof(GrCacheID::Domain), + kResourceFlagsOffset = kResourceTypeOffset + sizeof(ResourceType), + kPadOffset = kResourceFlagsOffset + sizeof(ResourceFlags), + kKeySize = SkAlign4(kPadOffset), + kPadSize = kKeySize - kPadOffset + }; + + void init(const GrCacheID::Domain domain, + const GrCacheID::Key& key, + ResourceType type, + ResourceFlags flags) { + union { + uint8_t fKey8[kKeySize]; + uint32_t fKey32[kKeySize / 4]; + } keyData; + + uint8_t* k = keyData.fKey8; + memcpy(k + kCacheIDKeyOffset, key.fData8, sizeof(GrCacheID::Key)); + memcpy(k + kCacheIDDomainOffset, &domain, sizeof(GrCacheID::Domain)); + memcpy(k + 
kResourceTypeOffset, &type, sizeof(ResourceType)); + memcpy(k + kResourceFlagsOffset, &flags, sizeof(ResourceFlags)); + memset(k + kPadOffset, 0, kPadSize); + fKey.fHashedKey.setKeyData(keyData.fKey32); } - uint32_t fP[4]; + struct Key; + typedef GrTBinHashKey<Key, kKeySize> HashedKey; - // this is computed from the fP... fields - int fHashIndex; + struct Key { + int compare(const HashedKey& hashedKey) const { + fHashedKey.compare(fHashedKey); + } + HashedKey fHashedKey; + }; - friend class GrContext; + Key fKey; }; - /////////////////////////////////////////////////////////////////////////////// class GrResourceEntry { @@ -146,6 +167,22 @@ private: friend class GrDLinkedList; }; +bool GrResourceKey::LT(const GrResourceEntry& entry, const GrResourceKey& key) { + return LT(entry.key(), key); +} + +bool GrResourceKey::EQ(const GrResourceEntry& entry, const GrResourceKey& key) { + return EQ(entry.key(), key); +} + +bool GrResourceKey::LT(const GrResourceEntry& a, const GrResourceEntry& b) { + return LT(a.key(), b.key()); +} + +bool GrResourceKey::EQ(const GrResourceEntry& a, const GrResourceEntry& b) { + return EQ(a.key(), b.key()); +} + /////////////////////////////////////////////////////////////////////////////// #include "GrTHashCache.h" @@ -289,8 +326,7 @@ private: void removeInvalidResource(GrResourceEntry* entry); - class Key; - GrTHashTable<GrResourceEntry, Key, 8> fCache; + GrTHashTable<GrResourceEntry, GrResourceKey, 8> fCache; // We're an internal doubly linked list typedef SkTInternalLList<GrResourceEntry> EntryList; diff --git a/src/gpu/GrStencilBuffer.cpp b/src/gpu/GrStencilBuffer.cpp index 180912e02d..623861df8f 100644 --- a/src/gpu/GrStencilBuffer.cpp +++ b/src/gpu/GrStencilBuffer.cpp @@ -13,7 +13,6 @@ #include "GrResourceCache.h" SK_DEFINE_INST_COUNT(GrStencilBuffer) -GR_DEFINE_RESOURCE_CACHE_TYPE(GrStencilBuffer) void GrStencilBuffer::transferToCache() { GrAssert(NULL == this->getCacheEntry()); @@ -22,30 +21,28 @@ void 
GrStencilBuffer::transferToCache() { } namespace { -// we should never have more than one stencil buffer with same combo of -// (width,height,samplecount) -void gen_stencil_key_values(int width, - int height, - int sampleCnt, - GrCacheID* cacheID) { - cacheID->fPublicID = GrCacheID::kDefaultPublicCacheID; - cacheID->fResourceSpecific32 = width | (height << 16); - cacheID->fDomain = GrCacheData::kScratch_ResourceDomain; - - GrAssert(sampleCnt >= 0 && sampleCnt < 256); - cacheID->fResourceSpecific16 = sampleCnt << 8; - - // last 8 bits of 'fResourceSpecific16' is free for flags +// we should never have more than one stencil buffer with same combo of (width,height,samplecount) +void gen_cache_id(int width, int height, int sampleCnt, GrCacheID* cacheID) { + static const GrCacheID::Domain gStencilBufferDomain = GrCacheID::GenerateDomain(); + GrCacheID::Key key; + uint32_t* keyData = key.fData32; + keyData[0] = width; + keyData[1] = height; + keyData[2] = sampleCnt; + GR_STATIC_ASSERT(sizeof(key) >= 3 * sizeof(uint32_t)); + cacheID->reset(gStencilBufferDomain, key); } } GrResourceKey GrStencilBuffer::ComputeKey(int width, int height, int sampleCnt) { - GrCacheID id(GrStencilBuffer::GetResourceType()); - gen_stencil_key_values(width, height, sampleCnt, &id); - - uint32_t v[4]; - id.toRaw(v); - return GrResourceKey(v); + // All SBs are created internally to attach to RTs so they all use the same domain. + static const GrResourceKey::ResourceType gStencilBufferResourceType = + GrResourceKey::GenerateResourceType(); + GrCacheID id; + gen_cache_id(width, height, sampleCnt, &id); + + // we don't use any flags for SBs currently. 
+ return GrResourceKey(id, gStencilBufferResourceType, 0); } diff --git a/src/gpu/GrStencilBuffer.h b/src/gpu/GrStencilBuffer.h index e4e5190598..459d80143f 100644 --- a/src/gpu/GrStencilBuffer.h +++ b/src/gpu/GrStencilBuffer.h @@ -12,7 +12,6 @@ #include "GrClipData.h" #include "GrResource.h" -#include "GrCacheID.h" class GrRenderTarget; class GrResourceEntry; @@ -21,7 +20,6 @@ class GrResourceKey; class GrStencilBuffer : public GrResource { public: SK_DECLARE_INST_COUNT(GrStencilBuffer); - GR_DECLARE_RESOURCE_CACHE_TYPE() virtual ~GrStencilBuffer() { // TODO: allow SB to be purged and detach itself from rts diff --git a/src/gpu/GrTHashCache.h b/src/gpu/GrTHashCache.h index 4494f9f024..854723715d 100644 --- a/src/gpu/GrTHashCache.h +++ b/src/gpu/GrTHashCache.h @@ -226,13 +226,6 @@ void GrTHashTable<T, Key, kHashBits>::unrefAll() { #if GR_DEBUG template <typename T, typename Key, size_t kHashBits> void GrTHashTable<T, Key, kHashBits>::validate() const { - for (size_t i = 0; i < GR_ARRAY_COUNT(fHash); i++) { - if (fHash[i]) { - unsigned hashIndex = hash2Index(Key::GetHash(*fHash[i])); - GrAssert(hashIndex == i); - } - } - int count = fSorted.count(); for (int i = 1; i < count; i++) { GrAssert(Key::LT(*fSorted[i - 1], *fSorted[i]) || diff --git a/src/gpu/GrTexture.cpp b/src/gpu/GrTexture.cpp index c31d774544..eb3a58693f 100644 --- a/src/gpu/GrTexture.cpp +++ b/src/gpu/GrTexture.cpp @@ -15,7 +15,6 @@ #include "GrResourceCache.h" SK_DEFINE_INST_COUNT(GrTexture) -GR_DEFINE_RESOURCE_CACHE_TYPE(GrTexture) /** * This method allows us to interrupt the normal deletion process and place @@ -116,97 +115,73 @@ void GrTexture::validateDesc() const { } } -// These flags need to fit in <= 8 bits so they can be folded into the texture +// These flags need to fit in a GrResourceKey::ResourceFlags so they can be folded into the texture // key -enum TextureBits { - /* - * The kNPOT bit is set when the texture is NPOT and is being repeated - * but the hardware doesn't support that 
feature. +enum TextureFlags { + /** + * The kStretchToPOT bit is set when the texture is NPOT and is being repeated but the + * hardware doesn't support that feature. */ - kNPOT_TextureBit = 0x1, - /* - * The kFilter bit can only be set when the kNPOT flag is set and indicates - * whether the resizing of the texture should use filtering. This is - * to handle cases where the original texture is indexed to disable - * filtering. + kStretchToPOT_TextureFlag = 0x1, + /** + * The kFilter bit can only be set when the kStretchToPOT flag is set and indicates whether the + * stretched texture should be bilerp filtered or point sampled. */ - kFilter_TextureBit = 0x2, - /* - * The kScratch bit is set if the texture is being used as a scratch - * texture. - */ - kScratch_TextureBit = 0x4, + kFilter_TextureFlag = 0x2, }; namespace { -void gen_texture_key_values(const GrGpu* gpu, - const GrTextureParams* params, - const GrTextureDesc& desc, - const GrCacheData& cacheData, - bool scratch, - GrCacheID* cacheID) { - - uint64_t clientKey = cacheData.fClientCacheID; - - if (scratch) { - // Instead of a client-provided key of the texture contents - // we create a key from the descriptor. - GrAssert(GrCacheData::kScratch_CacheID == clientKey); - clientKey = (desc.fFlags << 8) | ((uint64_t) desc.fConfig << 32); - } - - cacheID->fPublicID = clientKey; - cacheID->fDomain = cacheData.fResourceDomain; - - // we assume we only need 16 bits of width and height - // assert that texture creation will fail anyway if this assumption - // would cause key collisions. 
- GrAssert(gpu->getCaps().maxTextureSize() <= SK_MaxU16); - cacheID->fResourceSpecific32 = desc.fWidth | (desc.fHeight << 16); - - GrAssert(desc.fSampleCnt >= 0 && desc.fSampleCnt < 256); - cacheID->fResourceSpecific16 = desc.fSampleCnt << 8; - - if (!gpu->getCaps().npotTextureTileSupport()) { - bool isPow2 = GrIsPow2(desc.fWidth) && GrIsPow2(desc.fHeight); - - bool tiled = NULL != params && params->isTiled(); - - if (tiled && !isPow2) { - cacheID->fResourceSpecific16 |= kNPOT_TextureBit; +GrResourceKey::ResourceFlags get_texture_flags(const GrGpu* gpu, + const GrTextureParams* params, + const GrTextureDesc& desc) { + GrResourceKey::ResourceFlags flags = 0; + bool tiled = NULL != params && params->isTiled(); + if (tiled & !gpu->getCaps().npotTextureTileSupport()) { + if (!GrIsPow2(desc.fWidth) || GrIsPow2(desc.fHeight)) { + flags |= kStretchToPOT_TextureFlag; if (params->isBilerp()) { - cacheID->fResourceSpecific16 |= kFilter_TextureBit; + flags |= kFilter_TextureFlag; } } } + return flags; +} - if (scratch) { - cacheID->fResourceSpecific16 |= kScratch_TextureBit; - } +GrResourceKey::ResourceType texture_resource_type() { + static const GrResourceKey::ResourceType gType = GrResourceKey::GenerateResourceType(); + return gType; } } GrResourceKey GrTexture::ComputeKey(const GrGpu* gpu, const GrTextureParams* params, const GrTextureDesc& desc, - const GrCacheData& cacheData, - bool scratch) { - GrCacheID id(GrTexture::GetResourceType()); - gen_texture_key_values(gpu, params, desc, cacheData, scratch, &id); - - uint32_t v[4]; - id.toRaw(v); - return GrResourceKey(v); + const GrCacheID& cacheID) { + GrResourceKey::ResourceFlags flags = get_texture_flags(gpu, params, desc); + return GrResourceKey(cacheID, texture_resource_type(), flags); } -bool GrTexture::NeedsResizing(const GrResourceKey& key) { - return 0 != (key.getValue32(3) & kNPOT_TextureBit); +GrResourceKey GrTexture::ComputeScratchKey(const GrTextureDesc& desc) { + GrCacheID::Key idKey; + // Instead of a 
client-provided key of the texture contents we create a key from the + // descriptor. + GR_STATIC_ASSERT(sizeof(idKey) >= 12); + GrAssert(desc.fHeight < (1 << 16)); + GrAssert(desc.fWidth < (1 << 16)); + idKey.fData32[0] = (desc.fWidth) | (desc.fHeight << 16); + idKey.fData32[1] = desc.fConfig | desc.fSampleCnt << 16; + idKey.fData32[2] = desc.fFlags; + static const int kPadSize = sizeof(idKey) - 12; + memset(idKey.fData8 + 12, 0, kPadSize); + + GrCacheID cacheID(GrResourceKey::ScratchDomain(), idKey); + return GrResourceKey(cacheID, texture_resource_type(), 0); } -bool GrTexture::IsScratchTexture(const GrResourceKey& key) { - return 0 != (key.getValue32(3) & kScratch_TextureBit); +bool GrTexture::NeedsResizing(const GrResourceKey& key) { + return SkToBool(key.getResourceFlags() & kStretchToPOT_TextureFlag); } bool GrTexture::NeedsFiltering(const GrResourceKey& key) { - return 0 != (key.getValue32(3) & kFilter_TextureBit); + return SkToBool(key.getResourceFlags() & kFilter_TextureFlag); } diff --git a/src/gpu/SkGpuDevice.cpp b/src/gpu/SkGpuDevice.cpp index 7b06d6080b..f4c4cf60aa 100644 --- a/src/gpu/SkGpuDevice.cpp +++ b/src/gpu/SkGpuDevice.cpp @@ -1104,7 +1104,7 @@ bool SkGpuDevice::shouldTileBitmap(const SkBitmap& bitmap, return false; } // if the entire texture is already in our cache then no reason to tile it - if (this->isBitmapInTextureCache(bitmap, params)) { + if (GrIsBitmapInCache(fContext, bitmap, ¶ms)) { return false; } @@ -1848,22 +1848,6 @@ void SkGpuDevice::flush() { /////////////////////////////////////////////////////////////////////////////// -bool SkGpuDevice::isBitmapInTextureCache(const SkBitmap& bitmap, - const GrTextureParams& params) const { - uint64_t key = bitmap.getGenerationID(); - key |= ((uint64_t) bitmap.pixelRefOffset()) << 32; - - GrTextureDesc desc; - desc.fWidth = bitmap.width(); - desc.fHeight = bitmap.height(); - desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config()); - - GrCacheData cacheData(key); - - return 
this->context()->isTextureInCache(desc, cacheData, ¶ms); -} - - SkDevice* SkGpuDevice::onCreateCompatibleDevice(SkBitmap::Config config, int width, int height, bool isOpaque, @@ -1883,10 +1867,10 @@ SkDevice* SkGpuDevice::onCreateCompatibleDevice(SkBitmap::Config config, #if CACHE_COMPATIBLE_DEVICE_TEXTURES // layers are never draw in repeat modes, so we can request an approx // match and ignore any padding. - GrContext::ScratchTexMatch matchType = (kSaveLayer_Usage == usage) ? - GrContext::kApprox_ScratchTexMatch : - GrContext::kExact_ScratchTexMatch; - texture = fContext->lockScratchTexture(desc, matchType); + const GrContext::ScratchTexMatch match = (kSaveLayer_Usage == usage) ? + GrContext::kApprox_ScratchTexMatch : + GrContext::kExact_ScratchTexMatch; + texture = fContext->lockScratchTexture(desc, match); #else tunref.reset(fContext->createUncachedTexture(desc, NULL, 0)); texture = tunref.get(); diff --git a/src/gpu/SkGr.cpp b/src/gpu/SkGr.cpp index 79bc75daf0..bc996cfeb2 100644 --- a/src/gpu/SkGr.cpp +++ b/src/gpu/SkGr.cpp @@ -56,8 +56,34 @@ static void build_compressed_data(void* buffer, const SkBitmap& bitmap) { //////////////////////////////////////////////////////////////////////////////// +void generate_bitmap_cache_id(const SkBitmap& bitmap, GrCacheID* id) { + // Our id includes the offset, width, and height so that bitmaps created by extractSubset() + // are unique. 
+ uint32_t genID = bitmap.getGenerationID(); + size_t offset = bitmap.pixelRefOffset(); + int16_t width = static_cast<int16_t>(bitmap.width()); + int16_t height = static_cast<int16_t>(bitmap.height()); + + GrCacheID::Key key; + memcpy(key.fData8, &genID, 4); + memcpy(key.fData8 + 4, &width, 2); + memcpy(key.fData8 + 6, &height, 2); + memcpy(key.fData8 + 8, &offset, sizeof(size_t)); + GR_STATIC_ASSERT(sizeof(key) >= 8 + sizeof(size_t)); + static const GrCacheID::Domain gBitmapTextureDomain = GrCacheID::GenerateDomain(); + id->reset(gBitmapTextureDomain, key); +} + +void generate_bitmap_texture_desc(const SkBitmap& bitmap, GrTextureDesc* desc) { + desc->fFlags = kNone_GrTextureFlags; + desc->fWidth = bitmap.width(); + desc->fHeight = bitmap.height(); + desc->fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config()); + desc->fSampleCnt = 0; +} + static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx, - uint64_t key, + bool cache, const GrTextureParams* params, const SkBitmap& origBitmap) { SkAutoLockPixels alp(origBitmap); @@ -71,11 +97,7 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx, const SkBitmap* bitmap = &origBitmap; GrTextureDesc desc; - desc.fWidth = bitmap->width(); - desc.fHeight = bitmap->height(); - desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap->config()); - - GrCacheData cacheData(key); + generate_bitmap_texture_desc(*bitmap, &desc); if (SkBitmap::kIndex8_Config == bitmap->config()) { // build_compressed_data doesn't do npot->pot expansion @@ -91,31 +113,33 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx, // our compressed data will be trimmed, so pass width() for its // "rowBytes", since they are the same now. 
- if (GrCacheData::kScratch_CacheID != key) { - return ctx->createTexture(params, desc, cacheData, + if (cache) { + GrCacheID cacheID; + generate_bitmap_cache_id(origBitmap, &cacheID); + return ctx->createTexture(params, desc, cacheID, storage.get(), bitmap->width()); } else { GrTexture* result = ctx->lockScratchTexture(desc, - GrContext::kExact_ScratchTexMatch); + GrContext::kExact_ScratchTexMatch); result->writePixels(0, 0, bitmap->width(), bitmap->height(), desc.fConfig, storage.get()); return result; } - } else { origBitmap.copyTo(&tmpBitmap, SkBitmap::kARGB_8888_Config); // now bitmap points to our temp, which has been promoted to 32bits bitmap = &tmpBitmap; + desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap->config()); } } - desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap->config()); - if (GrCacheData::kScratch_CacheID != key) { + if (cache) { // This texture is likely to be used again so leave it in the cache - // but locked. - return ctx->createTexture(params, desc, cacheData, + GrCacheID cacheID; + generate_bitmap_cache_id(origBitmap, &cacheID); + return ctx->createTexture(params, desc, cacheID, bitmap->getPixels(), bitmap->rowBytes()); } else { @@ -124,8 +148,7 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx, // cache so no one else can find it. Additionally, once unlocked, the // scratch texture will go to the end of the list for purging so will // likely be available for this volatile bitmap the next time around. 
- GrTexture* result = ctx->lockScratchTexture(desc, - GrContext::kExact_ScratchTexMatch); + GrTexture* result = ctx->lockScratchTexture(desc, GrContext::kExact_ScratchTexMatch); result->writePixels(0, 0, bitmap->width(), bitmap->height(), desc.fConfig, @@ -135,32 +158,37 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx, } } -/////////////////////////////////////////////////////////////////////////////// +bool GrIsBitmapInCache(const GrContext* ctx, + const SkBitmap& bitmap, + const GrTextureParams* params) { + GrCacheID cacheID; + generate_bitmap_cache_id(bitmap, &cacheID); + + GrTextureDesc desc; + generate_bitmap_texture_desc(bitmap, &desc); + return ctx->isTextureInCache(desc, cacheID, params); +} GrTexture* GrLockCachedBitmapTexture(GrContext* ctx, const SkBitmap& bitmap, const GrTextureParams* params) { GrTexture* result = NULL; - if (!bitmap.isVolatile()) { - // If the bitmap isn't changing try to find a cached copy first - uint64_t key = bitmap.getGenerationID(); - key |= ((uint64_t) bitmap.pixelRefOffset()) << 32; + bool cache = !bitmap.isVolatile(); - GrTextureDesc desc; - desc.fWidth = bitmap.width(); - desc.fHeight = bitmap.height(); - desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config()); + if (cache) { + // If the bitmap isn't changing try to find a cached copy first. 
- GrCacheData cacheData(key); + GrCacheID cacheID; + generate_bitmap_cache_id(bitmap, &cacheID); - result = ctx->findTexture(desc, cacheData, params); - if (NULL == result) { - // didn't find a cached copy so create one - result = sk_gr_create_bitmap_texture(ctx, key, params, bitmap); - } - } else { - result = sk_gr_create_bitmap_texture(ctx, GrCacheData::kScratch_CacheID, params, bitmap); + GrTextureDesc desc; + generate_bitmap_texture_desc(bitmap, &desc); + + result = ctx->findTexture(desc, cacheID, params); + } + if (NULL == result) { + result = sk_gr_create_bitmap_texture(ctx, cache, params, bitmap); } if (NULL == result) { GrPrintf("---- failed to create texture for cache [%d %d]\n", diff --git a/src/gpu/effects/GrTextureStripAtlas.cpp b/src/gpu/effects/GrTextureStripAtlas.cpp index 92f5ad5c6f..30d7ce4d48 100644 --- a/src/gpu/effects/GrTextureStripAtlas.cpp +++ b/src/gpu/effects/GrTextureStripAtlas.cpp @@ -17,9 +17,6 @@ #define VALIDATE #endif -GR_DEFINE_RESOURCE_CACHE_DOMAIN(GrTextureStripAtlas, GetTextureStripAtlasDomain) - - int32_t GrTextureStripAtlas::gCacheCount = 0; GrTHashTable<GrTextureStripAtlas::AtlasEntry, @@ -73,7 +70,7 @@ GrTextureStripAtlas* GrTextureStripAtlas::GetAtlas(const GrTextureStripAtlas::De } GrTextureStripAtlas::GrTextureStripAtlas(GrTextureStripAtlas::Desc desc) - : fCacheID(sk_atomic_inc(&gCacheCount)) + : fCacheKey(sk_atomic_inc(&gCacheCount)) , fLockedRows(0) , fDesc(desc) , fNumRows(desc.fHeight / desc.fRowHeight) @@ -198,11 +195,16 @@ void GrTextureStripAtlas::lockTexture() { texDesc.fWidth = fDesc.fWidth; texDesc.fHeight = fDesc.fHeight; texDesc.fConfig = fDesc.fConfig; - GrCacheData cacheData(fCacheID); - cacheData.fResourceDomain = GetTextureStripAtlasDomain(); - fTexture = fDesc.fContext->findTexture(texDesc, cacheData, &params); + + static const GrCacheID::Domain gTextureStripAtlasDomain = GrCacheID::GenerateDomain(); + GrCacheID::Key key; + *key.fData32 = fCacheKey; + memset(key.fData32 + 1, 0, sizeof(key) - sizeof(uint32_t)); 
+ GrCacheID cacheID(gTextureStripAtlasDomain, key); + + fTexture = fDesc.fContext->findTexture(texDesc, cacheID, &params); if (NULL == fTexture) { - fTexture = fDesc.fContext->createTexture(&params, texDesc, cacheID, NULL, 0); // This is a new texture, so all of our cache info is now invalid this->initLRU(); fKeyTable.rewind(); diff --git a/src/gpu/effects/GrTextureStripAtlas.h b/src/gpu/effects/GrTextureStripAtlas.h index 210d88ec90..1e1e5088c7 100644 --- a/src/gpu/effects/GrTextureStripAtlas.h +++ b/src/gpu/effects/GrTextureStripAtlas.h @@ -21,8 +21,6 @@ */ class GrTextureStripAtlas { public: - GR_DECLARE_RESOURCE_CACHE_DOMAIN(GetTextureStripAtlasDomain) - /** * Descriptor struct which we'll use as a hash table key **/ @@ -157,7 +155,7 @@ private: // A unique ID for this texture (formed with: gCacheCount++), so we can be sure that if we // get a texture back from the texture cache, that it's the same one we last used. - const uint64_t fCacheID; + const int32_t fCacheKey; // Total locks on all rows (when this reaches zero, we can unlock our texture) int32_t fLockedRows; |