author    | bsalomon@google.com <bsalomon@google.com@2bbb7eff-a529-9590-31e7-b0007b416f81> | 2012-12-20 14:23:26 +0000
committer | bsalomon@google.com <bsalomon@google.com@2bbb7eff-a529-9590-31e7-b0007b416f81> | 2012-12-20 14:23:26 +0000
commit    | 0b6ad2297fbf43466950690102c1c9c150f2a972 (patch)
tree      | e0e56f66070ce6edef1c8545bad5d1ec075ecd2e /src/gpu
parent    | 4b86e3428b115202e82d49a0914ea8ab6dc25940 (diff)
Revert 6914 to fix build issues.
git-svn-id: http://skia.googlecode.com/svn/trunk@6915 2bbb7eff-a529-9590-31e7-b0007b416f81
Diffstat (limited to 'src/gpu')
-rw-r--r-- | src/gpu/GrBinHashKey.h                  |  37
-rw-r--r-- | src/gpu/GrCacheID.cpp                   |  39
-rw-r--r-- | src/gpu/GrClipMaskManager.cpp           |   3
-rw-r--r-- | src/gpu/GrClipMaskManager.h             |   2
-rw-r--r-- | src/gpu/GrContext.cpp                   |  45
-rw-r--r-- | src/gpu/GrResourceCache.cpp             |  44
-rw-r--r-- | src/gpu/GrResourceCache.h               | 176
-rw-r--r-- | src/gpu/GrStencilBuffer.cpp             |  39
-rw-r--r-- | src/gpu/GrStencilBuffer.h               |   2
-rw-r--r-- | src/gpu/GrTHashCache.h                  |   7
-rw-r--r-- | src/gpu/GrTexture.cpp                   | 115
-rw-r--r-- | src/gpu/SkGpuDevice.cpp                 |  26
-rw-r--r-- | src/gpu/SkGr.cpp                        |  94
-rw-r--r-- | src/gpu/effects/GrTextureStripAtlas.cpp |  18
-rw-r--r-- | src/gpu/effects/GrTextureStripAtlas.h   |   4

15 files changed, 338 insertions(+), 313 deletions(-)
diff --git a/src/gpu/GrBinHashKey.h b/src/gpu/GrBinHashKey.h
index 8fa53ef68a..d2194e9471 100644
--- a/src/gpu/GrBinHashKey.h
+++ b/src/gpu/GrBinHashKey.h
@@ -16,27 +16,25 @@
  * Hash function class that can take a data chunk of any predetermined length. The hash function
  * used is the One-at-a-Time Hash (http://burtleburtle.net/bob/hash/doobs.html).
  *
- * Keys are computed from ENTRY objects. ENTRY must be fully ordered by a member:
- *      int compare(const GrTBinHashKey<ENTRY, ..>& k);
- * which returns negative if the ENTRY < k, 0 if it equals k, and positive if k < the ENTRY.
- * Additionally, ENTRY must be flattenable into the key using setKeyData.
+ * Keys are computed from Entry objects. Entry must be fully ordered by a member:
+ *      int compare(const GrTBinHashKey<Entry, ..>& k);
+ * which returns negative if the Entry < k, 0 if it equals k, and positive if k < the Entry.
+ * Additionally, Entry must be flattenable into the key using setKeyData.
 *
 * This class satisfies the requirements to be a key for a GrTHashTable.
 */
-template<typename ENTRY, size_t KEY_SIZE>
+template<typename Entry, size_t KeySize>
 class GrTBinHashKey {
 public:
-    enum { kKeySize = KEY_SIZE };
-
     GrTBinHashKey() {
         this->reset();
     }
 
-    GrTBinHashKey(const GrTBinHashKey<ENTRY, KEY_SIZE>& other) {
+    GrTBinHashKey(const GrTBinHashKey<Entry, KeySize>& other) {
         *this = other;
     }
 
-    GrTBinHashKey<ENTRY, KEY_SIZE>& operator=(const GrTBinHashKey<ENTRY, KEY_SIZE>& other) {
+    GrTBinHashKey<Entry, KeySize>& operator=(const GrTBinHashKey<Entry, KeySize>& other) {
         memcpy(this, &other, sizeof(*this));
         return *this;
     }
@@ -52,11 +50,11 @@ public:
     }
 
     void setKeyData(const uint32_t* SK_RESTRICT data) {
-        GrAssert(GrIsALIGN4(KEY_SIZE));
-        memcpy(&fData, data, KEY_SIZE);
+        GrAssert(GrIsALIGN4(KeySize));
+        memcpy(&fData, data, KeySize);
 
         uint32_t hash = 0;
-        size_t len = KEY_SIZE;
+        size_t len = KeySize;
         while (len >= 4) {
             hash += *data++;
             hash += (fHash << 10);
@@ -72,17 +70,17 @@ public:
         fHash = hash;
     }
 
-    int compare(const GrTBinHashKey<ENTRY, KEY_SIZE>& key) const {
+    int compare(const GrTBinHashKey<Entry, KeySize>& key) const {
         GrAssert(fIsValid && key.fIsValid);
-        return memcmp(fData, key.fData, KEY_SIZE);
+        return memcmp(fData, key.fData, KeySize);
     }
 
-    static bool EQ(const ENTRY& entry, const GrTBinHashKey<ENTRY, KEY_SIZE>& key) {
+    static bool EQ(const Entry& entry, const GrTBinHashKey<Entry, KeySize>& key) {
         GrAssert(key.fIsValid);
         return 0 == entry.compare(key);
     }
 
-    static bool LT(const ENTRY& entry, const GrTBinHashKey<ENTRY, KEY_SIZE>& key) {
+    static bool LT(const Entry& entry, const GrTBinHashKey<Entry, KeySize>& key) {
         GrAssert(key.fIsValid);
         return entry.compare(key) < 0;
     }
@@ -92,14 +90,9 @@ public:
         return fHash;
     }
 
-    const uint8_t* getData() const {
-        GrAssert(fIsValid);
-        return fData;
-    }
-
private:
     uint32_t    fHash;
-    uint8_t     fData[KEY_SIZE];  // Buffer for key storage
+    uint8_t     fData[KeySize];   // Buffer for key storage
 
 #if GR_DEBUG
 public:
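For reference, the One-at-a-Time hash cited in the header comment above reduces to the following standalone sketch. This is the generic byte-at-a-time form of Bob Jenkins' algorithm with illustrative names, not the file's exact word-at-a-time variant; `setKeyData` applies the same mixing over the key buffer:

```cpp
#include <stddef.h>
#include <stdint.h>

// Bob Jenkins' One-at-a-Time hash over a byte buffer (illustrative helper,
// not part of the Skia sources).
static uint32_t one_at_a_time_hash(const uint8_t* data, size_t len) {
    uint32_t hash = 0;
    for (size_t i = 0; i < len; ++i) {
        hash += data[i];        // fold in the next byte
        hash += hash << 10;     // mix
        hash ^= hash >> 6;
    }
    // final avalanche so all input bytes affect all output bits
    hash += hash << 3;
    hash ^= hash >> 11;
    hash += hash << 15;
    return hash;
}
```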
diff --git a/src/gpu/GrCacheID.cpp b/src/gpu/GrCacheID.cpp
index ce47af3e3f..4c6dd492c3 100644
--- a/src/gpu/GrCacheID.cpp
+++ b/src/gpu/GrCacheID.cpp
@@ -5,21 +5,40 @@
  * found in the LICENSE file.
 */
 
-#include "GrTypes.h"
+#include "GrCacheID.h"
 #include "SkThread.h"       // for sk_atomic_inc
 
-static const GrCacheID::Key kAssertKey;
-GR_STATIC_ASSERT(sizeof(kAssertKey.fData8) == sizeof(kAssertKey.fData32));
-GR_STATIC_ASSERT(sizeof(kAssertKey.fData8) == sizeof(kAssertKey.fData64));
-GR_STATIC_ASSERT(sizeof(kAssertKey.fData8) == sizeof(kAssertKey));
-
-GrCacheID::Domain GrCacheID::GenerateDomain() {
-    static int32_t gNextDomain = kInvalid_Domain + 1;
+uint8_t GrCacheID::GetNextDomain() {
+    // 0 reserved for kUnrestricted_ResourceDomain
+    static int32_t gNextDomain = 1;
 
     int32_t domain = sk_atomic_inc(&gNextDomain);
-    if (domain >= 1 << (8 * sizeof(Domain))) {
+    if (domain >= 256) {
         GrCrash("Too many Cache Domains");
     }
 
-    return static_cast<Domain>(domain);
+    return (uint8_t) domain;
+}
+
+uint8_t GrCacheID::GetNextResourceType() {
+    // 0 reserved for kInvalid_ResourceType
+    static int32_t gNextResourceType = 1;
+
+    int32_t type = sk_atomic_inc(&gNextResourceType);
+    if (type >= 256) {
+        GrCrash("Too many Cache Resource Types");
+    }
+
+    return (uint8_t) type;
+}
+
+void GrCacheID::toRaw(uint32_t v[4]) {
+    GrAssert(4*sizeof(uint32_t) == sizeof(GrCacheID));
+
+    v[0] = (uint32_t) (fPublicID & 0xffffffffUL);
+    v[1] = (uint32_t) ((fPublicID >> 32) & 0xffffffffUL);
+    v[2] = fResourceSpecific32;
+    v[3] = fDomain << 24 |
+           fResourceType << 16 |
+           fResourceSpecific16;
 }
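The new `GrCacheID::toRaw` above flattens the ID into four 32-bit words. A standalone sketch of that layout, with illustrative struct and parameter names that mirror the members but are not the class's API:

```cpp
#include <stdint.h>

// Four-word raw key layout matching GrCacheID::toRaw (sketch only).
struct RawCacheKey {
    uint32_t v[4];
};

static RawCacheKey pack_cache_key(uint64_t publicID,
                                  uint32_t resourceSpecific32,
                                  uint8_t domain,
                                  uint8_t resourceType,
                                  uint16_t resourceSpecific16) {
    RawCacheKey key;
    key.v[0] = (uint32_t) (publicID & 0xffffffffUL);          // low half of 64-bit public ID
    key.v[1] = (uint32_t) ((publicID >> 32) & 0xffffffffUL);  // high half
    key.v[2] = resourceSpecific32;                            // e.g. width | (height << 16)
    key.v[3] = ((uint32_t) domain << 24) |                    // 8 bits of domain,
               ((uint32_t) resourceType << 16) |              // 8 bits of type,
               resourceSpecific16;                            // 16 resource-specific bits
    return key;
}
```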
diff --git a/src/gpu/GrClipMaskManager.cpp b/src/gpu/GrClipMaskManager.cpp
index 7eab0ce68f..a596088ccf 100644
--- a/src/gpu/GrClipMaskManager.cpp
+++ b/src/gpu/GrClipMaskManager.cpp
@@ -18,9 +18,12 @@
 #include "GrAAConvexPathRenderer.h"
 #include "GrAAHairLinePathRenderer.h"
 #include "GrSWMaskHelper.h"
+#include "GrCacheID.h"
 
 #include "SkTLazy.h"
 
+GR_DEFINE_RESOURCE_CACHE_DOMAIN(GrClipMaskManager, GetAlphaMaskDomain)
+
 #define GR_AA_CLIP 1
 
 typedef SkClipStack::Element Element;
diff --git a/src/gpu/GrClipMaskManager.h b/src/gpu/GrClipMaskManager.h
index 534689a9bb..48d17e1fdc 100644
--- a/src/gpu/GrClipMaskManager.h
+++ b/src/gpu/GrClipMaskManager.h
@@ -41,6 +41,8 @@ class GrDrawState;
 */
 class GrClipMaskManager : public GrNoncopyable {
 public:
+    GR_DECLARE_RESOURCE_CACHE_DOMAIN(GetAlphaMaskDomain)
+
     GrClipMaskManager()
         : fGpu(NULL)
         , fCurrClipMaskType(kNone_ClipMaskType) {
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index 12fbeb6e31..d00e062fae 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -209,20 +209,18 @@ void convolve_gaussian(GrDrawTarget* target,
 }
 
-////////////////////////////////////////////////////////////////////////////////
-
 GrTexture* GrContext::findTexture(const GrTextureDesc& desc,
-                                  const GrCacheID& cacheID,
+                                  const GrCacheData& cacheData,
                                   const GrTextureParams* params) {
-    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
+    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
     GrResource* resource = fTextureCache->find(resourceKey);
     return static_cast<GrTexture*>(resource);
 }
 
 bool GrContext::isTextureInCache(const GrTextureDesc& desc,
-                                 const GrCacheID& cacheID,
+                                 const GrCacheData& cacheData,
                                  const GrTextureParams* params) const {
-    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
+    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
     return fTextureCache->hasKey(resourceKey);
 }
 
@@ -274,13 +272,13 @@ static void stretchImage(void* dst,
 
 // The desired texture is NPOT and tiled but that isn't supported by
 // the current hardware. Resize the texture to be a POT
 GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
-                                           const GrCacheID& cacheID,
+                                           const GrCacheData& cacheData,
                                            void* srcData,
                                            size_t rowBytes,
                                            bool needsFiltering) {
-    GrTexture* clampedTexture = this->findTexture(desc, cacheID, NULL);
+    GrTexture* clampedTexture = this->findTexture(desc, cacheData, NULL);
     if (NULL == clampedTexture) {
-        clampedTexture = this->createTexture(NULL, desc, cacheID, srcData, rowBytes);
+        clampedTexture = this->createTexture(NULL, desc, cacheData, srcData, rowBytes);
 
         if (NULL == clampedTexture) {
             return NULL;
@@ -352,22 +350,22 @@ GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
 GrTexture* GrContext::createTexture(
         const GrTextureParams* params,
         const GrTextureDesc& desc,
-        const GrCacheID& cacheID,
+        const GrCacheData& cacheData,
         void* srcData,
         size_t rowBytes) {
-    SK_TRACE_EVENT0("GrContext::createTexture");
+    SK_TRACE_EVENT0("GrContext::createAndLockTexture");
 
 #if GR_DUMP_TEXTURE_UPLOAD
-    GrPrintf("GrContext::createTexture[%d %d]\n", desc.fWidth, desc.fHeight);
+    GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
 #endif
 
-    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
+    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
 
     SkAutoTUnref<GrTexture> texture;
     if (GrTexture::NeedsResizing(resourceKey)) {
-        texture.reset(this->createResizedTexture(desc, cacheID,
-                                                 srcData, rowBytes,
-                                                 GrTexture::NeedsFiltering(resourceKey)));
+        texture.reset(this->createResizedTexture(desc, cacheData,
+                                                 srcData, rowBytes,
+                                                 GrTexture::NeedsFiltering(resourceKey)));
     } else {
         texture.reset(fGpu->createTexture(desc, srcData, rowBytes));
     }
@@ -379,13 +377,15 @@ GrTexture* GrContext::createTexture(
     return texture;
 }
 
-GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
+GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc,
+                                         ScratchTexMatch match) {
     GrTextureDesc desc = inDesc;
+    GrCacheData cacheData(GrCacheData::kScratch_CacheID);
 
     GrAssert((desc.fFlags & kRenderTarget_GrTextureFlagBit) ||
              !(desc.fFlags & kNoStencil_GrTextureFlagBit));
 
-    if (kApprox_ScratchTexMatch == match) {
+    if (kExact_ScratchTexMatch != match) {
         // bin by pow2 with a reasonable min
         static const int MIN_SIZE = 256;
         desc.fWidth  = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
@@ -399,7 +399,7 @@ GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc, ScratchTex
     bool doubledH = false;
 
     do {
-        GrResourceKey key = GrTexture::ComputeScratchKey(desc);
+        GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, desc, cacheData, true);
         // Ensure we have exclusive access to the texture so future 'find' calls don't return it
         resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag);
         // if we miss, relax the fit of the flags...
@@ -433,7 +433,10 @@ GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc, ScratchTex
         desc.fHeight = origHeight;
         SkAutoTUnref<GrTexture> texture(fGpu->createTexture(desc, NULL, 0));
         if (NULL != texture) {
-            GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc());
+            GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL,
+                                                      texture->desc(),
+                                                      cacheData,
+                                                      true);
             // Make the resource exclusive so future 'find' calls don't return it
             fTextureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
             resource = texture;
@@ -472,7 +475,7 @@ void GrContext::unlockScratchTexture(GrTexture* texture) {
     // If this is a scratch texture we detached it from the cache
     // while it was locked (to avoid two callers simultaneously getting
     // the same texture).
-    if (texture->getCacheEntry()->key().isScratch()) {
+    if (GrTexture::IsScratchTexture(texture->getCacheEntry()->key())) {
         fTextureCache->makeNonExclusive(texture->getCacheEntry());
    }
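The `kApprox_ScratchTexMatch` path in `lockScratchTexture` above bins dimensions so that near-miss sizes reuse the same cache entries. A minimal sketch of that rounding, assuming the `GrMax`/`GrNextPow2` semantics used in the hunk (helper names here are illustrative):

```cpp
#include <algorithm>

// Round n up to the next power of two (returns n if it is already one).
static int next_pow2(int n) {
    int p = 1;
    while (p < n) {
        p <<= 1;
    }
    return p;
}

// Approximate-match binning: pow2 dimensions with a 256-pixel floor, so a
// 300x40 request and a 310x60 request both land in the 512x256 bin.
static void bin_scratch_size(int* width, int* height) {
    static const int kMinSize = 256;
    *width  = std::max(kMinSize, next_pow2(*width));
    *height = std::max(kMinSize, next_pow2(*height));
}
```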
diff --git a/src/gpu/GrResourceCache.cpp b/src/gpu/GrResourceCache.cpp
index 45b999f3d0..a1f1d794e5 100644
--- a/src/gpu/GrResourceCache.cpp
+++ b/src/gpu/GrResourceCache.cpp
@@ -11,20 +11,6 @@
 #include "GrResourceCache.h"
 #include "GrResource.h"
 
-
-GrResourceKey::ResourceType GrResourceKey::GenerateResourceType() {
-    static int32_t gNextType = 0;
-
-    int32_t type = sk_atomic_inc(&gNextType);
-    if (type >= (1 << 8 * sizeof(ResourceType))) {
-        GrCrash("Too many Resource Types");
-    }
-
-    return static_cast<ResourceType>(type);
-}
-
-///////////////////////////////////////////////////////////////////////////////
-
 GrResourceEntry::GrResourceEntry(const GrResourceKey& key, GrResource* resource)
         : fKey(key), fResource(resource) {
     // we assume ownership of the resource, and will unref it when we die
@@ -47,6 +33,36 @@ void GrResourceEntry::validate() const {
 
 ///////////////////////////////////////////////////////////////////////////////
 
+class GrResourceCache::Key {
+    typedef GrResourceEntry T;
+
+    const GrResourceKey& fKey;
+
+public:
+    Key(const GrResourceKey& key) : fKey(key) {}
+
+    uint32_t getHash() const { return fKey.hashIndex(); }
+
+    static bool LT(const T& entry, const Key& key) {
+        return entry.key() < key.fKey;
+    }
+    static bool EQ(const T& entry, const Key& key) {
+        return entry.key() == key.fKey;
+    }
+#if GR_DEBUG
+    static uint32_t GetHash(const T& entry) {
+        return entry.key().hashIndex();
+    }
+    static bool LT(const T& a, const T& b) {
+        return a.key() < b.key();
+    }
+    static bool EQ(const T& a, const T& b) {
+        return a.key() == b.key();
+    }
+#endif
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
 GrResourceCache::GrResourceCache(int maxCount, size_t maxBytes) :
         fMaxCount(maxCount),
         fMaxBytes(maxBytes) {
diff --git a/src/gpu/GrResourceCache.h b/src/gpu/GrResourceCache.h
index b4c9df7ad2..e1207a204b 100644
--- a/src/gpu/GrResourceCache.h
+++ b/src/gpu/GrResourceCache.h
@@ -14,12 +14,27 @@
 #include "GrConfig.h"
 #include "GrTypes.h"
 #include "GrTHashCache.h"
-#include "GrBinHashKey.h"
 #include "SkTInternalLList.h"
 
 class GrResource;
-class GrResourceEntry;
 
+// return true if a<b, or false if b<a
+//
+#define RET_IF_LT_OR_GT(a, b)   \
+    do {                        \
+        if ((a) < (b)) {        \
+            return true;        \
+        }                       \
+        if ((b) < (a)) {        \
+            return false;       \
+        }                       \
+    } while (0)
+
+/**
+ *  Helper class for GrResourceCache, the Key is used to identify src data for
+ *  a resource. It is identified by 2 32bit data fields which can hold any
+ *  data (uninterpreted by the cache) and a width/height.
+ */
 class GrResourceKey {
 public:
     enum {
@@ -28,118 +43,82 @@ public:
         kHashMask   = kHashCount - 1
     };
 
-    static GrCacheID::Domain ScratchDomain() {
-        static const GrCacheID::Domain gDomain = GrCacheID::GenerateDomain();
-        return gDomain;
+    GrResourceKey(uint32_t p0, uint32_t p1, uint32_t p2, uint32_t p3) {
+        fP[0] = p0;
+        fP[1] = p1;
+        fP[2] = p2;
+        fP[3] = p3;
+        this->computeHashIndex();
     }
 
-    /** Uniquely identifies the GrResource subclass in the key to avoid collisions
-        across resource types. */
-    typedef uint8_t ResourceType;
-
-    /** Flags set by the GrResource subclass. */
-    typedef uint8_t ResourceFlags;
-
-    /** Generate a unique ResourceType */
-    static ResourceType GenerateResourceType();
-
-    /** Creates a key for resource */
-    GrResourceKey(const GrCacheID& id, ResourceType type, ResourceFlags flags) {
-        this->init(id.getDomain(), id.getKey(), type, flags);
-    };
-
-    GrResourceKey(const GrResourceKey& src) {
-        fKey = src.fKey;
-    }
-
-    GrResourceKey() {
-        fKey.fHashedKey.reset();
+    GrResourceKey(uint32_t v[4]) {
+        memcpy(fP, v, 4 * sizeof(uint32_t));
+        this->computeHashIndex();
     }
 
-    void reset(const GrCacheID& id, ResourceType type, ResourceFlags flags) {
-        this->init(id.getDomain(), id.getKey(), type, flags);
+    GrResourceKey(const GrResourceKey& src) {
+        memcpy(fP, src.fP, 4 * sizeof(uint32_t));
+#if GR_DEBUG
+        this->computeHashIndex();
+        GrAssert(fHashIndex == src.fHashIndex);
+#endif
+        fHashIndex = src.fHashIndex;
     }
 
     //!< returns hash value [0..kHashMask] for the key
-    int getHash() const {
-        return fKey.fHashedKey.getHash() & kHashMask;
-    }
+    int hashIndex() const { return fHashIndex; }
 
-    bool isScratch() const {
-        return ScratchDomain() ==
-            *reinterpret_cast<const GrCacheID::Domain*>(fKey.fHashedKey.getData() +
-                                                        kCacheIDDomainOffset);
+    friend bool operator==(const GrResourceKey& a, const GrResourceKey& b) {
+        GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex);
+        return 0 == memcmp(a.fP, b.fP, 4 * sizeof(uint32_t));
     }
 
-    ResourceType getResourceType() const {
-        return *reinterpret_cast<const ResourceType*>(fKey.fHashedKey.getData() +
-                                                      kResourceTypeOffset);
+    friend bool operator!=(const GrResourceKey& a, const GrResourceKey& b) {
+        GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex);
+        return !(a == b);
     }
 
-    ResourceFlags getResourceFlags() const {
-        return *reinterpret_cast<const ResourceFlags*>(fKey.fHashedKey.getData() +
-                                                       kResourceFlagsOffset);
+    friend bool operator<(const GrResourceKey& a, const GrResourceKey& b) {
+        RET_IF_LT_OR_GT(a.fP[0], b.fP[0]);
+        RET_IF_LT_OR_GT(a.fP[1], b.fP[1]);
+        RET_IF_LT_OR_GT(a.fP[2], b.fP[2]);
+        return a.fP[3] < b.fP[3];
     }
 
-    int compare(const GrResourceKey& other) const {
-        return fKey.fHashedKey.compare(other.fKey.fHashedKey);
+    uint32_t getValue32(int i) const {
+        GrAssert(i >=0 && i < 4);
+        return fP[i];
     }
 
+private:
 
-    static bool LT(const GrResourceKey& a, const GrResourceKey& b) {
-        return a.compare(b) < 0;
+    static uint32_t rol(uint32_t x) {
+        return (x >> 24) | (x << 8);
     }
-
-    static bool EQ(const GrResourceKey& a, const GrResourceKey& b) {
-        return 0 == a.compare(b);
+    static uint32_t ror(uint32_t x) {
+        return (x >> 8) | (x << 24);
+    }
+    static uint32_t rohalf(uint32_t x) {
+        return (x >> 16) | (x << 16);
     }
 
-    inline static bool LT(const GrResourceEntry& entry, const GrResourceKey& key);
-    inline static bool EQ(const GrResourceEntry& entry, const GrResourceKey& key);
-    inline static bool LT(const GrResourceEntry& a, const GrResourceEntry& b);
-    inline static bool EQ(const GrResourceEntry& a, const GrResourceEntry& b);
-
-private:
-    enum {
-        kCacheIDKeyOffset = 0,
-        kCacheIDDomainOffset = kCacheIDKeyOffset + sizeof(GrCacheID::Key),
-        kResourceTypeOffset = kCacheIDDomainOffset + sizeof(GrCacheID::Domain),
-        kResourceFlagsOffset = kResourceTypeOffset + sizeof(ResourceType),
-        kPadOffset = kResourceFlagsOffset + sizeof(ResourceFlags),
-        kKeySize = SkAlign4(kPadOffset),
-        kPadSize = kKeySize - kPadOffset
-    };
-
-    void init(const GrCacheID::Domain domain,
-              const GrCacheID::Key& key,
-              ResourceType type,
-              ResourceFlags flags) {
-        union {
-            uint8_t  fKey8[kKeySize];
-            uint32_t fKey32[kKeySize / 4];
-        } keyData;
-
-        uint8_t* k = keyData.fKey8;
-        memcpy(k + kCacheIDKeyOffset, key.fData8, sizeof(GrCacheID::Key));
-        memcpy(k + kCacheIDDomainOffset, &domain, sizeof(GrCacheID::Domain));
-        memcpy(k + kResourceTypeOffset, &type, sizeof(ResourceType));
-        memcpy(k + kResourceFlagsOffset, &flags, sizeof(ResourceFlags));
-        memset(k + kPadOffset, 0, kPadSize);
-        fKey.fHashedKey.setKeyData(keyData.fKey32);
+    void computeHashIndex() {
+        uint32_t hash = fP[0] ^ rol(fP[1]) ^ ror(fP[2]) ^ rohalf(fP[3]);
+        // this way to mix and reduce hash to its index may have to change
+        // depending on how many bits we allocate to the index
+        hash ^= hash >> 16;
+        hash ^= hash >> 8;
+        fHashIndex = hash & kHashMask;
     }
 
-    struct Key;
-    typedef GrTBinHashKey<Key, kKeySize> HashedKey;
+    uint32_t    fP[4];
 
-    struct Key {
-        int compare(const HashedKey& hashedKey) const {
-            fHashedKey.compare(fHashedKey);
-        }
-        HashedKey fHashedKey;
-    };
+    // this is computed from the fP... fields
+    int         fHashIndex;
 
-    Key fKey;
+    friend class GrContext;
 };
 
+
 ///////////////////////////////////////////////////////////////////////////////
 
 class GrResourceEntry {
@@ -167,22 +146,6 @@ private:
     friend class GrDLinkedList;
 };
 
-bool GrResourceKey::LT(const GrResourceEntry& entry, const GrResourceKey& key) {
-    return LT(entry.key(), key);
-}
-
-bool GrResourceKey::EQ(const GrResourceEntry& entry, const GrResourceKey& key) {
-    return EQ(entry.key(), key);
-}
-
-bool GrResourceKey::LT(const GrResourceEntry& a, const GrResourceEntry& b) {
-    return LT(a.key(), b.key());
-}
-
-bool GrResourceKey::EQ(const GrResourceEntry& a, const GrResourceEntry& b) {
-    return EQ(a.key(), b.key());
-}
-
 ///////////////////////////////////////////////////////////////////////////////
 
 #include "GrTHashCache.h"
@@ -326,7 +289,8 @@ private:
 
     void removeInvalidResource(GrResourceEntry* entry);
 
-    GrTHashTable<GrResourceEntry, GrResourceKey, 8> fCache;
+    class Key;
+    GrTHashTable<GrResourceEntry, Key, 8> fCache;
 
     // We're an internal doubly linked list
     typedef SkTInternalLList<GrResourceEntry> EntryList;
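`computeHashIndex` above mixes the four key words into a small table index. A self-contained sketch of the same mixing (helper names are illustrative; `kHashBits == 8` matches the `GrTHashTable<GrResourceEntry, Key, 8>` instantiation in the cache):

```cpp
#include <stdint.h>

// Byte rotations used to decorrelate the four key words before XOR-folding.
static uint32_t rol8(uint32_t x)   { return (x >> 24) | (x << 8); }
static uint32_t ror8(uint32_t x)   { return (x >> 8)  | (x << 24); }
static uint32_t swap16(uint32_t x) { return (x >> 16) | (x << 16); }

static int compute_hash_index(const uint32_t p[4]) {
    static const int kHashBits = 8;
    static const int kHashMask = (1 << kHashBits) - 1;
    uint32_t hash = p[0] ^ rol8(p[1]) ^ ror8(p[2]) ^ swap16(p[3]);
    hash ^= hash >> 16;  // fold the high bits down...
    hash ^= hash >> 8;   // ...so the low byte sees all 32 input bits
    return hash & kHashMask;
}
```

Without the rotations, keys that differ only in the same byte position of different words would cancel under XOR; rotating each word by a different amount avoids that systematic collision.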
diff --git a/src/gpu/GrStencilBuffer.cpp b/src/gpu/GrStencilBuffer.cpp
index 623861df8f..180912e02d 100644
--- a/src/gpu/GrStencilBuffer.cpp
+++ b/src/gpu/GrStencilBuffer.cpp
@@ -13,6 +13,7 @@
 #include "GrResourceCache.h"
 
 SK_DEFINE_INST_COUNT(GrStencilBuffer)
+GR_DEFINE_RESOURCE_CACHE_TYPE(GrStencilBuffer)
 
 void GrStencilBuffer::transferToCache() {
     GrAssert(NULL == this->getCacheEntry());
@@ -21,28 +22,30 @@ void GrStencilBuffer::transferToCache() {
 }
 
 namespace {
-// we should never have more than one stencil buffer with same combo of (width,height,samplecount)
-void gen_cache_id(int width, int height, int sampleCnt, GrCacheID* cacheID) {
-    static const GrCacheID::Domain gStencilBufferDomain = GrCacheID::GenerateDomain();
-    GrCacheID::Key key;
-    uint32_t* keyData = key.fData32;
-    keyData[0] = width;
-    keyData[1] = height;
-    keyData[2] = sampleCnt;
-    GR_STATIC_ASSERT(sizeof(key) >= 3 * sizeof(uint32_t));
-    cacheID->reset(gStencilBufferDomain, key);
+// we should never have more than one stencil buffer with same combo of
+// (width,height,samplecount)
+void gen_stencil_key_values(int width,
+                            int height,
+                            int sampleCnt,
+                            GrCacheID* cacheID) {
+    cacheID->fPublicID = GrCacheID::kDefaultPublicCacheID;
+    cacheID->fResourceSpecific32 = width | (height << 16);
+    cacheID->fDomain = GrCacheData::kScratch_ResourceDomain;
+
+    GrAssert(sampleCnt >= 0 && sampleCnt < 256);
+    cacheID->fResourceSpecific16 = sampleCnt << 8;
+
+    // last 8 bits of 'fResourceSpecific16' is free for flags
 }
 }
 
 GrResourceKey GrStencilBuffer::ComputeKey(int width, int height, int sampleCnt) {
-    // All SBs are created internally to attach to RTs so they all use the same domain.
-    static const GrResourceKey::ResourceType gStencilBufferResourceType =
-        GrResourceKey::GenerateResourceType();
-    GrCacheID id;
-    gen_cache_id(width, height, sampleCnt, &id);
-
-    // we don't use any flags for SBs currently.
-    return GrResourceKey(id, gStencilBufferResourceType, 0);
+    GrCacheID id(GrStencilBuffer::GetResourceType());
+    gen_stencil_key_values(width, height, sampleCnt, &id);
+
+    uint32_t v[4];
+    id.toRaw(v);
+    return GrResourceKey(v);
 }
diff --git a/src/gpu/GrStencilBuffer.h b/src/gpu/GrStencilBuffer.h
index 459d80143f..e4e5190598 100644
--- a/src/gpu/GrStencilBuffer.h
+++ b/src/gpu/GrStencilBuffer.h
@@ -12,6 +12,7 @@
 
 #include "GrClipData.h"
 #include "GrResource.h"
+#include "GrCacheID.h"
 
 class GrRenderTarget;
 class GrResourceEntry;
@@ -20,6 +21,7 @@ class GrResourceKey;
 class GrStencilBuffer : public GrResource {
 public:
     SK_DECLARE_INST_COUNT(GrStencilBuffer);
+    GR_DECLARE_RESOURCE_CACHE_TYPE()
 
     virtual ~GrStencilBuffer() {
         // TODO: allow SB to be purged and detach itself from rts
diff --git a/src/gpu/GrTHashCache.h b/src/gpu/GrTHashCache.h
index 854723715d..4494f9f024 100644
--- a/src/gpu/GrTHashCache.h
+++ b/src/gpu/GrTHashCache.h
@@ -226,6 +226,13 @@ void GrTHashTable<T, Key, kHashBits>::unrefAll() {
 #if GR_DEBUG
 template <typename T, typename Key, size_t kHashBits>
 void GrTHashTable<T, Key, kHashBits>::validate() const {
+    for (size_t i = 0; i < GR_ARRAY_COUNT(fHash); i++) {
+        if (fHash[i]) {
+            unsigned hashIndex = hash2Index(Key::GetHash(*fHash[i]));
+            GrAssert(hashIndex == i);
+        }
+    }
+
     int count = fSorted.count();
     for (int i = 1; i < count; i++) {
         GrAssert(Key::LT(*fSorted[i - 1], *fSorted[i]) ||
diff --git a/src/gpu/GrTexture.cpp b/src/gpu/GrTexture.cpp
index eb3a58693f..c31d774544 100644
--- a/src/gpu/GrTexture.cpp
+++ b/src/gpu/GrTexture.cpp
@@ -15,6 +15,7 @@
 #include "GrResourceCache.h"
 
 SK_DEFINE_INST_COUNT(GrTexture)
+GR_DEFINE_RESOURCE_CACHE_TYPE(GrTexture)
 
 /**
 * This method allows us to interrupt the normal deletion process and place
@@ -115,73 +116,97 @@ void GrTexture::validateDesc() const {
     }
 }
 
-// These flags need to fit in a GrResourceKey::ResourceFlags so they can be folded into the texture
+// These flags need to fit in <= 8 bits so they can be folded into the texture
 // key
-enum TextureFlags {
-    /**
-     * The kStretchToPOT bit is set when the texture is NPOT and is being repeated but the
-     * hardware doesn't support that feature.
+enum TextureBits {
+    /*
+     * The kNPOT bit is set when the texture is NPOT and is being repeated
+     * but the hardware doesn't support that feature.
     */
-    kStretchToPOT_TextureFlag = 0x1,
-    /**
-     * The kFilter bit can only be set when the kStretchToPOT flag is set and indicates whether the
-     * stretched texture should be bilerp filtered or point sampled.
+    kNPOT_TextureBit            = 0x1,
+    /*
+     * The kFilter bit can only be set when the kNPOT flag is set and indicates
+     * whether the resizing of the texture should use filtering. This is
+     * to handle cases where the original texture is indexed to disable
+     * filtering.
     */
-    kFilter_TextureFlag = 0x2,
+    kFilter_TextureBit          = 0x2,
+    /*
+     * The kScratch bit is set if the texture is being used as a scratch
+     * texture.
+     */
+    kScratch_TextureBit         = 0x4,
 };
 
 namespace {
-GrResourceKey::ResourceFlags get_texture_flags(const GrGpu* gpu,
-                                               const GrTextureParams* params,
-                                               const GrTextureDesc& desc) {
-    GrResourceKey::ResourceFlags flags = 0;
-    bool tiled = NULL != params && params->isTiled();
-    if (tiled & !gpu->getCaps().npotTextureTileSupport()) {
-        if (!GrIsPow2(desc.fWidth) || GrIsPow2(desc.fHeight)) {
-            flags |= kStretchToPOT_TextureFlag;
+void gen_texture_key_values(const GrGpu* gpu,
+                            const GrTextureParams* params,
+                            const GrTextureDesc& desc,
+                            const GrCacheData& cacheData,
+                            bool scratch,
+                            GrCacheID* cacheID) {
+
+    uint64_t clientKey = cacheData.fClientCacheID;
+
+    if (scratch) {
+        // Instead of a client-provided key of the texture contents
+        // we create a key from the descriptor.
+        GrAssert(GrCacheData::kScratch_CacheID == clientKey);
+        clientKey = (desc.fFlags << 8) | ((uint64_t) desc.fConfig << 32);
+    }
+
+    cacheID->fPublicID = clientKey;
+    cacheID->fDomain = cacheData.fResourceDomain;
+
+    // we assume we only need 16 bits of width and height
+    // assert that texture creation will fail anyway if this assumption
+    // would cause key collisions.
+    GrAssert(gpu->getCaps().maxTextureSize() <= SK_MaxU16);
+    cacheID->fResourceSpecific32 = desc.fWidth | (desc.fHeight << 16);
+
+    GrAssert(desc.fSampleCnt >= 0 && desc.fSampleCnt < 256);
+    cacheID->fResourceSpecific16 = desc.fSampleCnt << 8;
+
+    if (!gpu->getCaps().npotTextureTileSupport()) {
+        bool isPow2 = GrIsPow2(desc.fWidth) && GrIsPow2(desc.fHeight);
+
+        bool tiled = NULL != params && params->isTiled();
+
+        if (tiled && !isPow2) {
+            cacheID->fResourceSpecific16 |= kNPOT_TextureBit;
             if (params->isBilerp()) {
-                flags |= kFilter_TextureFlag;
+                cacheID->fResourceSpecific16 |= kFilter_TextureBit;
             }
         }
     }
-    return flags;
-}
 
-GrResourceKey::ResourceType texture_resource_type() {
-    static const GrResourceKey::ResourceType gType = GrResourceKey::GenerateResourceType();
-    return gType;
+    if (scratch) {
+        cacheID->fResourceSpecific16 |= kScratch_TextureBit;
+    }
 }
 }
 
 GrResourceKey GrTexture::ComputeKey(const GrGpu* gpu,
                                     const GrTextureParams* params,
                                     const GrTextureDesc& desc,
-                                    const GrCacheID& cacheID) {
-    GrResourceKey::ResourceFlags flags = get_texture_flags(gpu, params, desc);
-    return GrResourceKey(cacheID, texture_resource_type(), flags);
+                                    const GrCacheData& cacheData,
+                                    bool scratch) {
+    GrCacheID id(GrTexture::GetResourceType());
+    gen_texture_key_values(gpu, params, desc, cacheData, scratch, &id);
+
+    uint32_t v[4];
+    id.toRaw(v);
+    return GrResourceKey(v);
 }
 
-GrResourceKey GrTexture::ComputeScratchKey(const GrTextureDesc& desc) {
-    GrCacheID::Key idKey;
-    // Instead of a client-provided key of the texture contents we create a key from the
-    // descriptor.
-    GR_STATIC_ASSERT(sizeof(idKey) >= 12);
-    GrAssert(desc.fHeight < (1 << 16));
-    GrAssert(desc.fWidth < (1 << 16));
-    idKey.fData32[0] = (desc.fWidth) | (desc.fHeight << 16);
-    idKey.fData32[1] = desc.fConfig | desc.fSampleCnt << 16;
-    idKey.fData32[2] = desc.fFlags;
-    static const int kPadSize = sizeof(idKey) - 12;
-    memset(idKey.fData8 + 12, 0, kPadSize);
-
-    GrCacheID cacheID(GrResourceKey::ScratchDomain(), idKey);
-    return GrResourceKey(cacheID, texture_resource_type(), 0);
+bool GrTexture::NeedsResizing(const GrResourceKey& key) {
+    return 0 != (key.getValue32(3) & kNPOT_TextureBit);
 }
 
-bool GrTexture::NeedsResizing(const GrResourceKey& key) {
-    return SkToBool(key.getResourceFlags() & kStretchToPOT_TextureFlag);
+bool GrTexture::IsScratchTexture(const GrResourceKey& key) {
+    return 0 != (key.getValue32(3) & kScratch_TextureBit);
 }
 
 bool GrTexture::NeedsFiltering(const GrResourceKey& key) {
-    return SkToBool(key.getResourceFlags() & kFilter_TextureFlag);
+    return 0 != (key.getValue32(3) & kFilter_TextureBit);
 }
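`NeedsResizing`, `IsScratchTexture`, and `NeedsFiltering` above all decode flag bits from the fourth raw key word. A sketch of that decoding, assuming the `toRaw` layout shown earlier, where the low byte of `fResourceSpecific16` lands in the low byte of word 3 (names here are illustrative):

```cpp
#include <stdint.h>

// Texture flag bits folded into the low byte of the key's fourth word.
enum {
    kNPOT_Bit    = 0x1,  // texture will be stretched to a power of two
    kFilter_Bit  = 0x2,  // the stretch should be bilerp filtered
    kScratch_Bit = 0x4,  // key describes a scratch (descriptor-keyed) texture
};

// Word 3 carries domain (top 8 bits), resource type (next 8), and the
// 16 resource-specific bits whose low byte holds the flags.
static bool key_has_bit(const uint32_t rawKey[4], uint32_t bit) {
    return 0 != (rawKey[3] & bit);
}
```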
diff --git a/src/gpu/SkGpuDevice.cpp b/src/gpu/SkGpuDevice.cpp
index f4c4cf60aa..7b06d6080b 100644
--- a/src/gpu/SkGpuDevice.cpp
+++ b/src/gpu/SkGpuDevice.cpp
@@ -1104,7 +1104,7 @@ bool SkGpuDevice::shouldTileBitmap(const SkBitmap& bitmap,
         return false;
     }
     // if the entire texture is already in our cache then no reason to tile it
-    if (GrIsBitmapInCache(fContext, bitmap, &params)) {
+    if (this->isBitmapInTextureCache(bitmap, params)) {
         return false;
     }
 
@@ -1848,6 +1848,22 @@ void SkGpuDevice::flush() {
 
 ///////////////////////////////////////////////////////////////////////////////
 
+bool SkGpuDevice::isBitmapInTextureCache(const SkBitmap& bitmap,
+                                         const GrTextureParams& params) const {
+    uint64_t key = bitmap.getGenerationID();
+    key |= ((uint64_t) bitmap.pixelRefOffset()) << 32;
+
+    GrTextureDesc desc;
+    desc.fWidth = bitmap.width();
+    desc.fHeight = bitmap.height();
+    desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config());
+
+    GrCacheData cacheData(key);
+
+    return this->context()->isTextureInCache(desc, cacheData, &params);
+}
+
+
 SkDevice* SkGpuDevice::onCreateCompatibleDevice(SkBitmap::Config config,
                                                 int width, int height,
                                                 bool isOpaque,
@@ -1867,10 +1883,10 @@ SkDevice* SkGpuDevice::onCreateCompatibleDevice(SkBitmap::Config config,
 #if CACHE_COMPATIBLE_DEVICE_TEXTURES
     // layers are never draw in repeat modes, so we can request an approx
     // match and ignore any padding.
-    const GrContext::ScratchTexMatch match = (kSaveLayer_Usage == usage) ?
-                                                GrContext::kApprox_ScratchTexMatch :
-                                                GrContext::kExact_ScratchTexMatch;
-    texture = fContext->lockScratchTexture(desc, match);
+    GrContext::ScratchTexMatch matchType = (kSaveLayer_Usage == usage) ?
+                                    GrContext::kApprox_ScratchTexMatch :
+                                    GrContext::kExact_ScratchTexMatch;
+    texture = fContext->lockScratchTexture(desc, matchType);
 #else
     tunref.reset(fContext->createUncachedTexture(desc, NULL, 0));
     texture = tunref.get();
diff --git a/src/gpu/SkGr.cpp b/src/gpu/SkGr.cpp
index bc996cfeb2..79bc75daf0 100644
--- a/src/gpu/SkGr.cpp
+++ b/src/gpu/SkGr.cpp
@@ -56,34 +56,8 @@ static void build_compressed_data(void* buffer, const SkBitmap& bitmap) {
 
 ////////////////////////////////////////////////////////////////////////////////
 
-void generate_bitmap_cache_id(const SkBitmap& bitmap, GrCacheID* id) {
-    // Our id includes the offset, width, and height so that bitmaps created by extractSubset()
-    // are unique.
-    uint32_t genID = bitmap.getGenerationID();
-    size_t offset = bitmap.pixelRefOffset();
-    int16_t width = static_cast<int16_t>(bitmap.width());
-    int16_t height = static_cast<int16_t>(bitmap.height());
-
-    GrCacheID::Key key;
-    memcpy(key.fData8, &genID, 4);
-    memcpy(key.fData8 + 4, &width, 2);
-    memcpy(key.fData8 + 6, &height, 2);
-    memcpy(key.fData8 + 8, &offset, sizeof(size_t));
-    GR_STATIC_ASSERT(sizeof(key) >= 8 + sizeof(size_t));
-    static const GrCacheID::Domain gBitmapTextureDomain = GrCacheID::GenerateDomain();
-    id->reset(gBitmapTextureDomain, key);
-}
-
-void generate_bitmap_texture_desc(const SkBitmap& bitmap, GrTextureDesc* desc) {
-    desc->fFlags = kNone_GrTextureFlags;
-    desc->fWidth = bitmap.width();
-    desc->fHeight = bitmap.height();
-    desc->fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config());
-    desc->fSampleCnt = 0;
-}
-
 static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
-                                              bool cache,
+                                              uint64_t key,
                                               const GrTextureParams* params,
                                               const SkBitmap& origBitmap) {
     SkAutoLockPixels alp(origBitmap);
@@ -97,7 +71,11 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
     const SkBitmap* bitmap = &origBitmap;
 
     GrTextureDesc desc;
-    generate_bitmap_texture_desc(*bitmap, &desc);
+    desc.fWidth = bitmap->width();
+    desc.fHeight = bitmap->height();
+    desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap->config());
+
+    GrCacheData cacheData(key);
 
     if (SkBitmap::kIndex8_Config == bitmap->config()) {
         // build_compressed_data doesn't do npot->pot expansion
@@ -113,33 +91,31 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
             // our compressed data will be trimmed, so pass width() for its
             // "rowBytes", since they are the same now.
 
-            if (cache) {
-                GrCacheID cacheID;
-                generate_bitmap_cache_id(origBitmap, &cacheID);
-                return ctx->createTexture(params, desc, cacheID,
+            if (GrCacheData::kScratch_CacheID != key) {
+                return ctx->createTexture(params, desc, cacheData,
                                           storage.get(), bitmap->width());
             } else {
                 GrTexture* result = ctx->lockScratchTexture(desc,
-                                                            GrContext::kExact_ScratchTexMatch);
+                                                  GrContext::kExact_ScratchTexMatch);
                 result->writePixels(0, 0, bitmap->width(),
                                     bitmap->height(), desc.fConfig,
                                     storage.get());
                 return result;
             }
+
         } else {
             origBitmap.copyTo(&tmpBitmap, SkBitmap::kARGB_8888_Config);
             // now bitmap points to our temp, which has been promoted to 32bits
             bitmap = &tmpBitmap;
-            desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap->config());
         }
     }
 
-    if (cache) {
+    desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap->config());
+    if (GrCacheData::kScratch_CacheID != key) {
         // This texture is likely to be used again so leave it in the cache
-        GrCacheID cacheID;
-        generate_bitmap_cache_id(origBitmap, &cacheID);
-        return ctx->createTexture(params, desc, cacheID,
+        // but locked.
+        return ctx->createTexture(params, desc, cacheData,
                                   bitmap->getPixels(),
                                   bitmap->rowBytes());
     } else {
@@ -148,7 +124,8 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
         // cache so no one else can find it. Additionally, once unlocked, the
         // scratch texture will go to the end of the list for purging so will
        // likely be available for this volatile bitmap the next time around.
-        GrTexture* result = ctx->lockScratchTexture(desc, GrContext::kExact_ScratchTexMatch);
+        GrTexture* result = ctx->lockScratchTexture(desc,
+                                         GrContext::kExact_ScratchTexMatch);
         result->writePixels(0, 0,
                             bitmap->width(), bitmap->height(),
                             desc.fConfig,
@@ -158,37 +135,32 @@ static GrTexture* sk_gr_create_bitmap_texture(GrContext* ctx,
     }
 }
 
-bool GrIsBitmapInCache(const GrContext* ctx,
-                       const SkBitmap& bitmap,
-                       const GrTextureParams* params) {
-    GrCacheID cacheID;
-    generate_bitmap_cache_id(bitmap, &cacheID);
-
-    GrTextureDesc desc;
-    generate_bitmap_texture_desc(bitmap, &desc);
-    return ctx->isTextureInCache(desc, cacheID, params);
-}
+///////////////////////////////////////////////////////////////////////////////
 
 GrTexture* GrLockCachedBitmapTexture(GrContext* ctx,
                                      const SkBitmap& bitmap,
                                      const GrTextureParams* params) {
     GrTexture* result = NULL;
 
-    bool cache = !bitmap.isVolatile();
-
-    if (cache) {
-        // If the bitmap isn't changing try to find a cached copy first.
-
-        GrCacheID cacheID;
-        generate_bitmap_cache_id(bitmap, &cacheID);
+    if (!bitmap.isVolatile()) {
+        // If the bitmap isn't changing try to find a cached copy first
+        uint64_t key = bitmap.getGenerationID();
+        key |= ((uint64_t) bitmap.pixelRefOffset()) << 32;
 
         GrTextureDesc desc;
-        generate_bitmap_texture_desc(bitmap, &desc);
+        desc.fWidth = bitmap.width();
+        desc.fHeight = bitmap.height();
+        desc.fConfig = SkBitmapConfig2GrPixelConfig(bitmap.config());
 
-        result = ctx->findTexture(desc, cacheID, params);
-    }
-    if (NULL == result) {
-        result = sk_gr_create_bitmap_texture(ctx, cache, params, bitmap);
+        GrCacheData cacheData(key);
+
+        result = ctx->findTexture(desc, cacheData, params);
+        if (NULL == result) {
+            // didn't find a cached copy so create one
+            result = sk_gr_create_bitmap_texture(ctx, key, params, bitmap);
+        }
+    } else {
+        result = sk_gr_create_bitmap_texture(ctx, GrCacheData::kScratch_CacheID, params, bitmap);
     }
     if (NULL == result) {
         GrPrintf("---- failed to create texture for cache [%d %d]\n",
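Both `SkGpuDevice::isBitmapInTextureCache` (above) and `GrLockCachedBitmapTexture` build the 64-bit client key the same way. A standalone sketch of the packing, not the SkGr.cpp code itself:

```cpp
#include <stddef.h>
#include <stdint.h>

// Generation ID in the low word, pixel-ref offset in the high word: bitmaps
// made by extractSubset() share a genID but differ in offset, so they still
// get distinct cache keys.
static uint64_t bitmap_texture_key(uint32_t generationID, size_t pixelRefOffset) {
    uint64_t key = generationID;
    key |= ((uint64_t) pixelRefOffset) << 32;
    return key;
}
```

Note the trade-off this revert accepts: unlike the `GrCacheID`-based scheme being removed, the 64-bit key no longer encodes width and height, so those are checked via the `GrTextureDesc` that accompanies each lookup.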
diff --git a/src/gpu/effects/GrTextureStripAtlas.cpp b/src/gpu/effects/GrTextureStripAtlas.cpp
index 30d7ce4d48..92f5ad5c6f 100644
--- a/src/gpu/effects/GrTextureStripAtlas.cpp
+++ b/src/gpu/effects/GrTextureStripAtlas.cpp
@@ -17,6 +17,9 @@
 #define VALIDATE
 #endif
 
+GR_DEFINE_RESOURCE_CACHE_DOMAIN(GrTextureStripAtlas, GetTextureStripAtlasDomain)
+
+
 int32_t GrTextureStripAtlas::gCacheCount = 0;
 
 GrTHashTable<GrTextureStripAtlas::AtlasEntry,
@@ -70,7 +73,7 @@ GrTextureStripAtlas* GrTextureStripAtlas::GetAtlas(const GrTextureStripAtlas::De
 }
 
 GrTextureStripAtlas::GrTextureStripAtlas(GrTextureStripAtlas::Desc desc)
-    : fCacheKey(sk_atomic_inc(&gCacheCount))
+    : fCacheID(sk_atomic_inc(&gCacheCount))
     , fLockedRows(0)
     , fDesc(desc)
     , fNumRows(desc.fHeight / desc.fRowHeight)
@@ -195,16 +198,11 @@ void GrTextureStripAtlas::lockTexture() {
     texDesc.fWidth = fDesc.fWidth;
     texDesc.fHeight = fDesc.fHeight;
     texDesc.fConfig = fDesc.fConfig;
-
-    static const GrCacheID::Domain gTextureStripAtlasDomain = GrCacheID::GenerateDomain();
-    GrCacheID::Key key;
-    *key.fData32 = fCacheKey;
-    memset(key.fData32 + 1, 0, sizeof(key) - sizeof(uint32_t));
-    GrCacheID cacheID(gTextureStripAtlasDomain, key);
-
-    fTexture = fDesc.fContext->findTexture(texDesc, cacheID, &params);
+    GrCacheData cacheData(fCacheID);
+    cacheData.fResourceDomain = GetTextureStripAtlasDomain();
+    fTexture = fDesc.fContext->findTexture(texDesc, cacheData, &params);
     if (NULL == fTexture) {
-        fTexture = fDesc.fContext->createTexture(&params, texDesc, cacheID, NULL, 0);
+        fTexture = fDesc.fContext->createTexture(&params, texDesc, cacheData, NULL, 0);
         // This is a new texture, so all of our cache info is now invalid
         this->initLRU();
         fKeyTable.rewind();
diff --git a/src/gpu/effects/GrTextureStripAtlas.h b/src/gpu/effects/GrTextureStripAtlas.h
index 1e1e5088c7..210d88ec90 100644
--- a/src/gpu/effects/GrTextureStripAtlas.h
+++ b/src/gpu/effects/GrTextureStripAtlas.h
@@ -21,6 +21,8 @@
 */
 class GrTextureStripAtlas {
 public:
+    GR_DECLARE_RESOURCE_CACHE_DOMAIN(GetTextureStripAtlasDomain)
+
     /**
     * Descriptor struct which we'll use as a hash table key
     **/
@@ -155,7 +157,7 @@ private:
     // A unique ID for this texture (formed with: gCacheCount++), so we can be sure that if we
     // get a texture back from the texture cache, that it's the same one we last used.
-    const int32_t fCacheKey;
+    const uint64_t fCacheID;
 
     // Total locks on all rows (when this reaches zero, we can unlock our texture)
     int32_t fLockedRows;