From 19c6250cad89d0e06a737919da94d6bc5c85cb1a Mon Sep 17 00:00:00 2001 From: robertphillips Date: Wed, 6 Jan 2016 07:04:46 -0800 Subject: Add debug sentinel to GrMemoryPool to check for memory stomping GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1553233006 Review URL: https://codereview.chromium.org/1553233006 --- src/gpu/GrMemoryPool.cpp | 17 +++++++++++++---- src/gpu/GrMemoryPool.h | 12 +++++++++++- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/src/gpu/GrMemoryPool.cpp b/src/gpu/GrMemoryPool.cpp index 1e005857b2..0fd7e90f66 100644 --- a/src/gpu/GrMemoryPool.cpp +++ b/src/gpu/GrMemoryPool.cpp @@ -40,8 +40,8 @@ GrMemoryPool::~GrMemoryPool() { void* GrMemoryPool::allocate(size_t size) { VALIDATE; - size = GrSizeAlignUp(size, kAlignment); size += kPerAllocPad; + size = GrSizeAlignUp(size, kAlignment); if (fTail->fFreeSize < size) { size_t blockSize = size; blockSize = SkTMax(blockSize, fMinAllocSize); @@ -59,7 +59,9 @@ void* GrMemoryPool::allocate(size_t size) { intptr_t ptr = fTail->fCurrPtr; // We stash a pointer to the block header, just before the allocated space, // so that we can decrement the live count on delete in constant time. 
- *reinterpret_cast<BlockHeader**>(ptr) = fTail; + AllocHeader* allocData = reinterpret_cast<AllocHeader*>(ptr); + SkDEBUGCODE(allocData->fSentinal = kAssignedMarker); + allocData->fHeader = fTail; ptr += kPerAllocPad; fTail->fPrevPtr = fTail->fCurrPtr; fTail->fCurrPtr += size; @@ -74,7 +76,10 @@ void* GrMemoryPool::allocate(size_t size) { void GrMemoryPool::release(void* p) { VALIDATE; intptr_t ptr = reinterpret_cast<intptr_t>(p) - kPerAllocPad; - BlockHeader* block = *reinterpret_cast<BlockHeader**>(ptr); + AllocHeader* allocData = reinterpret_cast<AllocHeader*>(ptr); + SkASSERT(kAssignedMarker == allocData->fSentinal); + SkDEBUGCODE(allocData->fSentinal = kFreedMarker); + BlockHeader* block = allocData->fHeader; if (1 == block->fLiveCount) { // the head block is special, it is reset rather than deleted if (fHead == block) { @@ -159,8 +164,12 @@ void GrMemoryPool::validate() { SkASSERT(ptrOffset == kHeaderSize); SkASSERT(userStart == block->fCurrPtr); } else { - SkASSERT(block == *reinterpret_cast<BlockHeader**>(userStart)); + AllocHeader* allocData = reinterpret_cast<AllocHeader*>(userStart); + SkASSERT(allocData->fSentinal == kAssignedMarker || + allocData->fSentinal == kFreedMarker); + SkASSERT(block == allocData->fHeader); } + prev = block; } while ((block = block->fNext)); SkASSERT(allocCount == fAllocationCnt); diff --git a/src/gpu/GrMemoryPool.h b/src/gpu/GrMemoryPool.h index 5e38a29c04..1dd1732ea9 100644 --- a/src/gpu/GrMemoryPool.h +++ b/src/gpu/GrMemoryPool.h @@ -68,11 +68,21 @@ private: size_t fSize; ///< total allocated size of the block }; + static const uint32_t kAssignedMarker = 0xCDCDCDCD; + static const uint32_t kFreedMarker = 0xEFEFEFEF; + + struct AllocHeader { +#ifdef SK_DEBUG + uint32_t fSentinal; ///< known value to check for memory stomping (e.g., (CD)*) +#endif + BlockHeader* fHeader; ///< pointer back to the block header in which an alloc resides + }; + enum { // We assume this alignment is good enough for everybody. 
kAlignment = 8, kHeaderSize = GR_CT_ALIGN_UP(sizeof(BlockHeader), kAlignment), - kPerAllocPad = GR_CT_ALIGN_UP(sizeof(BlockHeader*), kAlignment), + kPerAllocPad = GR_CT_ALIGN_UP(sizeof(AllocHeader), kAlignment), }; size_t fSize; size_t fPreallocSize; -- cgit v1.2.3