aboutsummaryrefslogtreecommitdiffhomepage
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/gpu/GrMemoryPool.cpp32
-rw-r--r--src/gpu/GrMemoryPool.h105
-rw-r--r--src/gpu/instanced/InstancedRendering.cpp4
-rw-r--r--src/gpu/instanced/InstancedRendering.h2
4 files changed, 113 insertions, 30 deletions
diff --git a/src/gpu/GrMemoryPool.cpp b/src/gpu/GrMemoryPool.cpp
index 6bc0f54602..c95b59fc8c 100644
--- a/src/gpu/GrMemoryPool.cpp
+++ b/src/gpu/GrMemoryPool.cpp
@@ -13,17 +13,19 @@
#define VALIDATE
#endif
+constexpr size_t GrMemoryPool::kSmallestMinAllocSize;
+
GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize) {
SkDEBUGCODE(fAllocationCnt = 0);
SkDEBUGCODE(fAllocBlockCnt = 0);
- minAllocSize = SkTMax<size_t>(minAllocSize, 1 << 10);
- fMinAllocSize = GrSizeAlignUp(minAllocSize + kPerAllocPad, kAlignment);
- fPreallocSize = GrSizeAlignUp(preallocSize + kPerAllocPad, kAlignment);
- fPreallocSize = SkTMax(fPreallocSize, fMinAllocSize);
+ minAllocSize = SkTMax<size_t>(GrSizeAlignUp(minAllocSize, kAlignment), kSmallestMinAllocSize);
+ preallocSize = SkTMax<size_t>(GrSizeAlignUp(preallocSize, kAlignment), minAllocSize);
+
+ fMinAllocSize = minAllocSize;
fSize = 0;
- fHead = CreateBlock(fPreallocSize);
+ fHead = CreateBlock(preallocSize);
fTail = fHead;
fHead->fNext = nullptr;
fHead->fPrev = nullptr;
@@ -43,7 +45,7 @@ void* GrMemoryPool::allocate(size_t size) {
size += kPerAllocPad;
size = GrSizeAlignUp(size, kAlignment);
if (fTail->fFreeSize < size) {
- size_t blockSize = size;
+ size_t blockSize = size + kHeaderSize;
blockSize = SkTMax<size_t>(blockSize, fMinAllocSize);
BlockHeader* block = CreateBlock(blockSize);
@@ -87,7 +89,7 @@ void GrMemoryPool::release(void* p) {
if (fHead == block) {
fHead->fCurrPtr = reinterpret_cast<intptr_t>(fHead) + kHeaderSize;
fHead->fLiveCount = 0;
- fHead->fFreeSize = fPreallocSize;
+ fHead->fFreeSize = fHead->fSize - kHeaderSize;
} else {
BlockHeader* prev = block->fPrev;
BlockHeader* next = block->fNext;
@@ -115,18 +117,18 @@ void GrMemoryPool::release(void* p) {
VALIDATE;
}
-GrMemoryPool::BlockHeader* GrMemoryPool::CreateBlock(size_t size) {
- size_t paddedSize = size + kHeaderSize;
+GrMemoryPool::BlockHeader* GrMemoryPool::CreateBlock(size_t blockSize) {
+ blockSize = SkTMax<size_t>(blockSize, kHeaderSize);
BlockHeader* block =
- reinterpret_cast<BlockHeader*>(sk_malloc_throw(paddedSize));
+ reinterpret_cast<BlockHeader*>(sk_malloc_throw(blockSize));
// we assume malloc gives us aligned memory
SkASSERT(!(reinterpret_cast<intptr_t>(block) % kAlignment));
SkDEBUGCODE(block->fBlockSentinal = kAssignedMarker);
block->fLiveCount = 0;
- block->fFreeSize = size;
+ block->fFreeSize = blockSize - kHeaderSize;
block->fCurrPtr = reinterpret_cast<intptr_t>(block) + kHeaderSize;
block->fPrevPtr = 0; // gcc warns on assigning nullptr to an intptr_t.
- block->fSize = paddedSize;
+ block->fSize = blockSize;
return block;
}
@@ -153,18 +155,16 @@ void GrMemoryPool::validate() {
intptr_t b = reinterpret_cast<intptr_t>(block);
size_t ptrOffset = block->fCurrPtr - b;
size_t totalSize = ptrOffset + block->fFreeSize;
- size_t userSize = totalSize - kHeaderSize;
intptr_t userStart = b + kHeaderSize;
SkASSERT(!(b % kAlignment));
SkASSERT(!(totalSize % kAlignment));
- SkASSERT(!(userSize % kAlignment));
SkASSERT(!(block->fCurrPtr % kAlignment));
if (fHead != block) {
SkASSERT(block->fLiveCount);
- SkASSERT(userSize >= fMinAllocSize);
+ SkASSERT(totalSize >= fMinAllocSize);
} else {
- SkASSERT(userSize == fPreallocSize);
+ SkASSERT(totalSize == block->fSize);
}
if (!block->fLiveCount) {
SkASSERT(ptrOffset == kHeaderSize);
diff --git a/src/gpu/GrMemoryPool.h b/src/gpu/GrMemoryPool.h
index 43826d354a..e483aab6f2 100644
--- a/src/gpu/GrMemoryPool.h
+++ b/src/gpu/GrMemoryPool.h
@@ -13,16 +13,23 @@
/**
* Allocates memory in blocks and parcels out space in the blocks for allocation
* requests. It is optimized for allocate / release speed over memory
- * effeciency. The interface is designed to be used to implement operator new
+ * efficiency. The interface is designed to be used to implement operator new
* and delete overrides. All allocations are expected to be released before the
* pool's destructor is called. Allocations will be 8-byte aligned.
*/
class GrMemoryPool {
public:
/**
- * Prealloc size is the amount of space to make available at pool creation
- * time and keep around until pool destruction. The min alloc size is the
- * smallest allowed size of additional allocations.
+ * Prealloc size is the amount of space to allocate at pool creation
+ * time and keep around until pool destruction. The min alloc size is
+ * the smallest allowed size of additional allocations. Both sizes are
+ * adjusted to ensure that:
+ * 1. they are 8-byte aligned
+ * 2. minAllocSize >= kSmallestMinAllocSize
+ * 3. preallocSize >= minAllocSize
+ *
+ * Both sizes are what the pool will end up allocating from the system, and
+ * portions of the allocated memory are used for internal bookkeeping.
*/
GrMemoryPool(size_t preallocSize, size_t minAllocSize);
@@ -48,6 +55,16 @@ public:
*/
size_t size() const { return fSize; }
+ /**
+ * Returns the preallocated size of the GrMemoryPool
+ */
+ size_t preallocSize() const { return fHead->fSize; }
+
+ /**
+ * Minimum value of minAllocSize constructor argument.
+ */
+ constexpr static size_t kSmallestMinAllocSize = 1 << 10;
+
private:
struct BlockHeader;
@@ -81,14 +98,7 @@ private:
BlockHeader* fHeader; ///< pointer back to the block header in which an alloc resides
};
- enum {
- // We assume this alignment is good enough for everybody.
- kAlignment = 8,
- kHeaderSize = GR_CT_ALIGN_UP(sizeof(BlockHeader), kAlignment),
- kPerAllocPad = GR_CT_ALIGN_UP(sizeof(AllocHeader), kAlignment),
- };
size_t fSize;
- size_t fPreallocSize;
size_t fMinAllocSize;
BlockHeader* fHead;
BlockHeader* fTail;
@@ -96,6 +106,79 @@ private:
int fAllocationCnt;
int fAllocBlockCnt;
#endif
+
+protected:
+ enum {
+ // We assume this alignment is good enough for everybody.
+ kAlignment = 8,
+ kHeaderSize = GR_CT_ALIGN_UP(sizeof(BlockHeader), kAlignment),
+ kPerAllocPad = GR_CT_ALIGN_UP(sizeof(AllocHeader), kAlignment),
+ };
+};
+
+/**
+ * Variant of GrMemoryPool that can only allocate objects of a single type. It is
+ * not as flexible as GrMemoryPool, but it has more convenient allocate() method,
+ * and more importantly, it guarantees number of objects that are preallocated at
+ * construction or when adding a new memory block. I.e.
+ *
+ * GrMemoryPool pool(3 * sizeof(T), 1000 * sizeof(T));
+ * pool.allocate(sizeof(T));
+ * pool.allocate(sizeof(T));
+ * pool.allocate(sizeof(T));
+ *
+ * will preallocate 3 * sizeof(T) bytes and use some of those bytes for internal
+ * structures. Because of that, last allocate() call will end up allocating a new
+ * block of 1000 * sizeof(T) bytes. In contrast,
+ *
+ * GrObjectMemoryPool<T> pool(3, 1000);
+ * pool.allocate();
+ * pool.allocate();
+ * pool.allocate();
+ *
+ * guarantees to preallocate enough memory for 3 objects of sizeof(T), so last
+ * allocate() will use preallocated memory and won't cause allocation of a new block.
+ *
+ * Same thing is true for the second (minAlloc) ctor argument: this class guarantees
+ * that a newly added block will have enough space for 1000 objects of sizeof(T), while
+ * GrMemoryPool does not.
+ */
+template <class T>
+class GrObjectMemoryPool: public GrMemoryPool {
+public:
+ /**
+ * Preallocates memory for preallocCount objects, and sets new block size to be
+ * enough to hold minAllocCount objects.
+ */
+ GrObjectMemoryPool(size_t preallocCount, size_t minAllocCount)
+ : GrMemoryPool(CountToSize(preallocCount),
+ CountToSize(SkTMax(minAllocCount, kSmallestMinAllocCount))) {
+ }
+
+ /**
+ * Allocates memory for an object, but doesn't construct or otherwise initialize it.
+ * The memory must be freed with release().
+ */
+ T* allocate() { return static_cast<T*>(GrMemoryPool::allocate(sizeof(T))); }
+
+private:
+ constexpr static size_t kTotalObjectSize =
+ kPerAllocPad + GR_CT_ALIGN_UP(sizeof(T), kAlignment);
+
+ constexpr static size_t CountToSize(size_t count) {
+ return kHeaderSize + count * kTotalObjectSize;
+ }
+
+public:
+ /**
+ * Minimum value of minAllocCount constructor argument.
+ */
+ constexpr static size_t kSmallestMinAllocCount =
+ (GrMemoryPool::kSmallestMinAllocSize - kHeaderSize + kTotalObjectSize - 1) /
+ kTotalObjectSize;
};
+template <class T>
+constexpr size_t GrObjectMemoryPool<T>::kSmallestMinAllocCount;
+
#endif
diff --git a/src/gpu/instanced/InstancedRendering.cpp b/src/gpu/instanced/InstancedRendering.cpp
index 30ec5ce0d2..99a749ad04 100644
--- a/src/gpu/instanced/InstancedRendering.cpp
+++ b/src/gpu/instanced/InstancedRendering.cpp
@@ -18,7 +18,7 @@ namespace gr_instanced {
InstancedRendering::InstancedRendering(GrGpu* gpu)
: fGpu(SkRef(gpu)),
fState(State::kRecordingDraws),
- fDrawPool(1024 * sizeof(Batch::Draw), 1024 * sizeof(Batch::Draw)) {
+ fDrawPool(1024, 1024) {
}
GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
@@ -241,7 +241,7 @@ InstancedRendering::Batch::Batch(uint32_t classID, InstancedRendering* ir)
fIsTracked(false),
fNumDraws(1),
fNumChangesInGeometry(0) {
- fHeadDraw = fTailDraw = (Draw*)fInstancedRendering->fDrawPool.allocate(sizeof(Draw));
+ fHeadDraw = fTailDraw = fInstancedRendering->fDrawPool.allocate();
#ifdef SK_DEBUG
fHeadDraw->fGeometry = {-1, 0};
#endif
diff --git a/src/gpu/instanced/InstancedRendering.h b/src/gpu/instanced/InstancedRendering.h
index 1325370a28..97d6e1963d 100644
--- a/src/gpu/instanced/InstancedRendering.h
+++ b/src/gpu/instanced/InstancedRendering.h
@@ -173,7 +173,7 @@ private:
const sk_sp<GrGpu> fGpu;
State fState;
- GrMemoryPool fDrawPool;
+ GrObjectMemoryPool<Batch::Draw> fDrawPool;
SkSTArray<1024, ParamsTexel, true> fParams;
BatchList fTrackedBatches;
sk_sp<const GrBuffer> fVertexBuffer;