path: root/src/gpu/GrBufferAllocPool.cpp
author     robertphillips <robertphillips@google.com>   2015-06-23 08:23:08 -0700
committer  Commit bot <commit-bot@chromium.org>         2015-06-23 08:23:08 -0700
commit  e935f1a0e2351373c33600b8388492ce1218014a (patch)
tree    9182150ea7c4cedda75dc2b9d2f6027937c7aed8 /src/gpu/GrBufferAllocPool.cpp
parent  591a2ca842cb34919695a32757d700dee8b7822c (diff)
Refactor GrBufferAllocPools to use resource cache
Diffstat (limited to 'src/gpu/GrBufferAllocPool.cpp')
-rw-r--r--  src/gpu/GrBufferAllocPool.cpp  | 124
1 file changed, 35 insertions(+), 89 deletions(-)
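
The user-visible change is that the pool constructors no longer take a block size or a preallocated-buffer count: each subclass now hard-codes a minimum buffer size and obtains buffers from the resource cache on demand. A minimal before/after sketch from a hypothetical caller, assuming a valid GrGpu* gpu is in scope (the old size/count constants are placeholders for illustration, not part of this diff):

// Before this patch (old signature, shown for comparison):
//   GrVertexBufferAllocPool* vertexPool =
//       new GrVertexBufferAllocPool(gpu, kVertexPoolBlockSize, kVertexPoolPreallocCnt);

// After this patch: only the GrGpu is needed. Block sizing is internal
// (MIN_VERTEX_BUFFER_SIZE / MIN_INDEX_BUFFER_SIZE below) and buffer reuse
// comes from the resource cache via GrResourceProvider.
GrVertexBufferAllocPool* vertexPool = new GrVertexBufferAllocPool(gpu);
GrIndexBufferAllocPool* indexPool = new GrIndexBufferAllocPool(gpu);
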
diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp
index be88b32cb1..e9172108f3 100644
--- a/src/gpu/GrBufferAllocPool.cpp
+++ b/src/gpu/GrBufferAllocPool.cpp
@@ -9,8 +9,10 @@
#include "GrBufferAllocPool.h"
#include "GrCaps.h"
+#include "GrContext.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
+#include "GrResourceProvider.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"
@@ -22,6 +24,9 @@
static void VALIDATE(bool = false) {}
#endif
+static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
+static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
+
// page size
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)
@@ -37,9 +42,8 @@ do {
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
BufferType bufferType,
- size_t blockSize,
- int preallocBufferCnt)
- : fBlocks(SkTMax(8, 2*preallocBufferCnt)) {
+ size_t blockSize)
+ : fBlocks(8) {
fGpu = SkRef(gpu);
@@ -49,19 +53,10 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
fBytesInUse = 0;
- fPreallocBuffersInUse = 0;
- fPreallocBufferStartIdx = 0;
- for (int i = 0; i < preallocBufferCnt; ++i) {
- GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
- if (buffer) {
- *fPreallocBuffers.append() = buffer;
- }
- }
fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold();
}
-GrBufferAllocPool::~GrBufferAllocPool() {
- VALIDATE();
+void GrBufferAllocPool::deleteBlocks() {
if (fBlocks.count()) {
GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
if (buffer->isMapped()) {
@@ -71,34 +66,22 @@ GrBufferAllocPool::~GrBufferAllocPool() {
while (!fBlocks.empty()) {
this->destroyBlock();
}
- fPreallocBuffers.unrefAll();
+ SkASSERT(!fBufferPtr);
+}
+
+GrBufferAllocPool::~GrBufferAllocPool() {
+ VALIDATE();
+ this->deleteBlocks();
fGpu->unref();
}
void GrBufferAllocPool::reset() {
VALIDATE();
fBytesInUse = 0;
- if (fBlocks.count()) {
- GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
- if (buffer->isMapped()) {
- UNMAP_BUFFER(fBlocks.back());
- }
- }
- // fPreallocBuffersInUse will be decremented down to zero in the while loop
- int preallocBuffersInUse = fPreallocBuffersInUse;
- while (!fBlocks.empty()) {
- this->destroyBlock();
- }
- if (fPreallocBuffers.count()) {
- // must set this after above loop.
- fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
- preallocBuffersInUse) %
- fPreallocBuffers.count();
- }
+ this->deleteBlocks();
// we may have created a large cpu mirror of a large VB. Reset the size
- // to match our pre-allocated VBs.
+ // to match our minimum.
fCpuData.reset(fMinBlockSize);
- SkASSERT(0 == fPreallocBuffersInUse);
VALIDATE();
}
@@ -170,8 +153,7 @@ void* GrBufferAllocPool::makeSpace(size_t size,
if (fBufferPtr) {
BufferBlock& back = fBlocks.back();
size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
- size_t pad = GrSizeAlignUpPad(usedBytes,
- alignment);
+ size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
if ((size + pad) <= back.fBytesFree) {
memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
usedBytes += pad;
@@ -209,12 +191,6 @@ void* GrBufferAllocPool::makeSpace(size_t size,
void GrBufferAllocPool::putBack(size_t bytes) {
VALIDATE();
- // if the putBack unwinds all the preallocated buffers then we will
- // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
- // will be decremented. It will reach zero if all blocks using preallocated
- // buffers are released.
- int preallocBuffersInUse = fPreallocBuffersInUse;
-
while (bytes) {
// caller shouldn't try to put back more than they've taken
SkASSERT(!fBlocks.empty());
@@ -236,11 +212,7 @@ void GrBufferAllocPool::putBack(size_t bytes) {
break;
}
}
- if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
- fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
- preallocBuffersInUse) %
- fPreallocBuffers.count();
- }
+
VALIDATE();
}
@@ -253,24 +225,13 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
BufferBlock& block = fBlocks.push_back();
- if (size == fMinBlockSize &&
- fPreallocBuffersInUse < fPreallocBuffers.count()) {
-
- uint32_t nextBuffer = (fPreallocBuffersInUse +
- fPreallocBufferStartIdx) %
- fPreallocBuffers.count();
- block.fBuffer = fPreallocBuffers[nextBuffer];
- block.fBuffer->ref();
- ++fPreallocBuffersInUse;
- } else {
- block.fBuffer = this->createBuffer(size);
- if (NULL == block.fBuffer) {
- fBlocks.pop_back();
- return false;
- }
+ block.fBuffer = this->getBuffer(size);
+ if (NULL == block.fBuffer) {
+ fBlocks.pop_back();
+ return false;
}
- block.fBytesFree = size;
+ block.fBytesFree = block.fBuffer->gpuMemorySize();
if (fBufferPtr) {
SkASSERT(fBlocks.count() > 1);
BufferBlock& prev = fBlocks.fromBack(1);
@@ -297,7 +258,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
}
if (NULL == fBufferPtr) {
- fBufferPtr = fCpuData.reset(size);
+ fBufferPtr = fCpuData.reset(block.fBytesFree);
}
VALIDATE(true);
@@ -309,15 +270,7 @@ void GrBufferAllocPool::destroyBlock() {
SkASSERT(!fBlocks.empty());
BufferBlock& block = fBlocks.back();
- if (fPreallocBuffersInUse > 0) {
- uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
- fPreallocBufferStartIdx +
- (fPreallocBuffers.count() - 1)) %
- fPreallocBuffers.count();
- if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
- --fPreallocBuffersInUse;
- }
- }
+
SkASSERT(!block.fBuffer->isMapped());
block.fBuffer->unref();
fBlocks.pop_back();
@@ -345,24 +298,22 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
VALIDATE(true);
}
-GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
+GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
+
+ GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
+
if (kIndex_BufferType == fBufferType) {
- return fGpu->createIndexBuffer(size, true);
+ return rp->getIndexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
} else {
SkASSERT(kVertex_BufferType == fBufferType);
- return fGpu->createVertexBuffer(size, true);
+ return rp->getVertexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
}
}
////////////////////////////////////////////////////////////////////////////////
-GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
- size_t bufferSize,
- int preallocBufferCnt)
- : GrBufferAllocPool(gpu,
- kVertex_BufferType,
- bufferSize,
- preallocBufferCnt) {
+GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
+ : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
}
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
@@ -389,13 +340,8 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
////////////////////////////////////////////////////////////////////////////////
-GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
- size_t bufferSize,
- int preallocBufferCnt)
- : GrBufferAllocPool(gpu,
- kIndex_BufferType,
- bufferSize,
- preallocBufferCnt) {
+GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
+ : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
}
void* GrIndexBufferAllocPool::makeSpace(int indexCount,
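
Two details of the refactor above are easy to miss. The createBuffer() -> getBuffer() rename is not just cosmetic: buffers now come from GrResourceProvider, so buffers released by the pool can be recycled through the resource cache rather than destroyed. And because the cache may hand back a recycled scratch buffer larger than requested, createBlock() now records block.fBytesFree (and sizes the CPU mirror) from the buffer's actual gpuMemorySize() rather than from the requested size. A small illustrative sketch of that second point, assuming it runs inside a GrBufferAllocPool member as createBlock() does (the 1 KiB request is a made-up value):

// Sketch only: the resource provider may return a recycled buffer that is
// bigger than the request, so bookkeeping must use the buffer's real size.
GrGeometryBuffer* buffer = this->getBuffer(1 << 10); // request 1 KiB
if (buffer) {
    size_t usable = buffer->gpuMemorySize();         // may exceed 1 KiB if recycled
    // block.fBytesFree and fCpuData.reset() are sized from 'usable',
    // not from the requested 1 KiB.
}
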