aboutsummaryrefslogtreecommitdiffhomepage
path: root/src/gpu
diff options
context:
space:
mode:
Diffstat (limited to 'src/gpu')
-rw-r--r--src/gpu/GrBatchTarget.cpp19
-rw-r--r--src/gpu/GrBatchTarget.h16
-rw-r--r--src/gpu/GrBufferAllocPool.cpp124
-rw-r--r--src/gpu/GrBufferAllocPool.h32
-rw-r--r--src/gpu/GrCaps.cpp2
-rw-r--r--src/gpu/GrGpu.cpp12
-rw-r--r--src/gpu/GrIndexBuffer.h17
-rw-r--r--src/gpu/GrResourceProvider.cpp57
-rw-r--r--src/gpu/GrResourceProvider.h3
-rw-r--r--src/gpu/GrVertexBuffer.h17
-rw-r--r--src/gpu/gl/GrGLCaps.cpp5
11 files changed, 143 insertions, 161 deletions
diff --git a/src/gpu/GrBatchTarget.cpp b/src/gpu/GrBatchTarget.cpp
index b6dadd699b..31b4cc9f47 100644
--- a/src/gpu/GrBatchTarget.cpp
+++ b/src/gpu/GrBatchTarget.cpp
@@ -10,16 +10,27 @@
#include "GrBatchAtlas.h"
#include "GrPipeline.h"
+static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
+static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
+
+static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
+static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
+
GrBatchTarget::GrBatchTarget(GrGpu* gpu)
: fGpu(gpu)
- , fVertexPool(gpu)
- , fIndexPool(gpu)
, fFlushBuffer(kFlushBufferInitialSizeInBytes)
, fIter(fFlushBuffer)
, fNumberOfDraws(0)
, fCurrentToken(0)
, fLastFlushedToken(0)
, fInlineUpdatesIndex(0) {
+
+ fVertexPool.reset(SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu,
+ DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
+ DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS)));
+ fIndexPool.reset(SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu,
+ DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
+ DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS)));
}
void GrBatchTarget::flushNext(int n) {
@@ -54,11 +65,11 @@ void GrBatchTarget::flushNext(int n) {
void* GrBatchTarget::makeVertSpace(size_t vertexSize, int vertexCount,
const GrVertexBuffer** buffer, int* startVertex) {
- return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
+ return fVertexPool->makeSpace(vertexSize, vertexCount, buffer, startVertex);
}
uint16_t* GrBatchTarget::makeIndexSpace(int indexCount,
const GrIndexBuffer** buffer, int* startIndex) {
- return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
+ return reinterpret_cast<uint16_t*>(fIndexPool->makeSpace(indexCount, buffer, startIndex));
}
diff --git a/src/gpu/GrBatchTarget.h b/src/gpu/GrBatchTarget.h
index 19cfb09cb7..80a8b9bad3 100644
--- a/src/gpu/GrBatchTarget.h
+++ b/src/gpu/GrBatchTarget.h
@@ -115,26 +115,26 @@ public:
const GrIndexBuffer** buffer, int* startIndex);
// A helper for draws which overallocate and then return data to the pool
- void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }
+ void putBackIndices(size_t indices) { fIndexPool->putBack(indices * sizeof(uint16_t)); }
void putBackVertices(size_t vertices, size_t vertexStride) {
- fVertexPool.putBack(vertices * vertexStride);
+ fVertexPool->putBack(vertices * vertexStride);
}
void reset() {
- fVertexPool.reset();
- fIndexPool.reset();
+ fVertexPool->reset();
+ fIndexPool->reset();
}
private:
void unmapVertexAndIndexBuffers() {
- fVertexPool.unmap();
- fIndexPool.unmap();
+ fVertexPool->unmap();
+ fIndexPool->unmap();
}
GrGpu* fGpu;
- GrVertexBufferAllocPool fVertexPool;
- GrIndexBufferAllocPool fIndexPool;
+ SkAutoTDelete<GrVertexBufferAllocPool> fVertexPool;
+ SkAutoTDelete<GrIndexBufferAllocPool> fIndexPool;
typedef void* TBufferAlign; // This wouldn't be enough align if a command used long double.
diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp
index e9172108f3..be88b32cb1 100644
--- a/src/gpu/GrBufferAllocPool.cpp
+++ b/src/gpu/GrBufferAllocPool.cpp
@@ -9,10 +9,8 @@
#include "GrBufferAllocPool.h"
#include "GrCaps.h"
-#include "GrContext.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
-#include "GrResourceProvider.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"
@@ -24,9 +22,6 @@
static void VALIDATE(bool = false) {}
#endif
-static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
-static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
-
// page size
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)
@@ -42,8 +37,9 @@ do {
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
BufferType bufferType,
- size_t blockSize)
- : fBlocks(8) {
+ size_t blockSize,
+ int preallocBufferCnt)
+ : fBlocks(SkTMax(8, 2*preallocBufferCnt)) {
fGpu = SkRef(gpu);
@@ -53,10 +49,19 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
fBytesInUse = 0;
+ fPreallocBuffersInUse = 0;
+ fPreallocBufferStartIdx = 0;
+ for (int i = 0; i < preallocBufferCnt; ++i) {
+ GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
+ if (buffer) {
+ *fPreallocBuffers.append() = buffer;
+ }
+ }
fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold();
}
-void GrBufferAllocPool::deleteBlocks() {
+GrBufferAllocPool::~GrBufferAllocPool() {
+ VALIDATE();
if (fBlocks.count()) {
GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
if (buffer->isMapped()) {
@@ -66,22 +71,34 @@ void GrBufferAllocPool::deleteBlocks() {
while (!fBlocks.empty()) {
this->destroyBlock();
}
- SkASSERT(!fBufferPtr);
-}
-
-GrBufferAllocPool::~GrBufferAllocPool() {
- VALIDATE();
- this->deleteBlocks();
+ fPreallocBuffers.unrefAll();
fGpu->unref();
}
void GrBufferAllocPool::reset() {
VALIDATE();
fBytesInUse = 0;
- this->deleteBlocks();
+ if (fBlocks.count()) {
+ GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
+ if (buffer->isMapped()) {
+ UNMAP_BUFFER(fBlocks.back());
+ }
+ }
+ // fPreallocBuffersInUse will be decremented down to zero in the while loop
+ int preallocBuffersInUse = fPreallocBuffersInUse;
+ while (!fBlocks.empty()) {
+ this->destroyBlock();
+ }
+ if (fPreallocBuffers.count()) {
+ // must set this after above loop.
+ fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
+ preallocBuffersInUse) %
+ fPreallocBuffers.count();
+ }
// we may have created a large cpu mirror of a large VB. Reset the size
- // to match our minimum.
+ // to match our pre-allocated VBs.
fCpuData.reset(fMinBlockSize);
+ SkASSERT(0 == fPreallocBuffersInUse);
VALIDATE();
}
@@ -153,7 +170,8 @@ void* GrBufferAllocPool::makeSpace(size_t size,
if (fBufferPtr) {
BufferBlock& back = fBlocks.back();
size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
- size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
+ size_t pad = GrSizeAlignUpPad(usedBytes,
+ alignment);
if ((size + pad) <= back.fBytesFree) {
memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
usedBytes += pad;
@@ -191,6 +209,12 @@ void* GrBufferAllocPool::makeSpace(size_t size,
void GrBufferAllocPool::putBack(size_t bytes) {
VALIDATE();
+ // if the putBack unwinds all the preallocated buffers then we will
+ // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
+ // will be decremented. It will reach zero if all blocks using preallocated
+ // buffers are released.
+ int preallocBuffersInUse = fPreallocBuffersInUse;
+
while (bytes) {
// caller shouldn't try to put back more than they've taken
SkASSERT(!fBlocks.empty());
@@ -212,7 +236,11 @@ void GrBufferAllocPool::putBack(size_t bytes) {
break;
}
}
-
+ if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
+ fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
+ preallocBuffersInUse) %
+ fPreallocBuffers.count();
+ }
VALIDATE();
}
@@ -225,13 +253,24 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
BufferBlock& block = fBlocks.push_back();
- block.fBuffer = this->getBuffer(size);
- if (NULL == block.fBuffer) {
- fBlocks.pop_back();
- return false;
+ if (size == fMinBlockSize &&
+ fPreallocBuffersInUse < fPreallocBuffers.count()) {
+
+ uint32_t nextBuffer = (fPreallocBuffersInUse +
+ fPreallocBufferStartIdx) %
+ fPreallocBuffers.count();
+ block.fBuffer = fPreallocBuffers[nextBuffer];
+ block.fBuffer->ref();
+ ++fPreallocBuffersInUse;
+ } else {
+ block.fBuffer = this->createBuffer(size);
+ if (NULL == block.fBuffer) {
+ fBlocks.pop_back();
+ return false;
+ }
}
- block.fBytesFree = block.fBuffer->gpuMemorySize();
+ block.fBytesFree = size;
if (fBufferPtr) {
SkASSERT(fBlocks.count() > 1);
BufferBlock& prev = fBlocks.fromBack(1);
@@ -258,7 +297,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
}
if (NULL == fBufferPtr) {
- fBufferPtr = fCpuData.reset(block.fBytesFree);
+ fBufferPtr = fCpuData.reset(size);
}
VALIDATE(true);
@@ -270,7 +309,15 @@ void GrBufferAllocPool::destroyBlock() {
SkASSERT(!fBlocks.empty());
BufferBlock& block = fBlocks.back();
-
+ if (fPreallocBuffersInUse > 0) {
+ uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
+ fPreallocBufferStartIdx +
+ (fPreallocBuffers.count() - 1)) %
+ fPreallocBuffers.count();
+ if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
+ --fPreallocBuffersInUse;
+ }
+ }
SkASSERT(!block.fBuffer->isMapped());
block.fBuffer->unref();
fBlocks.pop_back();
@@ -298,22 +345,24 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
VALIDATE(true);
}
-GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
-
- GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
-
+GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
if (kIndex_BufferType == fBufferType) {
- return rp->getIndexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
+ return fGpu->createIndexBuffer(size, true);
} else {
SkASSERT(kVertex_BufferType == fBufferType);
- return rp->getVertexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
+ return fGpu->createVertexBuffer(size, true);
}
}
////////////////////////////////////////////////////////////////////////////////
-GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
- : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
+GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
+ size_t bufferSize,
+ int preallocBufferCnt)
+ : GrBufferAllocPool(gpu,
+ kVertex_BufferType,
+ bufferSize,
+ preallocBufferCnt) {
}
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
@@ -340,8 +389,13 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
////////////////////////////////////////////////////////////////////////////////
-GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
- : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
+GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
+ size_t bufferSize,
+ int preallocBufferCnt)
+ : GrBufferAllocPool(gpu,
+ kIndex_BufferType,
+ bufferSize,
+ preallocBufferCnt) {
}
void* GrIndexBufferAllocPool::makeSpace(int indexCount,
diff --git a/src/gpu/GrBufferAllocPool.h b/src/gpu/GrBufferAllocPool.h
index bbcb8a5d7c..cd5d1971da 100644
--- a/src/gpu/GrBufferAllocPool.h
+++ b/src/gpu/GrBufferAllocPool.h
@@ -64,12 +64,16 @@ protected:
* @param bufferSize The minimum size of created buffers.
* This value will be clamped to some
* reasonable minimum.
+ * @param preallocBufferCnt The pool will allocate this number of
+ * buffers at bufferSize and keep them until it
+ * is destroyed.
*/
GrBufferAllocPool(GrGpu* gpu,
BufferType bufferType,
- size_t bufferSize = 0);
+ size_t bufferSize = 0,
+ int preallocBufferCnt = 0);
- virtual ~GrBufferAllocPool();
+ virtual ~GrBufferAllocPool();
/**
* Returns a block of memory to hold data. A buffer designated to hold the
@@ -95,7 +99,7 @@ protected:
const GrGeometryBuffer** buffer,
size_t* offset);
- GrGeometryBuffer* getBuffer(size_t size);
+ GrGeometryBuffer* createBuffer(size_t size);
private:
struct BufferBlock {
@@ -105,7 +109,6 @@ private:
bool createBlock(size_t requestSize);
void destroyBlock();
- void deleteBlocks();
void flushCpuData(const BufferBlock& block, size_t flushSize);
#ifdef SK_DEBUG
void validate(bool unusedBlockAllowed = false) const;
@@ -114,10 +117,15 @@ private:
size_t fBytesInUse;
GrGpu* fGpu;
+ SkTDArray<GrGeometryBuffer*> fPreallocBuffers;
size_t fMinBlockSize;
BufferType fBufferType;
SkTArray<BufferBlock> fBlocks;
+ int fPreallocBuffersInUse;
+ // We attempt to cycle through the preallocated buffers rather than
+ // always starting from the first.
+ int fPreallocBufferStartIdx;
SkAutoMalloc fCpuData;
void* fBufferPtr;
size_t fGeometryBufferMapThreshold;
@@ -134,8 +142,13 @@ public:
* Constructor
*
* @param gpu The GrGpu used to create the vertex buffers.
+ * @param bufferSize The minimum size of created VBs. This value
+ * will be clamped to some reasonable minimum.
+ * @param preallocBufferCnt The pool will allocate this number of VBs at
+ * bufferSize and keep them until it is
+ * destroyed.
*/
- GrVertexBufferAllocPool(GrGpu* gpu);
+ GrVertexBufferAllocPool(GrGpu* gpu, size_t bufferSize = 0, int preallocBufferCnt = 0);
/**
* Returns a block of memory to hold vertices. A buffer designated to hold
@@ -178,8 +191,15 @@ public:
* Constructor
*
* @param gpu The GrGpu used to create the index buffers.
+ * @param bufferSize The minimum size of created IBs. This value
+ * will be clamped to some reasonable minimum.
+ * @param preallocBufferCnt The pool will allocate this number of IBs at
+ * bufferSize and keep them until it is
+ * destroyed.
*/
- GrIndexBufferAllocPool(GrGpu* gpu);
+ GrIndexBufferAllocPool(GrGpu* gpu,
+ size_t bufferSize = 0,
+ int preallocBufferCnt = 0);
/**
* Returns a block of memory to hold indices. A buffer designated to hold
diff --git a/src/gpu/GrCaps.cpp b/src/gpu/GrCaps.cpp
index 900bd65cc2..f2e88b01c4 100644
--- a/src/gpu/GrCaps.cpp
+++ b/src/gpu/GrCaps.cpp
@@ -86,7 +86,6 @@ GrCaps::GrCaps(const GrContextOptions& options) {
fStencilWrapOpsSupport = false;
fDiscardRenderTargetSupport = false;
fReuseScratchTextures = true;
- fReuseScratchBuffers = true;
fGpuTracingSupport = false;
fCompressedTexSubImageSupport = false;
fOversizedStencilSupport = false;
@@ -147,7 +146,6 @@ SkString GrCaps::dump() const {
r.appendf("Stencil Wrap Ops Support : %s\n", gNY[fStencilWrapOpsSupport]);
r.appendf("Discard Render Target Support : %s\n", gNY[fDiscardRenderTargetSupport]);
r.appendf("Reuse Scratch Textures : %s\n", gNY[fReuseScratchTextures]);
- r.appendf("Reuse Scratch Buffers : %s\n", gNY[fReuseScratchBuffers]);
r.appendf("Gpu Tracing Support : %s\n", gNY[fGpuTracingSupport]);
r.appendf("Compressed Update Support : %s\n", gNY[fCompressedTexSubImageSupport]);
r.appendf("Oversized Stencil Support : %s\n", gNY[fOversizedStencilSupport]);
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index 728fb88062..d46d09d5c7 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -199,20 +199,12 @@ GrRenderTarget* GrGpu::wrapBackendRenderTarget(const GrBackendRenderTargetDesc&
GrVertexBuffer* GrGpu::createVertexBuffer(size_t size, bool dynamic) {
this->handleDirtyContext();
- GrVertexBuffer* vb = this->onCreateVertexBuffer(size, dynamic);
- if (!this->caps()->reuseScratchBuffers()) {
- vb->resourcePriv().removeScratchKey();
- }
- return vb;
+ return this->onCreateVertexBuffer(size, dynamic);
}
GrIndexBuffer* GrGpu::createIndexBuffer(size_t size, bool dynamic) {
this->handleDirtyContext();
- GrIndexBuffer* ib = this->onCreateIndexBuffer(size, dynamic);
- if (!this->caps()->reuseScratchBuffers()) {
- ib->resourcePriv().removeScratchKey();
- }
- return ib;
+ return this->onCreateIndexBuffer(size, dynamic);
}
void GrGpu::clear(const SkIRect* rect,
diff --git a/src/gpu/GrIndexBuffer.h b/src/gpu/GrIndexBuffer.h
index bf64ff87d2..4dfd1c1b07 100644
--- a/src/gpu/GrIndexBuffer.h
+++ b/src/gpu/GrIndexBuffer.h
@@ -13,18 +13,8 @@
#include "GrGeometryBuffer.h"
-
class GrIndexBuffer : public GrGeometryBuffer {
public:
- static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
- static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
-
- GrScratchKey::Builder builder(key, kType, 2);
-
- builder[0] = SkToUInt(size);
- builder[1] = dynamic ? 1 : 0;
- }
-
/**
* Retrieves the maximum number of quads that could be rendered
* from the index buffer (using kTriangles_GrPrimitiveType).
@@ -35,12 +25,7 @@ public:
}
protected:
GrIndexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
- : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
- GrScratchKey key;
- ComputeScratchKey(gpuMemorySize, dynamic, &key);
- this->setScratchKey(key);
- }
-
+ : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {}
private:
typedef GrGeometryBuffer INHERITED;
};
diff --git a/src/gpu/GrResourceProvider.cpp b/src/gpu/GrResourceProvider.cpp
index 3c447bded6..4b9d265129 100644
--- a/src/gpu/GrResourceProvider.cpp
+++ b/src/gpu/GrResourceProvider.cpp
@@ -28,7 +28,7 @@ const GrIndexBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16
const GrUniqueKey& key) {
size_t bufferSize = patternSize * reps * sizeof(uint16_t);
- GrIndexBuffer* buffer = this->getIndexBuffer(bufferSize, /* dynamic = */ false, true);
+ GrIndexBuffer* buffer = this->gpu()->createIndexBuffer(bufferSize, /* dynamic = */ false);
if (!buffer) {
return NULL;
}
@@ -83,58 +83,3 @@ GrPathRange* GrResourceProvider::createGlyphs(const SkTypeface* tf, const SkDesc
return this->gpu()->pathRendering()->createGlyphs(tf, desc, stroke);
}
-GrIndexBuffer* GrResourceProvider::getIndexBuffer(size_t size, bool dynamic,
- bool calledDuringFlush) {
- if (this->isAbandoned()) {
- return NULL;
- }
-
- if (dynamic) {
- // bin by pow2 with a reasonable min
- static const uint32_t MIN_SIZE = 1 << 12;
- size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
-
- GrScratchKey key;
- GrIndexBuffer::ComputeScratchKey(size, dynamic, &key);
- uint32_t scratchFlags = 0;
- if (calledDuringFlush) {
- scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
- } else {
- scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
- }
- GrGpuResource* resource = this->cache()->findAndRefScratchResource(key, scratchFlags);
- if (resource) {
- return static_cast<GrIndexBuffer*>(resource);
- }
- }
-
- return this->gpu()->createIndexBuffer(size, dynamic);
-}
-
-GrVertexBuffer* GrResourceProvider::getVertexBuffer(size_t size, bool dynamic,
- bool calledDuringFlush) {
- if (this->isAbandoned()) {
- return NULL;
- }
-
- if (dynamic) {
- // bin by pow2 with a reasonable min
- static const uint32_t MIN_SIZE = 1 << 15;
- size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
-
- GrScratchKey key;
- GrVertexBuffer::ComputeScratchKey(size, dynamic, &key);
- uint32_t scratchFlags = 0;
- if (calledDuringFlush) {
- scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
- } else {
- scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
- }
- GrGpuResource* resource = this->cache()->findAndRefScratchResource(key, scratchFlags);
- if (resource) {
- return static_cast<GrVertexBuffer*>(resource);
- }
- }
-
- return this->gpu()->createVertexBuffer(size, dynamic);
-}
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index f0b79b97f5..2962ad3788 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -86,9 +86,6 @@ public:
using GrTextureProvider::findAndRefResourceByUniqueKey;
using GrTextureProvider::abandon;
- GrIndexBuffer* getIndexBuffer(size_t size, bool dynamic, bool calledDuringFlush);
- GrVertexBuffer* getVertexBuffer(size_t size, bool dynamic, bool calledDuringFlush);
-
private:
const GrIndexBuffer* createInstancedIndexBuffer(const uint16_t* pattern,
int patternSize,
diff --git a/src/gpu/GrVertexBuffer.h b/src/gpu/GrVertexBuffer.h
index 3c12cd76d2..3f2ada2a2c 100644
--- a/src/gpu/GrVertexBuffer.h
+++ b/src/gpu/GrVertexBuffer.h
@@ -14,24 +14,9 @@
#include "GrGeometryBuffer.h"
class GrVertexBuffer : public GrGeometryBuffer {
-public:
- static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
- static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
-
- GrScratchKey::Builder builder(key, kType, 2);
-
- builder[0] = SkToUInt(size);
- builder[1] = dynamic ? 1 : 0;
- }
-
protected:
GrVertexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
- : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
- GrScratchKey key;
- ComputeScratchKey(gpuMemorySize, dynamic, &key);
- this->setScratchKey(key);
- }
-
+ : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {}
private:
typedef GrGeometryBuffer INHERITED;
};
diff --git a/src/gpu/gl/GrGLCaps.cpp b/src/gpu/gl/GrGLCaps.cpp
index 0c53006bd2..b23619d8b6 100644
--- a/src/gpu/gl/GrGLCaps.cpp
+++ b/src/gpu/gl/GrGLCaps.cpp
@@ -414,11 +414,6 @@ void GrGLCaps::init(const GrContextOptions& contextOptions,
fReuseScratchTextures = kARM_GrGLVendor != ctxInfo.vendor() &&
kQualcomm_GrGLVendor != ctxInfo.vendor();
-#if 0
- fReuseScratchBuffers = kARM_GrGLVendor != ctxInfo.vendor() &&
- kQualcomm_GrGLVendor != ctxInfo.vendor();
-#endif
-
if (GrGLCaps::kES_IMG_MsToTexture_MSFBOType == fMSFBOType) {
GR_GL_GetIntegerv(gli, GR_GL_MAX_SAMPLES_IMG, &fMaxSampleCount);
} else if (GrGLCaps::kNone_MSFBOType != fMSFBOType) {