author    cdalton <cdalton@nvidia.com>    2016-03-25 01:54:54 -0700
committer Commit bot <commit-bot@chromium.org>    2016-03-25 01:54:55 -0700
commit    8b1bff29675afd25843439eade634a57f68fe16f (patch)
tree      07e7676e54f5520b14e2e1896c64e96b4f61f3c3 /src
parent    6b3eacb0dfaeb3374d410c8c041bd39cd066e1ea (diff)
Consolidate GPU buffer implementations
Consolidates all the different buffer implementations into a single GrBuffer class. This will
allow us to add new buffer types, use DSA in OpenGL, track buffer bindings by unique ID, cache
buffers without respect to the type of data they have been used for previously, etc.

This change is strictly a refactor; it introduces no change in functionality.

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1825393002

Review URL: https://codereview.chromium.org/1825393002
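As a quick illustration of the API consolidation (a sketch only; the call sites are hypothetical,
but the signatures match the GrGpu.h changes in this diff):

    // Before: one creation entry point per buffer class.
    // GrVertexBuffer*   vb = gpu->createVertexBuffer(size, /*dynamic=*/true);
    // GrIndexBuffer*    ib = gpu->createIndexBuffer(size, /*dynamic=*/false);
    // GrTransferBuffer* tb = gpu->createTransferBuffer(size, type);
    //
    // After: a single GrBuffer, parameterized by buffer type and access pattern.
    GrBuffer* vertexBuffer = gpu->createBuffer(kVertex_GrBufferType, size, kDynamic_GrAccessPattern);
    GrBuffer* indexBuffer  = gpu->createBuffer(kIndex_GrBufferType, size, kStatic_GrAccessPattern);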
Diffstat (limited to 'src')
-rw-r--r--  src/gpu/GrBatchAtlas.cpp | 1
-rw-r--r--  src/gpu/GrBatchFlushState.cpp | 4
-rw-r--r--  src/gpu/GrBatchFlushState.h | 8
-rw-r--r--  src/gpu/GrBuffer.h | 145
-rw-r--r--  src/gpu/GrBufferAllocPool.cpp | 45
-rw-r--r--  src/gpu/GrBufferAllocPool.h | 35
-rw-r--r--  src/gpu/GrCaps.cpp | 2
-rw-r--r--  src/gpu/GrDrawTarget.cpp | 1
-rw-r--r--  src/gpu/GrDrawTarget.h | 3
-rw-r--r--  src/gpu/GrGeometryBuffer.h | 124
-rw-r--r--  src/gpu/GrGpu.cpp | 33
-rw-r--r--  src/gpu/GrGpu.h | 68
-rw-r--r--  src/gpu/GrIndexBuffer.h | 51
-rw-r--r--  src/gpu/GrMesh.h | 22
-rw-r--r--  src/gpu/GrOvalRenderer.cpp | 8
-rw-r--r--  src/gpu/GrResourceProvider.cpp | 74
-rw-r--r--  src/gpu/GrResourceProvider.h | 49
-rw-r--r--  src/gpu/GrSoftwarePathRenderer.cpp | 1
-rw-r--r--  src/gpu/GrTest.cpp | 10
-rwxr-xr-x  src/gpu/GrTransferBuffer.h | 76
-rw-r--r--  src/gpu/GrVertexBuffer.h | 42
-rw-r--r--  src/gpu/batches/GrAAConvexPathRenderer.cpp | 8
-rw-r--r--  src/gpu/batches/GrAADistanceFieldPathRenderer.cpp | 11
-rw-r--r--  src/gpu/batches/GrAAFillRectBatch.cpp | 4
-rw-r--r--  src/gpu/batches/GrAAHairLinePathRenderer.cpp | 15
-rw-r--r--  src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp | 4
-rw-r--r--  src/gpu/batches/GrAAStrokeRectBatch.cpp | 9
-rw-r--r--  src/gpu/batches/GrAtlasTextBatch.cpp | 5
-rw-r--r--  src/gpu/batches/GrAtlasTextBatch.h | 4
-rw-r--r--  src/gpu/batches/GrDefaultPathRenderer.cpp | 4
-rw-r--r--  src/gpu/batches/GrDrawVerticesBatch.cpp | 4
-rw-r--r--  src/gpu/batches/GrNinePatch.cpp | 2
-rw-r--r--  src/gpu/batches/GrNonAAFillRectBatch.cpp | 2
-rw-r--r--  src/gpu/batches/GrNonAAStrokeRectBatch.cpp | 2
-rw-r--r--  src/gpu/batches/GrPLSPathRenderer.cpp | 6
-rw-r--r--  src/gpu/batches/GrTInstanceBatch.h | 4
-rw-r--r--  src/gpu/batches/GrTessellatingPathRenderer.cpp | 15
-rw-r--r--  src/gpu/batches/GrTestBatch.h | 1
-rw-r--r--  src/gpu/batches/GrVertexBatch.cpp | 6
-rw-r--r--  src/gpu/batches/GrVertexBatch.h | 2
-rw-r--r--  src/gpu/effects/GrDashingEffect.cpp | 1
-rw-r--r--  src/gpu/gl/GrGLBuffer.cpp | 336
-rw-r--r--  src/gpu/gl/GrGLBuffer.h | 61
-rw-r--r--  src/gpu/gl/GrGLBufferImpl.cpp | 122
-rw-r--r--  src/gpu/gl/GrGLBufferImpl.h | 69
-rw-r--r--  src/gpu/gl/GrGLCaps.cpp | 6
-rw-r--r--  src/gpu/gl/GrGLDefines.h | 2
-rw-r--r--  src/gpu/gl/GrGLGpu.cpp | 241
-rw-r--r--  src/gpu/gl/GrGLGpu.h | 23
-rw-r--r--  src/gpu/gl/GrGLIndexBuffer.cpp | 60
-rw-r--r--  src/gpu/gl/GrGLIndexBuffer.h | 48
-rwxr-xr-x  src/gpu/gl/GrGLTransferBuffer.cpp | 51
-rwxr-xr-x  src/gpu/gl/GrGLTransferBuffer.h | 48
-rw-r--r--  src/gpu/gl/GrGLVertexArray.h | 2
-rw-r--r--  src/gpu/gl/GrGLVertexBuffer.cpp | 60
-rw-r--r--  src/gpu/gl/GrGLVertexBuffer.h | 48
-rw-r--r--  src/gpu/vk/GrVkCaps.cpp | 4
-rw-r--r--  src/gpu/vk/GrVkGpu.cpp | 37
-rw-r--r--  src/gpu/vk/GrVkGpu.h | 6
-rw-r--r--  src/gpu/vk/GrVkIndexBuffer.cpp | 9
-rw-r--r--  src/gpu/vk/GrVkIndexBuffer.h | 8
-rw-r--r--  src/gpu/vk/GrVkTransferBuffer.cpp | 4
-rw-r--r--  src/gpu/vk/GrVkTransferBuffer.h | 17
-rw-r--r--  src/gpu/vk/GrVkVertexBuffer.cpp | 9
-rw-r--r--  src/gpu/vk/GrVkVertexBuffer.h | 8
65 files changed, 790 insertions, 1400 deletions
diff --git a/src/gpu/GrBatchAtlas.cpp b/src/gpu/GrBatchAtlas.cpp
index db1b13471f..9f3c4dd983 100644
--- a/src/gpu/GrBatchAtlas.cpp
+++ b/src/gpu/GrBatchAtlas.cpp
@@ -9,7 +9,6 @@
#include "GrBatchFlushState.h"
#include "GrRectanizer.h"
#include "GrTracing.h"
-#include "GrVertexBuffer.h"
////////////////////////////////////////////////////////////////////////////////
diff --git a/src/gpu/GrBatchFlushState.cpp b/src/gpu/GrBatchFlushState.cpp
index 52261a1839..f01d88852e 100644
--- a/src/gpu/GrBatchFlushState.cpp
+++ b/src/gpu/GrBatchFlushState.cpp
@@ -20,11 +20,11 @@ GrBatchFlushState::GrBatchFlushState(GrGpu* gpu, GrResourceProvider* resourcePro
, fLastFlushedToken(0) {}
void* GrBatchFlushState::makeVertexSpace(size_t vertexSize, int vertexCount,
- const GrVertexBuffer** buffer, int* startVertex) {
+ const GrBuffer** buffer, int* startVertex) {
return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
}
uint16_t* GrBatchFlushState::makeIndexSpace(int indexCount,
- const GrIndexBuffer** buffer, int* startIndex) {
+ const GrBuffer** buffer, int* startIndex) {
return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
}
diff --git a/src/gpu/GrBatchFlushState.h b/src/gpu/GrBatchFlushState.h
index 1f82453c2e..be9d790597 100644
--- a/src/gpu/GrBatchFlushState.h
+++ b/src/gpu/GrBatchFlushState.h
@@ -76,8 +76,8 @@ public:
GrBatchToken asapToken() const { return fLastFlushedToken + 1; }
void* makeVertexSpace(size_t vertexSize, int vertexCount,
- const GrVertexBuffer** buffer, int* startVertex);
- uint16_t* makeIndexSpace(int indexCount, const GrIndexBuffer** buffer, int* startIndex);
+ const GrBuffer** buffer, int* startVertex);
+ uint16_t* makeIndexSpace(int indexCount, const GrBuffer** buffer, int* startIndex);
/** This is called after each batch has a chance to prepare its draws and before the draws
are issued. */
@@ -172,11 +172,11 @@ public:
}
void* makeVertexSpace(size_t vertexSize, int vertexCount,
- const GrVertexBuffer** buffer, int* startVertex) {
+ const GrBuffer** buffer, int* startVertex) {
return this->state()->makeVertexSpace(vertexSize, vertexCount, buffer, startVertex);
}
- uint16_t* makeIndexSpace(int indexCount, const GrIndexBuffer** buffer, int* startIndex) {
+ uint16_t* makeIndexSpace(int indexCount, const GrBuffer** buffer, int* startIndex) {
return this->state()->makeIndexSpace(indexCount, buffer, startIndex);
}
diff --git a/src/gpu/GrBuffer.h b/src/gpu/GrBuffer.h
new file mode 100644
index 0000000000..4fadba6aa7
--- /dev/null
+++ b/src/gpu/GrBuffer.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBuffer_DEFINED
+#define GrBuffer_DEFINED
+
+#include "GrGpuResource.h"
+
+class GrGpu;
+
+class GrBuffer : public GrGpuResource {
+public:
+ /**
+ * Computes a scratch key for a buffer with a "dynamic" access pattern. (Buffers with "static"
+ * and "stream" access patterns are disqualified by nature from being cached and reused.)
+ */
+ static void ComputeScratchKeyForDynamicBuffer(GrBufferType type, size_t size,
+ GrScratchKey* key) {
+ static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
+ GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
+ // TODO: There's not always reason to cache a buffer by type. In some (all?) APIs it's just
+ // a chunk of memory we can use/reuse for any type of data. We really only need to
+ // differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types.
+ builder[0] = type;
+ builder[1] = (uint32_t)size;
+ if (sizeof(size_t) > 4) {
+ builder[2] = (uint32_t)((uint64_t)size >> 32);
+ }
+ }
+
+ GrBufferType type() const { return fType; }
+
+ GrAccessPattern accessPattern() const { return fAccessPattern; }
+
+ /**
+ * Returns true if the buffer is a wrapper around a CPU array. If true it
+ * indicates that map will always succeed and will be free.
+ */
+ bool isCPUBacked() const { return fCPUBacked; }
+
+ /**
+ * Maps the buffer to be written by the CPU.
+ *
+ * The previous content of the buffer is invalidated. It is an error
+ * to draw from the buffer while it is mapped. It may fail if the backend
+ * doesn't support mapping the buffer. If the buffer is CPU backed then
+ * it will always succeed and is a free operation. Once a buffer is mapped,
+ * subsequent calls to map() are ignored.
+ *
+ * Note that buffer mapping does not go through GrContext and therefore is
+ * not serialized with other operations.
+ *
+ * @return a pointer to the data or nullptr if the map fails.
+ */
+ void* map() {
+ if (!fMapPtr) {
+ this->onMap();
+ }
+ return fMapPtr;
+ }
+
+ /**
+ * Unmaps the buffer.
+ *
+ * The pointer returned by the previous map call will no longer be valid.
+ */
+ void unmap() {
+ SkASSERT(fMapPtr);
+ this->onUnmap();
+ fMapPtr = nullptr;
+ }
+
+ /**
+ * Returns the same ptr that map() returned at time of map or nullptr if the
+ * is not mapped.
+ *
+ * @return ptr to mapped buffer data or nullptr if buffer is not mapped.
+ */
+ void* mapPtr() const { return fMapPtr; }
+
+ /**
+ Queries whether the buffer has been mapped.
+
+ @return true if the buffer is mapped, false otherwise.
+ */
+ bool isMapped() const { return SkToBool(fMapPtr); }
+
+ /**
+ * Updates the buffer data.
+ *
+ * The size of the buffer will be preserved. The src data will be
+ * placed at the beginning of the buffer and any remaining contents will
+ * be undefined. srcSizeInBytes must be <= to the buffer size.
+ *
+ * The buffer must not be mapped.
+ *
+ * Note that buffer updates do not go through GrContext and therefore are
+ * not serialized with other operations.
+ *
+ * @return returns true if the update succeeds, false otherwise.
+ */
+ bool updateData(const void* src, size_t srcSizeInBytes) {
+ SkASSERT(!this->isMapped());
+ SkASSERT(srcSizeInBytes <= fGpuMemorySize);
+ return this->onUpdateData(src, srcSizeInBytes);
+ }
+
+protected:
+ GrBuffer(GrGpu* gpu, GrBufferType type, size_t gpuMemorySize, GrAccessPattern accessPattern,
+ bool cpuBacked)
+ : INHERITED(gpu, kCached_LifeCycle),
+ fMapPtr(nullptr),
+ fType(type),
+ fGpuMemorySize(gpuMemorySize), // TODO: Zero for cpu backed buffers?
+ fAccessPattern(accessPattern),
+ fCPUBacked(cpuBacked) {
+ if (!fCPUBacked && SkIsPow2(fGpuMemorySize) && kDynamic_GrAccessPattern == fAccessPattern) {
+ GrScratchKey key;
+ ComputeScratchKeyForDynamicBuffer(fType, fGpuMemorySize, &key);
+ this->setScratchKey(key);
+ }
+ }
+
+ void* fMapPtr;
+
+private:
+ virtual size_t onGpuMemorySize() const { return fGpuMemorySize; }
+
+ virtual void onMap() = 0;
+ virtual void onUnmap() = 0;
+ virtual bool onUpdateData(const void* src, size_t srcSizeInBytes) = 0;
+
+ GrBufferType fType;
+ size_t fGpuMemorySize;
+ GrAccessPattern fAccessPattern;
+ bool fCPUBacked;
+
+ typedef GrGpuResource INHERITED;
+};
+
+#endif
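The map()/unmap()/updateData() contract declared above is the one callers such as
GrBufferAllocPool::flushCpuData() rely on; a minimal caller-side sketch (the buffer, srcData and
srcSize names are hypothetical):

    // Write srcSize bytes into a GrBuffer, preferring map() when the backend supports it.
    if (void* dst = buffer->map()) {           // may return nullptr if mapping fails
        memcpy(dst, srcData, srcSize);
        buffer->unmap();                       // the mapped pointer is invalid after this
    } else {
        buffer->updateData(srcData, srcSize);  // requires the buffer to be unmapped
    }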
diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp
index 73b70bf861..ac34b5cfcc 100644
--- a/src/gpu/GrBufferAllocPool.cpp
+++ b/src/gpu/GrBufferAllocPool.cpp
@@ -8,13 +8,12 @@
#include "GrBufferAllocPool.h"
+#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrGpu.h"
-#include "GrIndexBuffer.h"
#include "GrResourceProvider.h"
#include "GrTypes.h"
-#include "GrVertexBuffer.h"
#include "SkTraceEvent.h"
@@ -41,7 +40,7 @@ do {
} while (false)
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
- BufferType bufferType,
+ GrBufferType bufferType,
size_t blockSize)
: fBlocks(8) {
@@ -53,12 +52,12 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
fBytesInUse = 0;
- fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold();
+ fBufferMapThreshold = gpu->caps()->bufferMapThreshold();
}
void GrBufferAllocPool::deleteBlocks() {
if (fBlocks.count()) {
- GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
+ GrBuffer* buffer = fBlocks.back().fBuffer;
if (buffer->isMapped()) {
UNMAP_BUFFER(fBlocks.back());
}
@@ -109,7 +108,7 @@ void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
if (fBufferPtr) {
SkASSERT(!fBlocks.empty());
if (fBlocks.back().fBuffer->isMapped()) {
- GrGeometryBuffer* buf = fBlocks.back().fBuffer;
+ GrBuffer* buf = fBlocks.back().fBuffer;
SkASSERT(buf->mapPtr() == fBufferPtr);
} else {
SkASSERT(fCpuData == fBufferPtr);
@@ -145,7 +144,7 @@ void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
void* GrBufferAllocPool::makeSpace(size_t size,
size_t alignment,
- const GrGeometryBuffer** buffer,
+ const GrBuffer** buffer,
size_t* offset) {
VALIDATE();
@@ -252,7 +251,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
// threshold.
bool attemptMap = block.fBuffer->isCPUBacked();
if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
- attemptMap = size > fGeometryBufferMapThreshold;
+ attemptMap = size > fBufferMapThreshold;
}
if (attemptMap) {
@@ -295,7 +294,7 @@ void* GrBufferAllocPool::resetCpuData(size_t newSize) {
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
- GrGeometryBuffer* buffer = block.fBuffer;
+ GrBuffer* buffer = block.fBuffer;
SkASSERT(buffer);
SkASSERT(!buffer->isMapped());
SkASSERT(fCpuData == fBufferPtr);
@@ -303,7 +302,7 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
VALIDATE(true);
if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
- flushSize > fGeometryBufferMapThreshold) {
+ flushSize > fBufferMapThreshold) {
void* data = buffer->map();
if (data) {
memcpy(data, fBufferPtr, flushSize);
@@ -315,30 +314,24 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
VALIDATE(true);
}
-GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
+GrBuffer* GrBufferAllocPool::getBuffer(size_t size) {
GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
- static const GrResourceProvider::BufferUsage kUsage = GrResourceProvider::kDynamic_BufferUsage;
// Shouldn't have to use this flag (https://bug.skia.org/4156)
static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
- if (kIndex_BufferType == fBufferType) {
- return rp->createIndexBuffer(size, kUsage, kFlags);
- } else {
- SkASSERT(kVertex_BufferType == fBufferType);
- return rp->createVertexBuffer(size, kUsage, kFlags);
- }
+ return rp->createBuffer(fBufferType, size, kDynamic_GrAccessPattern, kFlags);
}
////////////////////////////////////////////////////////////////////////////////
GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
- : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
+ : GrBufferAllocPool(gpu, kVertex_GrBufferType, MIN_VERTEX_BUFFER_SIZE) {
}
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
int vertexCount,
- const GrVertexBuffer** buffer,
+ const GrBuffer** buffer,
int* startVertex) {
SkASSERT(vertexCount >= 0);
@@ -346,13 +339,11 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
SkASSERT(startVertex);
size_t offset = 0; // assign to suppress warning
- const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
vertexSize,
- &geomBuffer,
+ buffer,
&offset);
- *buffer = (const GrVertexBuffer*) geomBuffer;
SkASSERT(0 == offset % vertexSize);
*startVertex = static_cast<int>(offset / vertexSize);
return ptr;
@@ -361,11 +352,11 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
////////////////////////////////////////////////////////////////////////////////
GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
- : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
+ : GrBufferAllocPool(gpu, kIndex_GrBufferType, MIN_INDEX_BUFFER_SIZE) {
}
void* GrIndexBufferAllocPool::makeSpace(int indexCount,
- const GrIndexBuffer** buffer,
+ const GrBuffer** buffer,
int* startIndex) {
SkASSERT(indexCount >= 0);
@@ -373,13 +364,11 @@ void* GrIndexBufferAllocPool::makeSpace(int indexCount,
SkASSERT(startIndex);
size_t offset = 0; // assign to suppress warning
- const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
sizeof(uint16_t),
- &geomBuffer,
+ buffer,
&offset);
- *buffer = (const GrIndexBuffer*) geomBuffer;
SkASSERT(0 == offset % sizeof(uint16_t));
*startIndex = static_cast<int>(offset / sizeof(uint16_t));
return ptr;
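With GrBuffer as the common type, the vertex and index pools no longer need the intermediate
GrGeometryBuffer pointer and cast; condensed, the vertex path above now reduces to (a sketch of the
code in this hunk, not new code):

    size_t offset = 0;
    void* ptr = INHERITED::makeSpace(vertexSize * vertexCount, vertexSize,
                                     buffer,   // const GrBuffer**, written directly
                                     &offset);
    *startVertex = static_cast<int>(offset / vertexSize);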
diff --git a/src/gpu/GrBufferAllocPool.h b/src/gpu/GrBufferAllocPool.h
index a3d8e45364..071b00b064 100644
--- a/src/gpu/GrBufferAllocPool.h
+++ b/src/gpu/GrBufferAllocPool.h
@@ -11,8 +11,9 @@
#include "SkTArray.h"
#include "SkTDArray.h"
#include "SkTypes.h"
+#include "GrTypesPriv.h"
-class GrGeometryBuffer;
+class GrBuffer;
class GrGpu;
/**
@@ -47,16 +48,6 @@ public:
protected:
/**
- * Used to determine what type of buffers to create. We could make the
- * createBuffer a virtual except that we want to use it in the cons for
- * pre-allocated buffers.
- */
- enum BufferType {
- kVertex_BufferType,
- kIndex_BufferType,
- };
-
- /**
* Constructor
*
* @param gpu The GrGpu used to create the buffers.
@@ -66,7 +57,7 @@ protected:
* reasonable minimum.
*/
GrBufferAllocPool(GrGpu* gpu,
- BufferType bufferType,
+ GrBufferType bufferType,
size_t bufferSize = 0);
virtual ~GrBufferAllocPool();
@@ -92,15 +83,15 @@ protected:
*/
void* makeSpace(size_t size,
size_t alignment,
- const GrGeometryBuffer** buffer,
+ const GrBuffer** buffer,
size_t* offset);
- GrGeometryBuffer* getBuffer(size_t size);
+ GrBuffer* getBuffer(size_t size);
private:
struct BufferBlock {
- size_t fBytesFree;
- GrGeometryBuffer* fBuffer;
+ size_t fBytesFree;
+ GrBuffer* fBuffer;
};
bool createBlock(size_t requestSize);
@@ -115,16 +106,14 @@ private:
GrGpu* fGpu;
size_t fMinBlockSize;
- BufferType fBufferType;
+ GrBufferType fBufferType;
SkTArray<BufferBlock> fBlocks;
void* fCpuData;
void* fBufferPtr;
- size_t fGeometryBufferMapThreshold;
+ size_t fBufferMapThreshold;
};
-class GrVertexBuffer;
-
/**
* A GrBufferAllocPool of vertex buffers
*/
@@ -160,15 +149,13 @@ public:
*/
void* makeSpace(size_t vertexSize,
int vertexCount,
- const GrVertexBuffer** buffer,
+ const GrBuffer** buffer,
int* startVertex);
private:
typedef GrBufferAllocPool INHERITED;
};
-class GrIndexBuffer;
-
/**
* A GrBufferAllocPool of index buffers
*/
@@ -200,7 +187,7 @@ public:
* @return pointer to first index.
*/
void* makeSpace(int indexCount,
- const GrIndexBuffer** buffer,
+ const GrBuffer** buffer,
int* startIndex);
private:
diff --git a/src/gpu/GrCaps.cpp b/src/gpu/GrCaps.cpp
index 784e401328..857e789808 100644
--- a/src/gpu/GrCaps.cpp
+++ b/src/gpu/GrCaps.cpp
@@ -116,7 +116,7 @@ GrCaps::GrCaps(const GrContextOptions& options) {
fSuppressPrints = options.fSuppressPrints;
fImmediateFlush = options.fImmediateMode;
fDrawPathMasksToCompressedTextureSupport = options.fDrawPathToCompressedTexture;
- fGeometryBufferMapThreshold = options.fGeometryBufferMapThreshold;
+ fBufferMapThreshold = options.fBufferMapThreshold;
fUseDrawInsteadOfPartialRenderTargetWrite = options.fUseDrawInsteadOfPartialRenderTargetWrite;
fUseDrawInsteadOfAllRenderTargetWrites = false;
diff --git a/src/gpu/GrDrawTarget.cpp b/src/gpu/GrDrawTarget.cpp
index 08938ec0af..39c1e32d07 100644
--- a/src/gpu/GrDrawTarget.cpp
+++ b/src/gpu/GrDrawTarget.cpp
@@ -19,7 +19,6 @@
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexture.h"
-#include "GrVertexBuffer.h"
#include "gl/GrGLRenderTarget.h"
#include "SkStrokeRec.h"
diff --git a/src/gpu/GrDrawTarget.h b/src/gpu/GrDrawTarget.h
index 9a6dbc9e71..c863b455b3 100644
--- a/src/gpu/GrDrawTarget.h
+++ b/src/gpu/GrDrawTarget.h
@@ -13,11 +13,8 @@
#include "GrContext.h"
#include "GrPathProcessor.h"
#include "GrPrimitiveProcessor.h"
-#include "GrIndexBuffer.h"
#include "GrPathRendering.h"
#include "GrPipelineBuilder.h"
-#include "GrPipeline.h"
-#include "GrVertexBuffer.h"
#include "GrXferProcessor.h"
#include "batches/GrDrawBatch.h"
diff --git a/src/gpu/GrGeometryBuffer.h b/src/gpu/GrGeometryBuffer.h
deleted file mode 100644
index 56a6cae3fb..0000000000
--- a/src/gpu/GrGeometryBuffer.h
+++ /dev/null
@@ -1,124 +0,0 @@
-
-/*
- * Copyright 2011 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-
-#ifndef GrGeometryBuffer_DEFINED
-#define GrGeometryBuffer_DEFINED
-
-#include "GrGpuResource.h"
-
-class GrGpu;
-
-/**
- * Parent class for vertex and index buffers
- */
-class GrGeometryBuffer : public GrGpuResource {
-public:
-
-
- /**
- *Retrieves whether the buffer was created with the dynamic flag
- *
- * @return true if the buffer was created with the dynamic flag
- */
- bool dynamic() const { return fDynamic; }
-
- /**
- * Returns true if the buffer is a wrapper around a CPU array. If true it
- * indicates that map will always succeed and will be free.
- */
- bool isCPUBacked() const { return fCPUBacked; }
-
- /**
- * Maps the buffer to be written by the CPU.
- *
- * The previous content of the buffer is invalidated. It is an error
- * to draw from the buffer while it is mapped. It is an error to call map
- * on an already mapped buffer. It may fail if the backend doesn't support
- * mapping the buffer. If the buffer is CPU backed then it will always
- * succeed and is a free operation. Must be matched by an unmap() call.
- * Currently only one map at a time is supported (no nesting of
- * map/unmap).
- *
- * Note that buffer mapping does not go through GrContext and therefore is
- * not serialized with other operations.
- *
- * @return a pointer to the data or nullptr if the map fails.
- */
- void* map() { return (fMapPtr = this->onMap()); }
-
- /**
- * Unmaps the buffer.
- *
- * The pointer returned by the previous map call will no longer be valid.
- */
- void unmap() {
- SkASSERT(fMapPtr);
- this->onUnmap();
- fMapPtr = nullptr;
- }
-
- /**
- * Returns the same ptr that map() returned at time of map or nullptr if the
- * is not mapped.
- *
- * @return ptr to mapped buffer data or nullptr if buffer is not mapped.
- */
- void* mapPtr() const { return fMapPtr; }
-
- /**
- Queries whether the buffer has been mapped.
-
- @return true if the buffer is mapped, false otherwise.
- */
- bool isMapped() const { return SkToBool(fMapPtr); }
-
- /**
- * Updates the buffer data.
- *
- * The size of the buffer will be preserved. The src data will be
- * placed at the beginning of the buffer and any remaining contents will
- * be undefined. srcSizeInBytes must be <= to the buffer size.
- *
- * The buffer must not be mapped.
- *
- * Note that buffer updates do not go through GrContext and therefore are
- * not serialized with other operations.
- *
- * @return returns true if the update succeeds, false otherwise.
- */
- bool updateData(const void* src, size_t srcSizeInBytes) {
- SkASSERT(!this->isMapped());
- SkASSERT(srcSizeInBytes <= fGpuMemorySize);
- return this->onUpdateData(src, srcSizeInBytes);
- }
-
-protected:
- GrGeometryBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
- : INHERITED(gpu, kCached_LifeCycle)
- , fMapPtr(nullptr)
- , fGpuMemorySize(gpuMemorySize)
- , fDynamic(dynamic)
- , fCPUBacked(cpuBacked) {}
-
-private:
- virtual size_t onGpuMemorySize() const { return fGpuMemorySize; }
-
- virtual void* onMap() = 0;
- virtual void onUnmap() = 0;
- virtual bool onUpdateData(const void* src, size_t srcSizeInBytes) = 0;
-
- void* fMapPtr;
- size_t fGpuMemorySize;
- bool fDynamic;
- bool fCPUBacked;
-
- typedef GrGpuResource INHERITED;
-};
-
-#endif
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index 177fbabc59..35c04caa23 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -8,10 +8,10 @@
#include "GrGpu.h"
+#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrGpuResourcePriv.h"
-#include "GrIndexBuffer.h"
#include "GrMesh.h"
#include "GrPathRendering.h"
#include "GrPipeline.h"
@@ -20,8 +20,6 @@
#include "GrRenderTargetPriv.h"
#include "GrStencilAttachment.h"
#include "GrSurfacePriv.h"
-#include "GrTransferBuffer.h"
-#include "GrVertexBuffer.h"
#include "SkTypes.h"
GrMesh& GrMesh::operator =(const GrMesh& di) {
@@ -238,28 +236,13 @@ GrRenderTarget* GrGpu::wrapBackendTextureAsRenderTarget(const GrBackendTextureDe
return this->onWrapBackendTextureAsRenderTarget(desc, ownership);
}
-GrVertexBuffer* GrGpu::createVertexBuffer(size_t size, bool dynamic) {
+GrBuffer* GrGpu::createBuffer(GrBufferType type, size_t size, GrAccessPattern accessPattern) {
this->handleDirtyContext();
- GrVertexBuffer* vb = this->onCreateVertexBuffer(size, dynamic);
+ GrBuffer* buffer = this->onCreateBuffer(type, size, accessPattern);
if (!this->caps()->reuseScratchBuffers()) {
- vb->resourcePriv().removeScratchKey();
+ buffer->resourcePriv().removeScratchKey();
}
- return vb;
-}
-
-GrIndexBuffer* GrGpu::createIndexBuffer(size_t size, bool dynamic) {
- this->handleDirtyContext();
- GrIndexBuffer* ib = this->onCreateIndexBuffer(size, dynamic);
- if (!this->caps()->reuseScratchBuffers()) {
- ib->resourcePriv().removeScratchKey();
- }
- return ib;
-}
-
-GrTransferBuffer* GrGpu::createTransferBuffer(size_t size, TransferType type) {
- this->handleDirtyContext();
- GrTransferBuffer* tb = this->onCreateTransferBuffer(size, type);
- return tb;
+ return buffer;
}
void GrGpu::clear(const SkIRect& rect,
@@ -416,13 +399,13 @@ bool GrGpu::writePixels(GrSurface* surface,
bool GrGpu::transferPixels(GrSurface* surface,
int left, int top, int width, int height,
- GrPixelConfig config, GrTransferBuffer* buffer,
+ GrPixelConfig config, GrBuffer* transferBuffer,
size_t offset, size_t rowBytes) {
- SkASSERT(buffer);
+ SkASSERT(transferBuffer);
this->handleDirtyContext();
if (this->onTransferPixels(surface, left, top, width, height, config,
- buffer, offset, rowBytes)) {
+ transferBuffer, offset, rowBytes)) {
fStats.incTransfersToTexture();
return true;
}
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index 4e9b247ca3..a49b2c28fd 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -20,9 +20,9 @@
#include "SkTArray.h"
class GrBatchTracker;
+class GrBuffer;
class GrContext;
class GrGLContext;
-class GrIndexBuffer;
class GrMesh;
class GrNonInstancedVertices;
class GrPath;
@@ -36,8 +36,6 @@ class GrRenderTarget;
class GrStencilAttachment;
class GrSurface;
class GrTexture;
-class GrTransferBuffer;
-class GrVertexBuffer;
class GrGpu : public SkRefCnt {
public:
@@ -129,39 +127,11 @@ public:
GrRenderTarget* wrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&, GrWrapOwnership);
/**
- * Creates a vertex buffer.
+ * Creates a buffer.
*
- * @param size size in bytes of the vertex buffer
- * @param dynamic hints whether the data will be frequently changed
- * by either GrVertexBuffer::map() or
- * GrVertexBuffer::updateData().
- *
- * @return The vertex buffer if successful, otherwise nullptr.
- */
- GrVertexBuffer* createVertexBuffer(size_t size, bool dynamic);
-
- /**
- * Creates an index buffer.
- *
- * @param size size in bytes of the index buffer
- * @param dynamic hints whether the data will be frequently changed
- * by either GrIndexBuffer::map() or
- * GrIndexBuffer::updateData().
- *
- * @return The index buffer if successful, otherwise nullptr.
- */
- GrIndexBuffer* createIndexBuffer(size_t size, bool dynamic);
-
- /**
- * Creates a transfer buffer.
- *
- * @param size size in bytes of the index buffer
- * @param toGpu true if used to transfer from the cpu to the gpu
- * otherwise to be used to transfer from the gpu to the cpu
- *
- * @return The transfer buffer if successful, otherwise nullptr.
+ * @return the buffer if successful, otherwise nullptr.
*/
- GrTransferBuffer* createTransferBuffer(size_t size, TransferType type);
+ GrBuffer* createBuffer(GrBufferType, size_t size, GrAccessPattern);
/**
* Resolves MSAA.
@@ -298,22 +268,22 @@ public:
size_t rowBytes);
/**
- * Updates the pixels in a rectangle of a surface using a GrTransferBuffer
+ * Updates the pixels in a rectangle of a surface using a buffer
*
- * @param surface The surface to write to.
- * @param left left edge of the rectangle to write (inclusive)
- * @param top top edge of the rectangle to write (inclusive)
- * @param width width of rectangle to write in pixels.
- * @param height height of rectangle to write in pixels.
- * @param config the pixel config of the source buffer
- * @param buffer GrTransferBuffer to read pixels from
- * @param offset offset from the start of the buffer
- * @param rowBytes number of bytes between consecutive rows. Zero
- * means rows are tightly packed.
+ * @param surface The surface to write to.
+ * @param left left edge of the rectangle to write (inclusive)
+ * @param top top edge of the rectangle to write (inclusive)
+ * @param width width of rectangle to write in pixels.
+ * @param height height of rectangle to write in pixels.
+ * @param config the pixel config of the source buffer
+ * @param transferBuffer GrBuffer to read pixels from (type must be "kCpuToGpu")
+ * @param offset offset from the start of the buffer
+ * @param rowBytes number of bytes between consecutive rows. Zero
+ * means rows are tightly packed.
*/
bool transferPixels(GrSurface* surface,
int left, int top, int width, int height,
- GrPixelConfig config, GrTransferBuffer* buffer,
+ GrPixelConfig config, GrBuffer* transferBuffer,
size_t offset, size_t rowBytes);
/**
@@ -558,9 +528,7 @@ private:
GrWrapOwnership) = 0;
virtual GrRenderTarget* onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&,
GrWrapOwnership) = 0;
- virtual GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) = 0;
- virtual GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) = 0;
- virtual GrTransferBuffer* onCreateTransferBuffer(size_t size, TransferType type) = 0;
+ virtual GrBuffer* onCreateBuffer(GrBufferType, size_t size, GrAccessPattern) = 0;
// overridden by backend-specific derived class to perform the clear.
virtual void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) = 0;
@@ -602,7 +570,7 @@ private:
// overridden by backend-specific derived class to perform the surface write
virtual bool onTransferPixels(GrSurface*,
int left, int top, int width, int height,
- GrPixelConfig config, GrTransferBuffer* buffer,
+ GrPixelConfig config, GrBuffer* transferBuffer,
size_t offset, size_t rowBytes) = 0;
// overridden by backend-specific derived class to perform the resolve
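Backends now implement a single onCreateBuffer() virtual instead of three; a minimal sketch of an
override (MyGpu and MyBuffer are hypothetical names; the real GrGLGpu and GrVkGpu overrides appear
later in this diff):

    class MyGpu : public GrGpu {
    private:
        GrBuffer* onCreateBuffer(GrBufferType type, size_t size,
                                 GrAccessPattern accessPattern) override {
            // One code path handles vertex, index and transfer buffers alike.
            return MyBuffer::Create(this, type, size, accessPattern);
        }
    };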
diff --git a/src/gpu/GrIndexBuffer.h b/src/gpu/GrIndexBuffer.h
deleted file mode 100644
index 2e3b437adf..0000000000
--- a/src/gpu/GrIndexBuffer.h
+++ /dev/null
@@ -1,51 +0,0 @@
-
-/*
- * Copyright 2010 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-
-
-#ifndef GrIndexBuffer_DEFINED
-#define GrIndexBuffer_DEFINED
-
-#include "GrGeometryBuffer.h"
-
-
-class GrIndexBuffer : public GrGeometryBuffer {
-public:
- static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
- static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
-
- GrScratchKey::Builder builder(key, kType, 2);
-
- builder[0] = SkToUInt(size);
- builder[1] = dynamic ? 1 : 0;
- }
-
- /**
- * Retrieves the maximum number of quads that could be rendered
- * from the index buffer (using kTriangles_GrPrimitiveType).
- * @return the maximum number of quads using full size of index buffer.
- */
- int maxQuads() const {
- return static_cast<int>(this->gpuMemorySize() / (sizeof(uint16_t) * 6));
- }
-protected:
- GrIndexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
- : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
- // We currently only make buffers scratch if they're both pow2 sized and not cpuBacked.
- if (!cpuBacked && SkIsPow2(gpuMemorySize)) {
- GrScratchKey key;
- ComputeScratchKey(gpuMemorySize, dynamic, &key);
- this->setScratchKey(key);
- }
- }
-
-private:
- typedef GrGeometryBuffer INHERITED;
-};
-
-#endif
diff --git a/src/gpu/GrMesh.h b/src/gpu/GrMesh.h
index 5ff23dcfd0..964e0b4a8e 100644
--- a/src/gpu/GrMesh.h
+++ b/src/gpu/GrMesh.h
@@ -8,8 +8,8 @@
#ifndef GrMesh_DEFINED
#define GrMesh_DEFINED
-#include "GrIndexBuffer.h"
-#include "GrVertexBuffer.h"
+#include "GrBuffer.h"
+#include "GrGpuResourceRef.h"
class GrNonInstancedMesh {
public:
@@ -20,8 +20,8 @@ public:
int indexCount() const { return fIndexCount; }
bool isIndexed() const { return fIndexCount > 0; }
- const GrVertexBuffer* vertexBuffer() const { return fVertexBuffer.get(); }
- const GrIndexBuffer* indexBuffer() const { return fIndexBuffer.get(); }
+ const GrBuffer* vertexBuffer() const { return fVertexBuffer.get(); }
+ const GrBuffer* indexBuffer() const { return fIndexBuffer.get(); }
protected:
GrPrimitiveType fPrimitiveType;
@@ -29,8 +29,8 @@ protected:
int fStartIndex;
int fVertexCount;
int fIndexCount;
- GrPendingIOResource<const GrVertexBuffer, kRead_GrIOType> fVertexBuffer;
- GrPendingIOResource<const GrIndexBuffer, kRead_GrIOType> fIndexBuffer;
+ GrPendingIOResource<const GrBuffer, kRead_GrIOType> fVertexBuffer;
+ GrPendingIOResource<const GrBuffer, kRead_GrIOType> fIndexBuffer;
friend class GrMesh;
};
@@ -46,7 +46,7 @@ public:
GrMesh(const GrMesh& di) { (*this) = di; }
GrMesh& operator =(const GrMesh& di);
- void init(GrPrimitiveType primType, const GrVertexBuffer* vertexBuffer, int startVertex,
+ void init(GrPrimitiveType primType, const GrBuffer* vertexBuffer, int startVertex,
int vertexCount) {
SkASSERT(vertexBuffer);
SkASSERT(vertexCount);
@@ -65,8 +65,8 @@ public:
}
void initIndexed(GrPrimitiveType primType,
- const GrVertexBuffer* vertexBuffer,
- const GrIndexBuffer* indexBuffer,
+ const GrBuffer* vertexBuffer,
+ const GrBuffer* indexBuffer,
int startVertex,
int startIndex,
int vertexCount,
@@ -95,8 +95,8 @@ public:
the number of instances supported by the index buffer. To be used with
nextInstances() to draw in max-sized batches.*/
void initInstanced(GrPrimitiveType primType,
- const GrVertexBuffer* vertexBuffer,
- const GrIndexBuffer* indexBuffer,
+ const GrBuffer* vertexBuffer,
+ const GrBuffer* indexBuffer,
int startVertex,
int verticesPerInstance,
int indicesPerInstance,
diff --git a/src/gpu/GrOvalRenderer.cpp b/src/gpu/GrOvalRenderer.cpp
index d0be27d9b0..9388f323c8 100644
--- a/src/gpu/GrOvalRenderer.cpp
+++ b/src/gpu/GrOvalRenderer.cpp
@@ -1211,8 +1211,8 @@ static const int kNumRRectsInIndexBuffer = 256;
GR_DECLARE_STATIC_UNIQUE_KEY(gStrokeRRectOnlyIndexBufferKey);
GR_DECLARE_STATIC_UNIQUE_KEY(gRRectOnlyIndexBufferKey);
-static const GrIndexBuffer* ref_rrect_index_buffer(bool strokeOnly,
- GrResourceProvider* resourceProvider) {
+static const GrBuffer* ref_rrect_index_buffer(bool strokeOnly,
+ GrResourceProvider* resourceProvider) {
GR_DEFINE_STATIC_UNIQUE_KEY(gStrokeRRectOnlyIndexBufferKey);
GR_DEFINE_STATIC_UNIQUE_KEY(gRRectOnlyIndexBufferKey);
if (strokeOnly) {
@@ -1286,7 +1286,7 @@ private:
// drop out the middle quad if we're stroked
int indicesPerInstance = fStroked ? kIndicesPerStrokeRRect : kIndicesPerRRect;
- SkAutoTUnref<const GrIndexBuffer> indexBuffer(
+ SkAutoTUnref<const GrBuffer> indexBuffer(
ref_rrect_index_buffer(fStroked, target->resourceProvider()));
InstancedHelper helper;
@@ -1434,7 +1434,7 @@ private:
// drop out the middle quad if we're stroked
int indicesPerInstance = fStroked ? kIndicesPerStrokeRRect : kIndicesPerRRect;
- SkAutoTUnref<const GrIndexBuffer> indexBuffer(
+ SkAutoTUnref<const GrBuffer> indexBuffer(
ref_rrect_index_buffer(fStroked, target->resourceProvider()));
InstancedHelper helper;
diff --git a/src/gpu/GrResourceProvider.cpp b/src/gpu/GrResourceProvider.cpp
index 79146d0911..a73bf752b0 100644
--- a/src/gpu/GrResourceProvider.cpp
+++ b/src/gpu/GrResourceProvider.cpp
@@ -7,15 +7,14 @@
#include "GrResourceProvider.h"
+#include "GrBuffer.h"
#include "GrGpu.h"
-#include "GrIndexBuffer.h"
#include "GrPathRendering.h"
#include "GrRenderTarget.h"
#include "GrRenderTargetPriv.h"
#include "GrResourceCache.h"
#include "GrResourceKey.h"
#include "GrStencilAttachment.h"
-#include "GrVertexBuffer.h"
GR_DECLARE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);
@@ -25,16 +24,16 @@ GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSin
fQuadIndexBufferKey = gQuadIndexBufferKey;
}
-const GrIndexBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16_t* pattern,
- int patternSize,
- int reps,
- int vertCount,
- const GrUniqueKey& key) {
+const GrBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16_t* pattern,
+ int patternSize,
+ int reps,
+ int vertCount,
+ const GrUniqueKey& key) {
size_t bufferSize = patternSize * reps * sizeof(uint16_t);
// This is typically used in GrBatchs, so we assume kNoPendingIO.
- GrIndexBuffer* buffer = this->createIndexBuffer(bufferSize, kStatic_BufferUsage,
- kNoPendingIO_Flag);
+ GrBuffer* buffer = this->createBuffer(kIndex_GrBufferType, bufferSize, kStatic_GrAccessPattern,
+ kNoPendingIO_Flag);
if (!buffer) {
return nullptr;
}
@@ -63,7 +62,7 @@ const GrIndexBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16
return buffer;
}
-const GrIndexBuffer* GrResourceProvider::createQuadIndexBuffer() {
+const GrBuffer* GrResourceProvider::createQuadIndexBuffer() {
static const int kMaxQuads = 1 << 12; // max possible: (1 << 14) - 1;
GR_STATIC_ASSERT(4 * kMaxQuads <= 65535);
static const uint16_t kPattern[] = { 0, 1, 2, 0, 2, 3 };
@@ -89,72 +88,31 @@ GrPathRange* GrResourceProvider::createGlyphs(const SkTypeface* tf, const SkDesc
return this->gpu()->pathRendering()->createGlyphs(tf, desc, stroke);
}
-GrIndexBuffer* GrResourceProvider::createIndexBuffer(size_t size, BufferUsage usage,
- uint32_t flags) {
+GrBuffer* GrResourceProvider::createBuffer(GrBufferType type, size_t size,
+ GrAccessPattern accessPattern, uint32_t flags) {
if (this->isAbandoned()) {
return nullptr;
}
- bool noPendingIO = SkToBool(flags & kNoPendingIO_Flag);
- bool dynamic = kDynamic_BufferUsage == usage;
- if (dynamic) {
+ if (kDynamic_GrAccessPattern == accessPattern) {
// bin by pow2 with a reasonable min
static const uint32_t MIN_SIZE = 1 << 12;
size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
GrScratchKey key;
- GrIndexBuffer::ComputeScratchKey(size, true, &key);
+ GrBuffer::ComputeScratchKeyForDynamicBuffer(type, size, &key);
uint32_t scratchFlags = 0;
- if (noPendingIO) {
+ if (flags & kNoPendingIO_Flag) {
scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
} else {
scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
}
GrGpuResource* resource = this->cache()->findAndRefScratchResource(key, size, scratchFlags);
if (resource) {
- return static_cast<GrIndexBuffer*>(resource);
+ return static_cast<GrBuffer*>(resource);
}
}
- return this->gpu()->createIndexBuffer(size, dynamic);
-}
-
-GrVertexBuffer* GrResourceProvider::createVertexBuffer(size_t size, BufferUsage usage,
- uint32_t flags) {
- if (this->isAbandoned()) {
- return nullptr;
- }
-
- bool noPendingIO = SkToBool(flags & kNoPendingIO_Flag);
- bool dynamic = kDynamic_BufferUsage == usage;
- if (dynamic) {
- // bin by pow2 with a reasonable min
- static const uint32_t MIN_SIZE = 1 << 12;
- size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
-
- GrScratchKey key;
- GrVertexBuffer::ComputeScratchKey(size, true, &key);
- uint32_t scratchFlags = 0;
- if (noPendingIO) {
- scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
- } else {
- scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
- }
- GrGpuResource* resource = this->cache()->findAndRefScratchResource(key, size, scratchFlags);
- if (resource) {
- return static_cast<GrVertexBuffer*>(resource);
- }
- }
- return this->gpu()->createVertexBuffer(size, dynamic);
-}
-
-GrTransferBuffer* GrResourceProvider::createTransferBuffer(size_t size, TransferType type,
- uint32_t flags) {
- if (this->isAbandoned()) {
- return nullptr;
- }
-
- //bool noPendingIO = SkToBool(flags & kNoPendingIO_Flag);
- return this->gpu()->createTransferBuffer(size, type);
+ return this->gpu()->createBuffer(type, size, accessPattern);
}
GrBatchAtlas* GrResourceProvider::createAtlas(GrPixelConfig config,
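Note that the consolidated GrResourceProvider::createBuffer() keeps the old power-of-two binning
for dynamic buffers so differently sized requests can hit the same cached scratch buffers; a worked
example of the binning (illustrative values only):

    static const uint32_t MIN_SIZE = 1 << 12;                       // 4 KiB floor
    size_t binned = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(5000)));   // 5000 -> 8192
    size_t small  = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(100)));    // 100  -> 4096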
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index 3dfc9ba863..7b51726d10 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -9,18 +9,16 @@
#define GrResourceProvider_DEFINED
#include "GrBatchAtlas.h"
-#include "GrIndexBuffer.h"
+#include "GrBuffer.h"
#include "GrTextureProvider.h"
#include "GrPathRange.h"
class GrBatchAtlas;
-class GrIndexBuffer;
class GrPath;
class GrRenderTarget;
class GrSingleOwner;
class GrStencilAttachment;
class GrStrokeInfo;
-class GrVertexBuffer;
class SkDescriptor;
class SkPath;
class SkTypeface;
@@ -45,7 +43,7 @@ public:
/**
* Either finds and refs, or creates an index buffer for instanced drawing with a specific
* pattern if the index buffer is not found. If the return is non-null, the caller owns
- * a ref on the returned GrIndexBuffer.
+ * a ref on the returned GrBuffer.
*
* @param pattern the pattern of indices to repeat
* @param patternSize size in bytes of the pattern
@@ -55,12 +53,12 @@ public:
*
* @return The index buffer if successful, otherwise nullptr.
*/
- const GrIndexBuffer* findOrCreateInstancedIndexBuffer(const uint16_t* pattern,
- int patternSize,
- int reps,
- int vertCount,
- const GrUniqueKey& key) {
- if (GrIndexBuffer* buffer = this->findAndRefTByUniqueKey<GrIndexBuffer>(key)) {
+ const GrBuffer* findOrCreateInstancedIndexBuffer(const uint16_t* pattern,
+ int patternSize,
+ int reps,
+ int vertCount,
+ const GrUniqueKey& key) {
+ if (GrBuffer* buffer = this->findAndRefTByUniqueKey<GrBuffer>(key)) {
return buffer;
}
return this->createInstancedIndexBuffer(pattern, patternSize, reps, vertCount, key);
@@ -69,13 +67,13 @@ public:
/**
* Returns an index buffer that can be used to render quads.
* Six indices per quad: 0, 1, 2, 0, 2, 3, etc.
- * The max number of quads can be queried using GrIndexBuffer::maxQuads().
+ * The max number of quads is the buffer's index capacity divided by 6.
* Draw with kTriangles_GrPrimitiveType
* @ return the quad index buffer
*/
- const GrIndexBuffer* refQuadIndexBuffer() {
- if (GrIndexBuffer* buffer =
- this->findAndRefTByUniqueKey<GrIndexBuffer>(fQuadIndexBufferKey)) {
+ const GrBuffer* refQuadIndexBuffer() {
+ if (GrBuffer* buffer =
+ this->findAndRefTByUniqueKey<GrBuffer>(fQuadIndexBufferKey)) {
return buffer;
}
return this->createQuadIndexBuffer();
@@ -104,16 +102,7 @@ public:
kNoPendingIO_Flag = kNoPendingIO_ScratchTextureFlag,
};
- enum BufferUsage {
- /** Caller intends to specify the buffer data rarely with respect to the number of draws
- that read the data. */
- kStatic_BufferUsage,
- /** Caller intends to respecify the buffer data frequently between draws. */
- kDynamic_BufferUsage,
- };
- GrIndexBuffer* createIndexBuffer(size_t size, BufferUsage, uint32_t flags);
- GrVertexBuffer* createVertexBuffer(size_t size, BufferUsage, uint32_t flags);
- GrTransferBuffer* createTransferBuffer(size_t size, TransferType, uint32_t flags);
+ GrBuffer* createBuffer(GrBufferType, size_t size, GrAccessPattern, uint32_t flags);
GrTexture* createApproxTexture(const GrSurfaceDesc& desc, uint32_t flags) {
SkASSERT(0 == flags || kNoPendingIO_Flag == flags);
@@ -157,13 +146,13 @@ public:
GrWrapOwnership = kBorrow_GrWrapOwnership);
private:
- const GrIndexBuffer* createInstancedIndexBuffer(const uint16_t* pattern,
- int patternSize,
- int reps,
- int vertCount,
- const GrUniqueKey& key);
+ const GrBuffer* createInstancedIndexBuffer(const uint16_t* pattern,
+ int patternSize,
+ int reps,
+ int vertCount,
+ const GrUniqueKey& key);
- const GrIndexBuffer* createQuadIndexBuffer();
+ const GrBuffer* createQuadIndexBuffer();
GrUniqueKey fQuadIndexBufferKey;
diff --git a/src/gpu/GrSoftwarePathRenderer.cpp b/src/gpu/GrSoftwarePathRenderer.cpp
index af77c205b5..093bef7086 100644
--- a/src/gpu/GrSoftwarePathRenderer.cpp
+++ b/src/gpu/GrSoftwarePathRenderer.cpp
@@ -9,7 +9,6 @@
#include "GrSoftwarePathRenderer.h"
#include "GrContext.h"
#include "GrSWMaskHelper.h"
-#include "GrVertexBuffer.h"
#include "batches/GrRectBatchFactory.h"
////////////////////////////////////////////////////////////////////////////////
diff --git a/src/gpu/GrTest.cpp b/src/gpu/GrTest.cpp
index c9b26e284a..2b6463d853 100644
--- a/src/gpu/GrTest.cpp
+++ b/src/gpu/GrTest.cpp
@@ -345,11 +345,7 @@ private:
return nullptr;
}
- GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) override { return nullptr; }
-
- GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) override { return nullptr; }
-
- GrTransferBuffer* onCreateTransferBuffer(size_t, TransferType) override { return nullptr; }
+ GrBuffer* onCreateBuffer(GrBufferType, size_t, GrAccessPattern) override { return nullptr; }
void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) override {}
@@ -376,7 +372,7 @@ private:
bool onTransferPixels(GrSurface* surface,
int left, int top, int width, int height,
- GrPixelConfig config, GrTransferBuffer* buffer,
+ GrPixelConfig config, GrBuffer* transferBuffer,
size_t offset, size_t rowBytes) override {
return false;
}
@@ -410,7 +406,7 @@ GrContext* GrContext::CreateMockContext() {
void GrContext::initMockContext() {
GrContextOptions options;
- options.fGeometryBufferMapThreshold = 0;
+ options.fBufferMapThreshold = 0;
SkASSERT(nullptr == fGpu);
fGpu = new MockGpu(this, options);
SkASSERT(fGpu);
diff --git a/src/gpu/GrTransferBuffer.h b/src/gpu/GrTransferBuffer.h
deleted file mode 100755
index bd80666fd6..0000000000
--- a/src/gpu/GrTransferBuffer.h
+++ /dev/null
@@ -1,76 +0,0 @@
-
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-
-#ifndef GrTransferBuffer_DEFINED
-#define GrTransferBuffer_DEFINED
-
-#include "GrGpuResource.h"
-
-class GrTransferBuffer : public GrGpuResource {
-public:
- /**
- * Maps the buffer to be written by the CPU.
- *
- * The previous content of the buffer is invalidated. It is an error
- * to transfer to or from the buffer while it is mapped. It is an error to
- * call map on an already mapped buffer. Must be matched by an unmap() call.
- * Currently only one map at a time is supported (no nesting of map/unmap).
- *
- * Note that buffer mapping does not go through GrContext and therefore is
- * not serialized with other operations.
- *
- * @return a pointer to the data or nullptr if the map fails.
- */
- void* map() { return (fMapPtr = this->onMap()); }
-
- /**
- * Unmaps the buffer.
- *
- * The pointer returned by the previous map call will no longer be valid.
- */
- void unmap() {
- SkASSERT(fMapPtr);
- this->onUnmap();
- fMapPtr = nullptr;
- }
-
- /**
- * Returns the same ptr that map() returned at time of map or nullptr if the
- * is not mapped.
- *
- * @return ptr to mapped buffer data or nullptr if buffer is not mapped.
- */
- void* mapPtr() const { return fMapPtr; }
-
- /**
- Queries whether the buffer has been mapped.
-
- @return true if the buffer is mapped, false otherwise.
- */
- bool isMapped() const { return SkToBool(fMapPtr); }
-
-protected:
- GrTransferBuffer(GrGpu* gpu, size_t gpuMemorySize)
- : INHERITED(gpu, kUncached_LifeCycle)
- , fGpuMemorySize(gpuMemorySize) {
- }
-
-private:
- virtual size_t onGpuMemorySize() const { return fGpuMemorySize; }
-
- virtual void* onMap() = 0;
- virtual void onUnmap() = 0;
-
- void* fMapPtr;
- size_t fGpuMemorySize;
-
- typedef GrGpuResource INHERITED;
-};
-
-#endif
diff --git a/src/gpu/GrVertexBuffer.h b/src/gpu/GrVertexBuffer.h
deleted file mode 100644
index 3c62cd61ca..0000000000
--- a/src/gpu/GrVertexBuffer.h
+++ /dev/null
@@ -1,42 +0,0 @@
-
-/*
- * Copyright 2010 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-
-
-#ifndef GrVertexBuffer_DEFINED
-#define GrVertexBuffer_DEFINED
-
-#include "GrGeometryBuffer.h"
-
-class GrVertexBuffer : public GrGeometryBuffer {
-public:
- static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
- static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
-
- GrScratchKey::Builder builder(key, kType, 2);
-
- builder[0] = SkToUInt(size);
- builder[1] = dynamic ? 1 : 0;
- }
-
-protected:
- GrVertexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
- : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
- // We currently only make buffers scratch if they're both pow2 sized and not cpuBacked.
- if (!cpuBacked && SkIsPow2(gpuMemorySize)) {
- GrScratchKey key;
- ComputeScratchKey(gpuMemorySize, dynamic, &key);
- this->setScratchKey(key);
- }
- }
-
-private:
- typedef GrGeometryBuffer INHERITED;
-};
-
-#endif
diff --git a/src/gpu/batches/GrAAConvexPathRenderer.cpp b/src/gpu/batches/GrAAConvexPathRenderer.cpp
index f6be366697..ba19a0db85 100644
--- a/src/gpu/batches/GrAAConvexPathRenderer.cpp
+++ b/src/gpu/batches/GrAAConvexPathRenderer.cpp
@@ -814,7 +814,7 @@ private:
continue;
}
- const GrVertexBuffer* vertexBuffer;
+ const GrBuffer* vertexBuffer;
int firstVertex;
void* verts = target->makeVertexSpace(vertexStride, tess.numPts(), &vertexBuffer,
@@ -824,7 +824,7 @@ private:
return;
}
- const GrIndexBuffer* indexBuffer;
+ const GrBuffer* indexBuffer;
int firstIndex;
uint16_t* idxs = target->makeIndexSpace(tess.numIndices(), &indexBuffer, &firstIndex);
@@ -900,7 +900,7 @@ private:
continue;
}
- const GrVertexBuffer* vertexBuffer;
+ const GrBuffer* vertexBuffer;
int firstVertex;
size_t vertexStride = quadProcessor->getVertexStride();
@@ -912,7 +912,7 @@ private:
return;
}
- const GrIndexBuffer* indexBuffer;
+ const GrBuffer* indexBuffer;
int firstIndex;
uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
diff --git a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
index 7e96b86550..6a8ec2ff85 100644
--- a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
+++ b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
@@ -10,13 +10,13 @@
#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
+#include "GrBuffer.h"
#include "GrContext.h"
#include "GrPipelineBuilder.h"
#include "GrResourceProvider.h"
#include "GrSurfacePriv.h"
#include "GrSWMaskHelper.h"
#include "GrTexturePriv.h"
-#include "GrVertexBuffer.h"
#include "batches/GrVertexBatch.h"
#include "effects/GrDistanceFieldGeoProc.h"
@@ -177,8 +177,8 @@ private:
}
struct FlushInfo {
- SkAutoTUnref<const GrVertexBuffer> fVertexBuffer;
- SkAutoTUnref<const GrIndexBuffer> fIndexBuffer;
+ SkAutoTUnref<const GrBuffer> fVertexBuffer;
+ SkAutoTUnref<const GrBuffer> fIndexBuffer;
int fVertexOffset;
int fInstancesToFlush;
};
@@ -217,7 +217,7 @@ private:
size_t vertexStride = dfProcessor->getVertexStride();
SkASSERT(vertexStride == 2 * sizeof(SkPoint) + sizeof(GrColor));
- const GrVertexBuffer* vertexBuffer;
+ const GrBuffer* vertexBuffer;
void* vertices = target->makeVertexSpace(vertexStride,
kVerticesPerQuad * instanceCount,
&vertexBuffer,
@@ -492,7 +492,8 @@ private:
void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) const {
GrMesh mesh;
- int maxInstancesPerDraw = flushInfo->fIndexBuffer->maxQuads();
+ int maxInstancesPerDraw =
+ static_cast<int>(flushInfo->fIndexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6);
mesh.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
flushInfo->fIndexBuffer, flushInfo->fVertexOffset, kVerticesPerQuad,
kIndicesPerQuad, flushInfo->fInstancesToFlush, maxInstancesPerDraw);
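With GrIndexBuffer::maxQuads() gone, callers derive the quad capacity directly from the buffer
size, as in the flush() change above; the arithmetic spelled out (a sketch; indexBuffer stands for
any quad index buffer):

    // Six 16-bit indices per quad, so capacity = byte size / (sizeof(uint16_t) * 6).
    int maxQuadsPerDraw =
            static_cast<int>(indexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6);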
diff --git a/src/gpu/batches/GrAAFillRectBatch.cpp b/src/gpu/batches/GrAAFillRectBatch.cpp
index 9ec8ffdc4a..890a18b576 100644
--- a/src/gpu/batches/GrAAFillRectBatch.cpp
+++ b/src/gpu/batches/GrAAFillRectBatch.cpp
@@ -28,7 +28,7 @@ static const int kNumAAFillRectsInIndexBuffer = 256;
static const int kVertsPerAAFillRect = 8;
static const int kIndicesPerAAFillRect = 30;
-const GrIndexBuffer* get_index_buffer(GrResourceProvider* resourceProvider) {
+const GrBuffer* get_index_buffer(GrResourceProvider* resourceProvider) {
GR_DEFINE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey);
static const uint16_t gFillAARectIdx[] = {
@@ -191,7 +191,7 @@ public:
out->setUnknownSingleComponent();
}
- static const GrIndexBuffer* GetIndexBuffer(GrResourceProvider* rp) {
+ static const GrBuffer* GetIndexBuffer(GrResourceProvider* rp) {
return get_index_buffer(rp);
}
diff --git a/src/gpu/batches/GrAAHairLinePathRenderer.cpp b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
index 7d45ed71f9..2cc91f9740 100644
--- a/src/gpu/batches/GrAAHairLinePathRenderer.cpp
+++ b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
@@ -9,15 +9,14 @@
#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
+#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrDefaultGeoProcFactory.h"
-#include "GrIndexBuffer.h"
#include "GrPathUtils.h"
#include "GrPipelineBuilder.h"
#include "GrProcessor.h"
#include "GrResourceProvider.h"
-#include "GrVertexBuffer.h"
#include "SkGeometry.h"
#include "SkStroke.h"
#include "SkTemplates.h"
@@ -65,7 +64,7 @@ static const int kQuadNumVertices = 5;
static const int kQuadsNumInIdxBuffer = 256;
GR_DECLARE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
-static const GrIndexBuffer* ref_quads_index_buffer(GrResourceProvider* resourceProvider) {
+static const GrBuffer* ref_quads_index_buffer(GrResourceProvider* resourceProvider) {
GR_DEFINE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
return resourceProvider->findOrCreateInstancedIndexBuffer(
kQuadIdxBufPattern, kIdxsPerQuad, kQuadsNumInIdxBuffer, kQuadNumVertices,
@@ -99,7 +98,7 @@ static const int kLineSegsNumInIdxBuffer = 256;
GR_DECLARE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
-static const GrIndexBuffer* ref_lines_index_buffer(GrResourceProvider* resourceProvider) {
+static const GrBuffer* ref_lines_index_buffer(GrResourceProvider* resourceProvider) {
GR_DEFINE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
return resourceProvider->findOrCreateInstancedIndexBuffer(
kLineSegIdxBufPattern, kIdxsPerLineSeg, kLineSegsNumInIdxBuffer, kLineSegNumVertices,
@@ -858,11 +857,11 @@ void AAHairlineBatch::onPrepareDraws(Target* target) const {
// do lines first
if (lineCount) {
- SkAutoTUnref<const GrIndexBuffer> linesIndexBuffer(
+ SkAutoTUnref<const GrBuffer> linesIndexBuffer(
ref_lines_index_buffer(target->resourceProvider()));
target->initDraw(lineGP);
- const GrVertexBuffer* vertexBuffer;
+ const GrBuffer* vertexBuffer;
int firstVertex;
size_t vertexStride = lineGP->getVertexStride();
@@ -891,10 +890,10 @@ void AAHairlineBatch::onPrepareDraws(Target* target) const {
}
if (quadCount || conicCount) {
- const GrVertexBuffer* vertexBuffer;
+ const GrBuffer* vertexBuffer;
int firstVertex;
- SkAutoTUnref<const GrIndexBuffer> quadsIndexBuffer(
+ SkAutoTUnref<const GrBuffer> quadsIndexBuffer(
ref_quads_index_buffer(target->resourceProvider()));
size_t vertexStride = sizeof(BezierVertex);
diff --git a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
index 120ecc76ee..1cd8daf881 100644
--- a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
+++ b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
@@ -164,7 +164,7 @@ private:
if (vertexCount == 0 || indexCount == 0) {
return;
}
- const GrVertexBuffer* vertexBuffer;
+ const GrBuffer* vertexBuffer;
GrMesh mesh;
int firstVertex;
void* verts = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer,
@@ -175,7 +175,7 @@ private:
}
memcpy(verts, vertices, vertexCount * vertexStride);
- const GrIndexBuffer* indexBuffer;
+ const GrBuffer* indexBuffer;
int firstIndex;
uint16_t* idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
if (!idxs) {
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.cpp b/src/gpu/batches/GrAAStrokeRectBatch.cpp
index 54bc495a4d..2ce5eca7a6 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrAAStrokeRectBatch.cpp
@@ -123,8 +123,7 @@ private:
static const int kBevelVertexCnt = 24;
static const int kNumBevelRectsInIndexBuffer = 256;
- static const GrIndexBuffer* GetIndexBuffer(GrResourceProvider* resourceProvider,
- bool miterStroke);
+ static const GrBuffer* GetIndexBuffer(GrResourceProvider* resourceProvider, bool miterStroke);
GrColor color() const { return fBatch.fColor; }
bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
@@ -206,7 +205,7 @@ void AAStrokeRectBatch::onPrepareDraws(Target* target) const {
int indicesPerInstance = this->miterStroke() ? kMiterIndexCnt : kBevelIndexCnt;
int instanceCount = fGeoData.count();
- const SkAutoTUnref<const GrIndexBuffer> indexBuffer(
+ const SkAutoTUnref<const GrBuffer> indexBuffer(
GetIndexBuffer(target->resourceProvider(), this->miterStroke()));
InstancedHelper helper;
void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
@@ -235,8 +234,8 @@ void AAStrokeRectBatch::onPrepareDraws(Target* target) const {
helper.recordDraw(target);
}
-const GrIndexBuffer* AAStrokeRectBatch::GetIndexBuffer(GrResourceProvider* resourceProvider,
- bool miterStroke) {
+const GrBuffer* AAStrokeRectBatch::GetIndexBuffer(GrResourceProvider* resourceProvider,
+ bool miterStroke) {
if (miterStroke) {
static const uint16_t gMiterIndices[] = {
diff --git a/src/gpu/batches/GrAtlasTextBatch.cpp b/src/gpu/batches/GrAtlasTextBatch.cpp
index 8b89039406..654643d339 100644
--- a/src/gpu/batches/GrAtlasTextBatch.cpp
+++ b/src/gpu/batches/GrAtlasTextBatch.cpp
@@ -119,7 +119,7 @@ void GrAtlasTextBatch::onPrepareDraws(Target* target) const {
target->initDraw(gp);
int glyphCount = this->numGlyphs();
- const GrVertexBuffer* vertexBuffer;
+ const GrBuffer* vertexBuffer;
void* vertices = target->makeVertexSpace(vertexStride,
glyphCount * kVerticesPerGlyph,
@@ -181,7 +181,8 @@ void GrAtlasTextBatch::onPrepareDraws(Target* target) const {
void GrAtlasTextBatch::flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) const {
GrMesh mesh;
- int maxGlyphsPerDraw = flushInfo->fIndexBuffer->maxQuads();
+ int maxGlyphsPerDraw =
+ static_cast<int>(flushInfo->fIndexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6);
mesh.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
flushInfo->fIndexBuffer, flushInfo->fVertexOffset,
kVerticesPerGlyph, kIndicesPerGlyph, flushInfo->fGlyphsToFlush,
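
With GrIndexBuffer gone, the batch can no longer ask the buffer for maxQuads(); GrAtlasTextBatch::flush() now derives the per-draw glyph limit from the raw byte size of the quad index buffer, six uint16_t indices per quad. A small self-contained sketch of that arithmetic (not Skia source; the helper name is made up for illustration):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Each quad is drawn as two triangles: 6 uint16_t indices.
static int max_quads_for(std::size_t bufferSizeInBytes) {
    return static_cast<int>(bufferSizeInBytes / sizeof(std::uint16_t) / 6);
}

int main() {
    // An index buffer sized for 256 quads: 256 * 6 * 2 bytes = 3072.
    std::printf("%d\n", max_quads_for(3072));  // prints 256
    return 0;
}
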
diff --git a/src/gpu/batches/GrAtlasTextBatch.h b/src/gpu/batches/GrAtlasTextBatch.h
index 435fb59d49..01d799233e 100644
--- a/src/gpu/batches/GrAtlasTextBatch.h
+++ b/src/gpu/batches/GrAtlasTextBatch.h
@@ -99,8 +99,8 @@ private:
void initBatchTracker(const GrXPOverridesForBatch& overrides) override;
struct FlushInfo {
- SkAutoTUnref<const GrVertexBuffer> fVertexBuffer;
- SkAutoTUnref<const GrIndexBuffer> fIndexBuffer;
+ SkAutoTUnref<const GrBuffer> fVertexBuffer;
+ SkAutoTUnref<const GrBuffer> fIndexBuffer;
int fGlyphsToFlush;
int fVertexOffset;
};
diff --git a/src/gpu/batches/GrDefaultPathRenderer.cpp b/src/gpu/batches/GrDefaultPathRenderer.cpp
index acaff173c6..0a3dcd6a14 100644
--- a/src/gpu/batches/GrDefaultPathRenderer.cpp
+++ b/src/gpu/batches/GrDefaultPathRenderer.cpp
@@ -313,7 +313,7 @@ private:
}
// allocate vertex / index buffers
- const GrVertexBuffer* vertexBuffer;
+ const GrBuffer* vertexBuffer;
int firstVertex;
void* verts = target->makeVertexSpace(vertexStride, maxVertices,
@@ -324,7 +324,7 @@ private:
return;
}
- const GrIndexBuffer* indexBuffer = nullptr;
+ const GrBuffer* indexBuffer = nullptr;
int firstIndex = 0;
void* indices = nullptr;
diff --git a/src/gpu/batches/GrDrawVerticesBatch.cpp b/src/gpu/batches/GrDrawVerticesBatch.cpp
index c76ba7d1ec..20d9f79290 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.cpp
+++ b/src/gpu/batches/GrDrawVerticesBatch.cpp
@@ -106,7 +106,7 @@ void GrDrawVerticesBatch::onPrepareDraws(Target* target) const {
int instanceCount = fGeoData.count();
- const GrVertexBuffer* vertexBuffer;
+ const GrBuffer* vertexBuffer;
int firstVertex;
void* verts = target->makeVertexSpace(vertexStride, fVertexCount, &vertexBuffer, &firstVertex);
@@ -116,7 +116,7 @@ void GrDrawVerticesBatch::onPrepareDraws(Target* target) const {
return;
}
- const GrIndexBuffer* indexBuffer = nullptr;
+ const GrBuffer* indexBuffer = nullptr;
int firstIndex = 0;
uint16_t* indices = nullptr;
diff --git a/src/gpu/batches/GrNinePatch.cpp b/src/gpu/batches/GrNinePatch.cpp
index 3ca004d46c..17ab6c1c30 100644
--- a/src/gpu/batches/GrNinePatch.cpp
+++ b/src/gpu/batches/GrNinePatch.cpp
@@ -96,7 +96,7 @@ private:
size_t vertexStride = gp->getVertexStride();
int instanceCount = fGeoData.count();
- SkAutoTUnref<const GrIndexBuffer> indexBuffer(
+ SkAutoTUnref<const GrBuffer> indexBuffer(
target->resourceProvider()->refQuadIndexBuffer());
InstancedHelper helper;
void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
diff --git a/src/gpu/batches/GrNonAAFillRectBatch.cpp b/src/gpu/batches/GrNonAAFillRectBatch.cpp
index 09643444d0..5e9d1769b5 100644
--- a/src/gpu/batches/GrNonAAFillRectBatch.cpp
+++ b/src/gpu/batches/GrNonAAFillRectBatch.cpp
@@ -26,7 +26,7 @@ public:
out->setKnownSingleComponent(0xff);
}
- static const GrIndexBuffer* GetIndexBuffer(GrResourceProvider* rp) {
+ static const GrBuffer* GetIndexBuffer(GrResourceProvider* rp) {
return rp->refQuadIndexBuffer();
}
diff --git a/src/gpu/batches/GrNonAAStrokeRectBatch.cpp b/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
index a87ab11383..3e5311f7b9 100644
--- a/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
@@ -130,7 +130,7 @@ private:
vertexCount = kVertsPerStrokeRect;
}
- const GrVertexBuffer* vertexBuffer;
+ const GrBuffer* vertexBuffer;
int firstVertex;
void* verts = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer,
diff --git a/src/gpu/batches/GrPLSPathRenderer.cpp b/src/gpu/batches/GrPLSPathRenderer.cpp
index 348681f43f..28ad2069bf 100644
--- a/src/gpu/batches/GrPLSPathRenderer.cpp
+++ b/src/gpu/batches/GrPLSPathRenderer.cpp
@@ -873,7 +873,7 @@ public:
}
if (triVertices.count()) {
- const GrVertexBuffer* triVertexBuffer;
+ const GrBuffer* triVertexBuffer;
int firstTriVertex;
size_t triStride = triangleProcessor->getVertexStride();
PLSVertex* triVerts = reinterpret_cast<PLSVertex*>(target->makeVertexSpace(
@@ -892,7 +892,7 @@ public:
}
if (quadVertices.count()) {
- const GrVertexBuffer* quadVertexBuffer;
+ const GrBuffer* quadVertexBuffer;
int firstQuadVertex;
size_t quadStride = quadProcessor->getVertexStride();
PLSVertex* quadVerts = reinterpret_cast<PLSVertex*>(target->makeVertexSpace(
@@ -916,7 +916,7 @@ public:
SkPath::FillType::kEvenOdd_FillType,
invert,
this->usesLocalCoords()));
- const GrVertexBuffer* rectVertexBuffer;
+ const GrBuffer* rectVertexBuffer;
size_t finishStride = finishProcessor->getVertexStride();
int firstRectVertex;
static const int kRectVertexCount = 6;
diff --git a/src/gpu/batches/GrTInstanceBatch.h b/src/gpu/batches/GrTInstanceBatch.h
index 22d4f52792..fdd0662d06 100644
--- a/src/gpu/batches/GrTInstanceBatch.h
+++ b/src/gpu/batches/GrTInstanceBatch.h
@@ -34,7 +34,7 @@
* const GrGeometryProcessor* CreateGP(const Geometry& seedGeometry,
* const GrXPOverridesForBatch& overrides)
*
- * const GrIndexBuffer* GetIndexBuffer(GrResourceProvider*)
+ * const GrBuffer* GetIndexBuffer(GrResourceProvider*)
*
* Tesselate(intptr_t vertices, size_t vertexStride, const Geometry& geo,
* const GrXPOverridesForBatch& overrides)
@@ -101,7 +101,7 @@ private:
size_t vertexStride = gp->getVertexStride();
int instanceCount = fGeoData.count();
- SkAutoTUnref<const GrIndexBuffer> indexBuffer(
+ SkAutoTUnref<const GrBuffer> indexBuffer(
Impl::GetIndexBuffer(target->resourceProvider()));
InstancedHelper helper;
void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
diff --git a/src/gpu/batches/GrTessellatingPathRenderer.cpp b/src/gpu/batches/GrTessellatingPathRenderer.cpp
index c00e8ecbe6..4ba01884dc 100644
--- a/src/gpu/batches/GrTessellatingPathRenderer.cpp
+++ b/src/gpu/batches/GrTessellatingPathRenderer.cpp
@@ -45,7 +45,7 @@ private:
}
};
-bool cache_match(GrVertexBuffer* vertexBuffer, SkScalar tol, int* actualCount) {
+bool cache_match(GrBuffer* vertexBuffer, SkScalar tol, int* actualCount) {
if (!vertexBuffer) {
return false;
}
@@ -68,8 +68,8 @@ public:
}
SkPoint* lock(int vertexCount) override {
size_t size = vertexCount * sizeof(SkPoint);
- fVertexBuffer.reset(fResourceProvider->createVertexBuffer(
- size, GrResourceProvider::kStatic_BufferUsage, 0));
+ fVertexBuffer.reset(fResourceProvider->createBuffer(
+ kVertex_GrBufferType, size, kStatic_GrAccessPattern, 0));
if (!fVertexBuffer.get()) {
return nullptr;
}
@@ -89,9 +89,9 @@ public:
}
fVertices = nullptr;
}
- GrVertexBuffer* vertexBuffer() { return fVertexBuffer.get(); }
+ GrBuffer* vertexBuffer() { return fVertexBuffer.get(); }
private:
- SkAutoTUnref<GrVertexBuffer> fVertexBuffer;
+ SkAutoTUnref<GrBuffer> fVertexBuffer;
GrResourceProvider* fResourceProvider;
bool fCanMapVB;
SkPoint* fVertices;
@@ -158,8 +158,7 @@ private:
fStroke.asUniqueKeyFragment(&builder[2 + clipBoundsSize32]);
builder.finish();
GrResourceProvider* rp = target->resourceProvider();
- SkAutoTUnref<GrVertexBuffer> cachedVertexBuffer(
- rp->findAndRefTByUniqueKey<GrVertexBuffer>(key));
+ SkAutoTUnref<GrBuffer> cachedVertexBuffer(rp->findAndRefTByUniqueKey<GrBuffer>(key));
int actualCount;
SkScalar screenSpaceTol = GrPathUtils::kDefaultTolerance;
SkScalar tol = GrPathUtils::scaleToleranceToSrc(
@@ -226,7 +225,7 @@ private:
this->draw(target, gp.get());
}
- void drawVertices(Target* target, const GrGeometryProcessor* gp, const GrVertexBuffer* vb,
+ void drawVertices(Target* target, const GrGeometryProcessor* gp, const GrBuffer* vb,
int firstVertex, int count) const {
SkASSERT(gp->getVertexStride() == sizeof(SkPoint));
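
With the typed vertex buffer class retired, the tessellating path renderer allocates through the single createBuffer() entry point on GrResourceProvider, passing the intended GrBufferType and GrAccessPattern. A hedged fragment contrasting the old and new call shapes (the call arguments come from the hunk above; the wrapper name is made up, and this assumes the Skia headers and a live resource provider, so it is a sketch rather than a standalone program):

// Sketch only: shows the allocation call introduced by this change.
GrBuffer* make_static_vertex_buffer(GrResourceProvider* rp, int vertexCount) {
    size_t size = vertexCount * sizeof(SkPoint);
    // Previously: rp->createVertexBuffer(size, GrResourceProvider::kStatic_BufferUsage, 0);
    return rp->createBuffer(kVertex_GrBufferType, size, kStatic_GrAccessPattern, 0);
}
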
diff --git a/src/gpu/batches/GrTestBatch.h b/src/gpu/batches/GrTestBatch.h
index 3efa3578d5..c28f98d716 100644
--- a/src/gpu/batches/GrTestBatch.h
+++ b/src/gpu/batches/GrTestBatch.h
@@ -10,7 +10,6 @@
#include "GrBatchFlushState.h"
#include "GrGeometryProcessor.h"
-#include "GrVertexBuffer.h"
#include "batches/GrVertexBatch.h"
diff --git a/src/gpu/batches/GrVertexBatch.cpp b/src/gpu/batches/GrVertexBatch.cpp
index 17bcf8e3af..fc7a1e4fae 100644
--- a/src/gpu/batches/GrVertexBatch.cpp
+++ b/src/gpu/batches/GrVertexBatch.cpp
@@ -17,14 +17,14 @@ void GrVertexBatch::onPrepare(GrBatchFlushState* state) {
}
void* GrVertexBatch::InstancedHelper::init(Target* target, GrPrimitiveType primType,
- size_t vertexStride, const GrIndexBuffer* indexBuffer,
+ size_t vertexStride, const GrBuffer* indexBuffer,
int verticesPerInstance, int indicesPerInstance,
int instancesToDraw) {
SkASSERT(target);
if (!indexBuffer) {
return nullptr;
}
- const GrVertexBuffer* vertexBuffer;
+ const GrBuffer* vertexBuffer;
int firstVertex;
int vertexCount = verticesPerInstance * instancesToDraw;
void* vertices = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex);
@@ -49,7 +49,7 @@ void GrVertexBatch::InstancedHelper::recordDraw(Target* target) {
void* GrVertexBatch::QuadHelper::init(Target* target, size_t vertexStride,
int quadsToDraw) {
- SkAutoTUnref<const GrIndexBuffer> quadIndexBuffer(
+ SkAutoTUnref<const GrBuffer> quadIndexBuffer(
target->resourceProvider()->refQuadIndexBuffer());
if (!quadIndexBuffer) {
SkDebugf("Could not get quad index buffer.");
diff --git a/src/gpu/batches/GrVertexBatch.h b/src/gpu/batches/GrVertexBatch.h
index 16ba603962..2af4dd1cad 100644
--- a/src/gpu/batches/GrVertexBatch.h
+++ b/src/gpu/batches/GrVertexBatch.h
@@ -35,7 +35,7 @@ protected:
/** Returns the allocated storage for the vertices. The caller should populate the
vertices before calling issueDraws(). */
void* init(Target*, GrPrimitiveType, size_t vertexStride,
- const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance,
+ const GrBuffer*, int verticesPerInstance, int indicesPerInstance,
int instancesToDraw);
/** Call after init() to issue draws to the batch target.*/
diff --git a/src/gpu/effects/GrDashingEffect.cpp b/src/gpu/effects/GrDashingEffect.cpp
index 51507b1e66..344c125f7e 100644
--- a/src/gpu/effects/GrDashingEffect.cpp
+++ b/src/gpu/effects/GrDashingEffect.cpp
@@ -17,7 +17,6 @@
#include "GrInvariantOutput.h"
#include "GrProcessor.h"
#include "GrStrokeInfo.h"
-#include "GrVertexBuffer.h"
#include "SkGr.h"
#include "batches/GrVertexBatch.h"
#include "glsl/GrGLSLFragmentShaderBuilder.h"
diff --git a/src/gpu/gl/GrGLBuffer.cpp b/src/gpu/gl/GrGLBuffer.cpp
new file mode 100644
index 0000000000..ad56ceb729
--- /dev/null
+++ b/src/gpu/gl/GrGLBuffer.cpp
@@ -0,0 +1,336 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLBuffer.h"
+#include "GrGLGpu.h"
+#include "SkTraceMemoryDump.h"
+
+#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
+#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)
+
+#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
+ #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
+ #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call)
+ #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface)
+#else
+ #define CLEAR_ERROR_BEFORE_ALLOC(iface)
+ #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call)
+ #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
+#endif
+
+#ifdef SK_DEBUG
+#define VALIDATE() this->validate()
+#else
+#define VALIDATE() do {} while(false)
+#endif
+
+GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, GrBufferType type, size_t size,
+ GrAccessPattern accessPattern) {
+ static const int kIsVertexOrIndex = (1 << kVertex_GrBufferType) | (1 << kIndex_GrBufferType);
+ bool cpuBacked = gpu->glCaps().useNonVBOVertexAndIndexDynamicData() &&
+ kDynamic_GrAccessPattern == accessPattern &&
+ ((kIsVertexOrIndex >> type) & 1);
+ SkAutoTUnref<GrGLBuffer> buffer(new GrGLBuffer(gpu, type, size, accessPattern, cpuBacked));
+ if (!cpuBacked && 0 == buffer->fBufferID) {
+ return nullptr;
+ }
+ return buffer.release();
+}
+
+// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
+// objects are implemented as client-side-arrays on tile-deferred architectures.
+#define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW
+
+inline static void get_target_and_usage(GrBufferType type, GrAccessPattern accessPattern,
+ const GrGLCaps& caps, GrGLenum* target, GrGLenum* usage) {
+ static const GrGLenum nonXferTargets[] = {
+ GR_GL_ARRAY_BUFFER,
+ GR_GL_ELEMENT_ARRAY_BUFFER
+ };
+ GR_STATIC_ASSERT(0 == kVertex_GrBufferType);
+ GR_STATIC_ASSERT(1 == kIndex_GrBufferType);
+
+ static const GrGLenum drawUsages[] = {
+ DYNAMIC_DRAW_PARAM, // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
+ GR_GL_STATIC_DRAW,
+ GR_GL_STREAM_DRAW
+ };
+ static const GrGLenum readUsages[] = {
+ GR_GL_DYNAMIC_READ,
+ GR_GL_STATIC_READ,
+ GR_GL_STREAM_READ
+ };
+ GR_STATIC_ASSERT(0 == kDynamic_GrAccessPattern);
+ GR_STATIC_ASSERT(1 == kStatic_GrAccessPattern);
+ GR_STATIC_ASSERT(2 == kStream_GrAccessPattern);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(drawUsages) == 1 + kLast_GrAccessPattern);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(readUsages) == 1 + kLast_GrAccessPattern);
+
+ SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern);
+
+ switch (type) {
+ case kVertex_GrBufferType:
+ case kIndex_GrBufferType:
+ *target = nonXferTargets[type];
+ *usage = drawUsages[accessPattern];
+ break;
+ case kXferCpuToGpu_GrBufferType:
+ if (GrGLCaps::kChromium_TransferBufferType == caps.transferBufferType()) {
+ *target = GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
+ } else {
+ SkASSERT(GrGLCaps::kPBO_TransferBufferType == caps.transferBufferType());
+ *target = GR_GL_PIXEL_UNPACK_BUFFER;
+ }
+ *usage = drawUsages[accessPattern];
+ break;
+ case kXferGpuToCpu_GrBufferType:
+ if (GrGLCaps::kChromium_TransferBufferType == caps.transferBufferType()) {
+ *target = GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
+ } else {
+ SkASSERT(GrGLCaps::kPBO_TransferBufferType == caps.transferBufferType());
+ *target = GR_GL_PIXEL_PACK_BUFFER;
+ }
+ *usage = readUsages[accessPattern];
+ break;
+ default:
+ SkFAIL("Unexpected buffer type.");
+ break;
+ }
+}
+
+GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, GrBufferType type, size_t size, GrAccessPattern accessPattern,
+ bool cpuBacked)
+ : INHERITED(gpu, type, size, accessPattern, cpuBacked),
+ fCPUData(nullptr),
+ fTarget(0),
+ fBufferID(0),
+ fSizeInBytes(size),
+ fUsage(0),
+ fGLSizeInBytes(0) {
+ if (cpuBacked) {
+ if (gpu->caps()->mustClearUploadedBufferData()) {
+ fCPUData = sk_calloc_throw(fSizeInBytes);
+ } else {
+ fCPUData = sk_malloc_flags(fSizeInBytes, SK_MALLOC_THROW);
+ }
+ } else {
+ GL_CALL(GenBuffers(1, &fBufferID));
+ fSizeInBytes = size;
+ get_target_and_usage(type, accessPattern, gpu->glCaps(), &fTarget, &fUsage);
+ if (fBufferID) {
+ gpu->bindBuffer(fBufferID, fTarget);
+ CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface());
+ // make sure driver can allocate memory for this buffer
+ GL_ALLOC_CALL(gpu->glInterface(), BufferData(fTarget,
+ (GrGLsizeiptr) fSizeInBytes,
+ nullptr, // data ptr
+ fUsage));
+ if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) {
+ gpu->releaseBuffer(fBufferID, fTarget);
+ fBufferID = 0;
+ } else {
+ fGLSizeInBytes = fSizeInBytes;
+ }
+ }
+ }
+ VALIDATE();
+ this->registerWithCache();
+}
+
+inline GrGLGpu* GrGLBuffer::glGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrGLGpu*>(this->getGpu());
+}
+
+inline const GrGLCaps& GrGLBuffer::glCaps() const {
+ return this->glGpu()->glCaps();
+}
+
+void GrGLBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ VALIDATE();
+ // make sure we've not been abandoned or already released
+ if (fCPUData) {
+ SkASSERT(!fBufferID);
+ sk_free(fCPUData);
+ fCPUData = nullptr;
+ } else if (fBufferID) {
+ this->glGpu()->releaseBuffer(fBufferID, fTarget);
+ fBufferID = 0;
+ fGLSizeInBytes = 0;
+ }
+ fMapPtr = nullptr;
+ VALIDATE();
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrGLBuffer::onAbandon() {
+ fBufferID = 0;
+ fGLSizeInBytes = 0;
+ fMapPtr = nullptr;
+ sk_free(fCPUData);
+ fCPUData = nullptr;
+ VALIDATE();
+ INHERITED::onAbandon();
+}
+
+void GrGLBuffer::onMap() {
+ if (this->wasDestroyed()) {
+ return;
+ }
+
+ VALIDATE();
+ SkASSERT(!this->isMapped());
+
+ if (0 == fBufferID) {
+ fMapPtr = fCPUData;
+ VALIDATE();
+ return;
+ }
+
+ bool readOnly = (kXferGpuToCpu_GrBufferType == this->type());
+
+ // Handling dirty context is done in the bindBuffer call
+ switch (this->glCaps().mapBufferType()) {
+ case GrGLCaps::kNone_MapBufferType:
+ break;
+ case GrGLCaps::kMapBuffer_MapBufferType:
+ this->glGpu()->bindBuffer(fBufferID, fTarget);
+ // Let driver know it can discard the old data
+ if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != fSizeInBytes) {
+ GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
+ }
+ GL_CALL_RET(fMapPtr, MapBuffer(fTarget, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
+ break;
+ case GrGLCaps::kMapBufferRange_MapBufferType: {
+ this->glGpu()->bindBuffer(fBufferID, fTarget);
+ // Make sure the GL buffer size agrees with fSizeInBytes before mapping.
+ if (fGLSizeInBytes != fSizeInBytes) {
+ GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
+ }
+ GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
+ // TODO: allow the client to specify invalidation in the transfer buffer case.
+ if (kXferCpuToGpu_GrBufferType != this->type()) {
+ writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
+ }
+ GL_CALL_RET(fMapPtr, MapBufferRange(fTarget, 0, fSizeInBytes,
+ readOnly ? GR_GL_MAP_READ_BIT : writeAccess));
+ break;
+ }
+ case GrGLCaps::kChromium_MapBufferType:
+ this->glGpu()->bindBuffer(fBufferID, fTarget);
+ // Make sure the GL buffer size agrees with fSizeInBytes before mapping.
+ if (fGLSizeInBytes != fSizeInBytes) {
+ GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
+ }
+ GL_CALL_RET(fMapPtr, MapBufferSubData(fTarget, 0, fSizeInBytes,
+ readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
+ break;
+ }
+ fGLSizeInBytes = fSizeInBytes;
+ VALIDATE();
+}
+
+void GrGLBuffer::onUnmap() {
+ if (this->wasDestroyed()) {
+ return;
+ }
+
+ VALIDATE();
+ SkASSERT(this->isMapped());
+ if (0 == fBufferID) {
+ fMapPtr = nullptr;
+ return;
+ }
+ // bind buffer handles the dirty context
+ switch (this->glCaps().mapBufferType()) {
+ case GrGLCaps::kNone_MapBufferType:
+ SkDEBUGFAIL("Shouldn't get here.");
+ return;
+ case GrGLCaps::kMapBuffer_MapBufferType: // fall through
+ case GrGLCaps::kMapBufferRange_MapBufferType:
+ this->glGpu()->bindBuffer(fBufferID, fTarget);
+ GL_CALL(UnmapBuffer(fTarget));
+ break;
+ case GrGLCaps::kChromium_MapBufferType:
+ this->glGpu()->bindBuffer(fBufferID, fTarget);
+ GL_CALL(UnmapBufferSubData(fMapPtr));
+ break;
+ }
+ fMapPtr = nullptr;
+}
+
+bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ if (this->wasDestroyed()) {
+ return false;
+ }
+
+ SkASSERT(!this->isMapped());
+ SkASSERT(GR_GL_ARRAY_BUFFER == fTarget || GR_GL_ELEMENT_ARRAY_BUFFER == fTarget);
+ VALIDATE();
+ if (srcSizeInBytes > fSizeInBytes) {
+ return false;
+ }
+ if (0 == fBufferID) {
+ memcpy(fCPUData, src, srcSizeInBytes);
+ return true;
+ }
+ SkASSERT(srcSizeInBytes <= fSizeInBytes);
+ // bindbuffer handles dirty context
+ this->glGpu()->bindBuffer(fBufferID, fTarget);
+
+#if GR_GL_USE_BUFFER_DATA_NULL_HINT
+ if (fSizeInBytes == srcSizeInBytes) {
+ GL_CALL(BufferData(fTarget, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
+ } else {
+ // Before we call glBufferSubData we give the driver a hint using
+ // glBufferData with nullptr. This makes the old buffer contents
+ // inaccessible to future draws. The GPU may still be processing
+ // draws that reference the old contents. With this hint it can
+ // assign a different allocation for the new contents to avoid
+ // flushing the gpu past draws consuming the old contents.
+ // TODO I think we actually want to try calling bufferData here
+ GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
+ GL_CALL(BufferSubData(fTarget, 0, (GrGLsizeiptr) srcSizeInBytes, src));
+ }
+ fGLSizeInBytes = fSizeInBytes;
+#else
+ // Note that we're cheating on the size here. Currently no methods
+ // allow a partial update that preserves contents of non-updated
+ // portions of the buffer (map() does a glBufferData(..size, nullptr..))
+ GL_CALL(BufferData(fTarget, srcSizeInBytes, src, fUsage));
+ fGLSizeInBytes = srcSizeInBytes;
+#endif
+ VALIDATE();
+ return true;
+}
+
+void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const {
+ SkString buffer_id;
+ buffer_id.appendU32(this->bufferID());
+ traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
+ buffer_id.c_str());
+}
+
+#ifdef SK_DEBUG
+
+void GrGLBuffer::validate() const {
+ SkASSERT(GR_GL_ARRAY_BUFFER == fTarget || GR_GL_ELEMENT_ARRAY_BUFFER == fTarget ||
+ GR_GL_PIXEL_PACK_BUFFER == fTarget || GR_GL_PIXEL_UNPACK_BUFFER == fTarget ||
+ GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM == fTarget ||
+ GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == fTarget);
+ // The following assert isn't valid when the buffer has been abandoned:
+ // SkASSERT((0 == fBufferID) == (fCPUData));
+ SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
+ SkASSERT(nullptr == fMapPtr || fCPUData || fGLSizeInBytes <= fSizeInBytes);
+ SkASSERT(nullptr == fCPUData || nullptr == fMapPtr || fCPUData == fMapPtr);
+}
+
+#endif
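
GrGLBuffer::Create() folds the old "CPU-backed dynamic vertex/index buffer" special case into a single bit test: only vertex and index buffers, only with the dynamic access pattern, and only on drivers where glCaps().useNonVBOVertexAndIndexDynamicData() is set, skip the VBO allocation. A self-contained sketch of just that bit test; the enum values mirror the GR_STATIC_ASSERTs above, everything else is stand-in code for illustration:

#include <cstdio>

enum BufferType { kVertex = 0, kIndex = 1, kXferCpuToGpu = 2, kXferGpuToCpu = 3 };

static int is_vertex_or_index(BufferType type) {
    static const int kIsVertexOrIndex = (1 << kVertex) | (1 << kIndex);
    return (kIsVertexOrIndex >> type) & 1;
}

int main() {
    std::printf("%d %d %d %d\n",
                is_vertex_or_index(kVertex),         // 1
                is_vertex_or_index(kIndex),          // 1
                is_vertex_or_index(kXferCpuToGpu),   // 0
                is_vertex_or_index(kXferGpuToCpu));  // 0
    return 0;
}
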
diff --git a/src/gpu/gl/GrGLBuffer.h b/src/gpu/gl/GrGLBuffer.h
new file mode 100644
index 0000000000..90d2c43dfa
--- /dev/null
+++ b/src/gpu/gl/GrGLBuffer.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLBuffer_DEFINED
+#define GrGLBuffer_DEFINED
+
+#include "GrBuffer.h"
+#include "gl/GrGLTypes.h"
+
+class GrGLGpu;
+class GrGLCaps;
+
+class GrGLBuffer : public GrBuffer {
+public:
+ static GrGLBuffer* Create(GrGLGpu*, GrBufferType, size_t size, GrAccessPattern);
+
+ ~GrGLBuffer() {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(0 == fBufferID);
+ }
+
+ GrGLenum target() const { return fTarget; }
+ GrGLuint bufferID() const { return fBufferID; }
+ size_t baseOffset() const { return reinterpret_cast<size_t>(fCPUData); }
+
+protected:
+ GrGLBuffer(GrGLGpu*, GrBufferType, size_t size, GrAccessPattern, bool cpuBacked);
+
+ void onAbandon() override;
+ void onRelease() override;
+ void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const override;
+
+private:
+ GrGLGpu* glGpu() const;
+ const GrGLCaps& glCaps() const;
+
+ void onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+#ifdef SK_DEBUG
+ void validate() const;
+#endif
+
+ void* fCPUData;
+ GrGLenum fTarget; // GL_ARRAY_BUFFER or GL_ELEMENT_ARRAY_BUFFER, e.g.
+ GrGLuint fBufferID;
+ size_t fSizeInBytes;
+ GrGLenum fUsage;
+ size_t fGLSizeInBytes; // In certain cases we make the size of the GL buffer object
+ // smaller or larger than fSizeInBytes.
+
+ typedef GrBuffer INHERITED;
+};
+
+#endif
diff --git a/src/gpu/gl/GrGLBufferImpl.cpp b/src/gpu/gl/GrGLBufferImpl.cpp
deleted file mode 100644
index 2babce8028..0000000000
--- a/src/gpu/gl/GrGLBufferImpl.cpp
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright 2013 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "GrGLBufferImpl.h"
-#include "GrGLGpu.h"
-
-#define GL_CALL(GPU, X) GR_GL_CALL(GPU->glInterface(), X)
-
-#ifdef SK_DEBUG
-#define VALIDATE() this->validate()
-#else
-#define VALIDATE() do {} while(false)
-#endif
-
-GrGLBufferImpl::GrGLBufferImpl(GrGLGpu* gpu, const Desc& desc, GrGLenum bufferType)
- : fDesc(desc)
- , fBufferType(bufferType)
- , fMapPtr(nullptr) {
- if (0 == desc.fID) {
- if (gpu->caps()->mustClearUploadedBufferData()) {
- fCPUData = sk_calloc_throw(desc.fSizeInBytes);
- } else {
- fCPUData = sk_malloc_flags(desc.fSizeInBytes, SK_MALLOC_THROW);
- }
- fGLSizeInBytes = 0;
- } else {
- fCPUData = nullptr;
- // We assume that the GL buffer was created at the desc's size initially.
- fGLSizeInBytes = fDesc.fSizeInBytes;
- }
- VALIDATE();
-}
-
-void GrGLBufferImpl::release(GrGLGpu* gpu) {
- VALIDATE();
- // make sure we've not been abandoned or already released
- if (fCPUData) {
- sk_free(fCPUData);
- fCPUData = nullptr;
- } else if (fDesc.fID) {
- gpu->releaseBuffer(fDesc.fID, fBufferType);
- fDesc.fID = 0;
- fGLSizeInBytes = 0;
- }
- fMapPtr = nullptr;
- VALIDATE();
-}
-
-void GrGLBufferImpl::abandon() {
- fDesc.fID = 0;
- fGLSizeInBytes = 0;
- fMapPtr = nullptr;
- sk_free(fCPUData);
- fCPUData = nullptr;
- VALIDATE();
-}
-
-void* GrGLBufferImpl::map(GrGLGpu* gpu) {
- VALIDATE();
- SkASSERT(!this->isMapped());
- if (0 == fDesc.fID) {
- fMapPtr = fCPUData;
- } else {
- fMapPtr = gpu->mapBuffer(fDesc.fID, fBufferType, fDesc.fUsage, fGLSizeInBytes,
- fDesc.fSizeInBytes);
- fGLSizeInBytes = fDesc.fSizeInBytes;
- }
- VALIDATE();
- return fMapPtr;
-}
-
-void GrGLBufferImpl::unmap(GrGLGpu* gpu) {
- VALIDATE();
- SkASSERT(this->isMapped());
- if (0 != fDesc.fID) {
- gpu->unmapBuffer(fDesc.fID, fBufferType, fMapPtr);
- }
- fMapPtr = nullptr;
-}
-
-bool GrGLBufferImpl::isMapped() const {
- VALIDATE();
- return SkToBool(fMapPtr);
-}
-
-bool GrGLBufferImpl::updateData(GrGLGpu* gpu, const void* src, size_t srcSizeInBytes) {
- SkASSERT(!this->isMapped());
- SkASSERT(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
- VALIDATE();
- if (srcSizeInBytes > fDesc.fSizeInBytes) {
- return false;
- }
- if (0 == fDesc.fID) {
- memcpy(fCPUData, src, srcSizeInBytes);
- return true;
- }
- gpu->bufferData(fDesc.fID, fBufferType, fDesc.fUsage, fDesc.fSizeInBytes, src,
- srcSizeInBytes);
-#if GR_GL_USE_BUFFER_DATA_NULL_HINT
- fGLSizeInBytes = fDesc.fSizeInBytes;
-#else
- fGLSizeInBytes = srcSizeInBytes;
-#endif
- VALIDATE();
- return true;
-}
-
-void GrGLBufferImpl::validate() const {
- SkASSERT(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType ||
- GR_GL_PIXEL_PACK_BUFFER == fBufferType || GR_GL_PIXEL_UNPACK_BUFFER == fBufferType ||
- GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM == fBufferType ||
- GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == fBufferType);
- // The following assert isn't valid when the buffer has been abandoned:
- // SkASSERT((0 == fDesc.fID) == (fCPUData));
- SkASSERT(nullptr == fCPUData || 0 == fGLSizeInBytes);
- SkASSERT(nullptr == fMapPtr || fCPUData || fGLSizeInBytes <= fDesc.fSizeInBytes);
- SkASSERT(nullptr == fCPUData || nullptr == fMapPtr || fCPUData == fMapPtr);
-}
diff --git a/src/gpu/gl/GrGLBufferImpl.h b/src/gpu/gl/GrGLBufferImpl.h
deleted file mode 100644
index a8f2cced37..0000000000
--- a/src/gpu/gl/GrGLBufferImpl.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright 2013 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrGLBufferImpl_DEFINED
-#define GrGLBufferImpl_DEFINED
-
-#include "SkTypes.h"
-#include "gl/GrGLTypes.h"
-
-class GrGLGpu;
-
-/**
- * This class serves as the implementation of GrGL*Buffer classes. It was written to avoid code
- * duplication in those classes.
- */
-class GrGLBufferImpl : SkNoncopyable {
-public:
- enum Usage {
- kStaticDraw_Usage = 0,
- kDynamicDraw_Usage,
- kStreamDraw_Usage,
- kStreamRead_Usage,
-
- kLast_Usage = kStreamRead_Usage
- };
- static const int kUsageCount = kLast_Usage + 1;
-
- struct Desc {
- GrGLuint fID; // set to 0 to indicate buffer is CPU-backed and not a VBO.
- size_t fSizeInBytes;
- Usage fUsage;
- };
-
- GrGLBufferImpl(GrGLGpu*, const Desc&, GrGLenum bufferType);
- ~GrGLBufferImpl() {
- // either release or abandon should have been called by the owner of this object.
- SkASSERT(0 == fDesc.fID);
- }
-
- void abandon();
- void release(GrGLGpu* gpu);
-
- GrGLuint bufferID() const { return fDesc.fID; }
- size_t baseOffset() const { return reinterpret_cast<size_t>(fCPUData); }
- GrGLenum bufferType() const { return fBufferType; }
-
- void* map(GrGLGpu* gpu);
- void unmap(GrGLGpu* gpu);
- bool isMapped() const;
- bool updateData(GrGLGpu* gpu, const void* src, size_t srcSizeInBytes);
-
-private:
- void validate() const;
-
- Desc fDesc;
- GrGLenum fBufferType; // GL_ARRAY_BUFFER or GL_ELEMENT_ARRAY_BUFFER, e.g.
- void* fCPUData;
- void* fMapPtr;
- size_t fGLSizeInBytes; // In certain cases we make the size of the GL buffer object
- // smaller or larger than the size in fDesc.
-
- typedef SkNoncopyable INHERITED;
-};
-
-#endif
diff --git a/src/gpu/gl/GrGLCaps.cpp b/src/gpu/gl/GrGLCaps.cpp
index 50a74febe7..51974d4c02 100644
--- a/src/gpu/gl/GrGLCaps.cpp
+++ b/src/gpu/gl/GrGLCaps.cpp
@@ -400,14 +400,14 @@ void GrGLCaps::init(const GrContextOptions& contextOptions,
// On many GPUs, map memory is very expensive, so we effectively disable it here by setting the
// threshold to the maximum unless the client gives us a hint that map memory is cheap.
- if (fGeometryBufferMapThreshold < 0) {
+ if (fBufferMapThreshold < 0) {
// We think mapping on Chromium will be cheaper once we know ahead of time how much space
// we will use for all GrBatchs. Right now we might wind up mapping a large buffer and using
// a small subset.
#if 0
- fGeometryBufferMapThreshold = kChromium_GrGLDriver == ctxInfo.driver() ? 0 : SK_MaxS32;
+ fBufferMapThreshold = kChromium_GrGLDriver == ctxInfo.driver() ? 0 : SK_MaxS32;
#else
- fGeometryBufferMapThreshold = SK_MaxS32;
+ fBufferMapThreshold = SK_MaxS32;
#endif
}
diff --git a/src/gpu/gl/GrGLDefines.h b/src/gpu/gl/GrGLDefines.h
index ff4c457250..c97e605ba8 100644
--- a/src/gpu/gl/GrGLDefines.h
+++ b/src/gpu/gl/GrGLDefines.h
@@ -122,7 +122,9 @@
#define GR_GL_STREAM_DRAW 0x88E0
#define GR_GL_STREAM_READ 0x88E1
#define GR_GL_STATIC_DRAW 0x88E4
+#define GR_GL_STATIC_READ 0x88E5
#define GR_GL_DYNAMIC_DRAW 0x88E8
+#define GR_GL_DYNAMIC_READ 0x88E9
#define GR_GL_BUFFER_SIZE 0x8764
#define GR_GL_BUFFER_USAGE 0x8765
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index 3a293610f2..6b591494b9 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -6,6 +6,7 @@
*/
#include "GrGLGpu.h"
+#include "GrGLBuffer.h"
#include "GrGLGLSL.h"
#include "GrGLStencilAttachment.h"
#include "GrGLTextureRenderTarget.h"
@@ -843,7 +844,7 @@ bool GrGLGpu::onWritePixels(GrSurface* surface,
bool GrGLGpu::onTransferPixels(GrSurface* surface,
int left, int top, int width, int height,
- GrPixelConfig config, GrTransferBuffer* buffer,
+ GrPixelConfig config, GrBuffer* transferBuffer,
size_t offset, size_t rowBytes) {
GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
@@ -859,16 +860,14 @@ bool GrGLGpu::onTransferPixels(GrSurface* surface,
this->setScratchTextureUnit();
GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
- SkASSERT(!buffer->isMapped());
- GrGLTransferBuffer* glBuffer = reinterpret_cast<GrGLTransferBuffer*>(buffer);
- // bind the transfer buffer
- SkASSERT(GR_GL_PIXEL_UNPACK_BUFFER == glBuffer->bufferType() ||
- GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == glBuffer->bufferType());
- GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID()));
+ SkASSERT(kXferCpuToGpu_GrBufferType == transferBuffer->type());
+ SkASSERT(!transferBuffer->isMapped());
+ const GrGLBuffer* glBuffer = reinterpret_cast<const GrGLBuffer*>(transferBuffer);
+ this->bindBuffer(glBuffer->bufferID(), glBuffer->target());
bool success = false;
GrMipLevel mipLevel;
- mipLevel.fPixels = buffer;
+ mipLevel.fPixels = transferBuffer;
mipLevel.fRowBytes = rowBytes;
SkSTArray<1, GrMipLevel> texels;
texels.push_back(mipLevel);
@@ -1933,111 +1932,8 @@ GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRen
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
-GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
- GrGLVertexBuffer::Desc desc;
- desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage;
- desc.fSizeInBytes = size;
-
- if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) {
- desc.fID = 0;
- GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc);
- return vertexBuffer;
- } else {
- desc.fID = 0;
- GL_CALL(GenBuffers(1, &desc.fID));
- if (desc.fID) {
- fHWGeometryState.setVertexBufferID(this, desc.fID);
- CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
- // make sure driver can allocate memory for this buffer
- GL_ALLOC_CALL(this->glInterface(),
- BufferData(GR_GL_ARRAY_BUFFER,
- (GrGLsizeiptr) desc.fSizeInBytes,
- nullptr, // data ptr
- dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
- if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
- GL_CALL(DeleteBuffers(1, &desc.fID));
- this->notifyVertexBufferDelete(desc.fID);
- return nullptr;
- }
- GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc);
- return vertexBuffer;
- }
- return nullptr;
- }
-}
-
-GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
- GrGLIndexBuffer::Desc desc;
- desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage;
- desc.fSizeInBytes = size;
-
- if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) {
- desc.fID = 0;
- GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc);
- return indexBuffer;
- } else {
- desc.fID = 0;
- GL_CALL(GenBuffers(1, &desc.fID));
- if (desc.fID) {
- fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
- CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
- // make sure driver can allocate memory for this buffer
- GL_ALLOC_CALL(this->glInterface(),
- BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
- (GrGLsizeiptr) desc.fSizeInBytes,
- nullptr, // data ptr
- dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
- if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
- GL_CALL(DeleteBuffers(1, &desc.fID));
- this->notifyIndexBufferDelete(desc.fID);
- return nullptr;
- }
- GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc);
- return indexBuffer;
- }
- return nullptr;
- }
-}
-
-GrTransferBuffer* GrGLGpu::onCreateTransferBuffer(size_t size, TransferType xferType) {
- GrGLCaps::TransferBufferType xferBufferType = this->ctxInfo().caps()->transferBufferType();
- if (GrGLCaps::kNone_TransferBufferType == xferBufferType) {
- return nullptr;
- }
-
- GrGLTransferBuffer::Desc desc;
- bool toGpu = (kCpuToGpu_TransferType == xferType);
- desc.fUsage = toGpu ? GrGLBufferImpl::kStreamDraw_Usage : GrGLBufferImpl::kStreamRead_Usage;
-
- desc.fSizeInBytes = size;
- desc.fID = 0;
- GL_CALL(GenBuffers(1, &desc.fID));
- if (desc.fID) {
- CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
- // make sure driver can allocate memory for this buffer
- GrGLenum target;
- if (GrGLCaps::kChromium_TransferBufferType == xferBufferType) {
- target = toGpu ? GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM
- : GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
- } else {
- SkASSERT(GrGLCaps::kPBO_TransferBufferType == xferBufferType);
- target = toGpu ? GR_GL_PIXEL_UNPACK_BUFFER : GR_GL_PIXEL_PACK_BUFFER;
- }
- GL_CALL(BindBuffer(target, desc.fID));
- GL_ALLOC_CALL(this->glInterface(),
- BufferData(target,
- (GrGLsizeiptr) desc.fSizeInBytes,
- nullptr, // data ptr
- (toGpu ? GR_GL_STREAM_DRAW : GR_GL_STREAM_READ)));
- if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
- GL_CALL(DeleteBuffers(1, &desc.fID));
- return nullptr;
- }
- GrTransferBuffer* transferBuffer = new GrGLTransferBuffer(this, desc, target);
- return transferBuffer;
- }
-
- return nullptr;
+GrBuffer* GrGLGpu::onCreateBuffer(GrBufferType type, size_t size, GrAccessPattern accessPattern) {
+ return GrGLBuffer::Create(this, type, size, accessPattern);
}
void GrGLGpu::flushScissor(const GrScissorState& scissorState,
@@ -2122,18 +2018,18 @@ bool GrGLGpu::flushGLState(const GrPipeline& pipeline, const GrPrimitiveProcesso
void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc,
const GrNonInstancedMesh& mesh,
size_t* indexOffsetInBytes) {
- GrGLVertexBuffer* vbuf;
- vbuf = (GrGLVertexBuffer*) mesh.vertexBuffer();
+ const GrGLBuffer* vbuf;
+ vbuf = static_cast<const GrGLBuffer*>(mesh.vertexBuffer());
SkASSERT(vbuf);
SkASSERT(!vbuf->isMapped());
- GrGLIndexBuffer* ibuf = nullptr;
+ const GrGLBuffer* ibuf = nullptr;
if (mesh.isIndexed()) {
SkASSERT(indexOffsetInBytes);
*indexOffsetInBytes = 0;
- ibuf = (GrGLIndexBuffer*)mesh.indexBuffer();
+ ibuf = static_cast<const GrGLBuffer*>(mesh.indexBuffer());
SkASSERT(ibuf);
SkASSERT(!ibuf->isMapped());
@@ -2223,113 +2119,6 @@ void GrGLGpu::releaseBuffer(GrGLuint id, GrGLenum type) {
}
}
-static GrGLenum get_gl_usage(GrGLBufferImpl::Usage usage) {
- static const GrGLenum grToGL[] = {
- GR_GL_STATIC_DRAW, // GrGLBufferImpl::kStaticDraw_Usage
- DYNAMIC_USAGE_PARAM, // GrGLBufferImpl::kDynamicDraw_Usage
- GR_GL_STREAM_DRAW, // GrGLBufferImpl::kStreamDraw_Usage
- GR_GL_STREAM_READ, // GrGLBufferImpl::kStreamRead_Usage
- };
- static_assert(SK_ARRAY_COUNT(grToGL) == GrGLBufferImpl::kUsageCount, "array_size_mismatch");
-
- return grToGL[usage];
-}
-
-void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage,
- size_t currentSize, size_t requestedSize) {
- void* mapPtr = nullptr;
- GrGLenum glUsage = get_gl_usage(usage);
- bool readOnly = (GrGLBufferImpl::kStreamRead_Usage == usage);
-
- // Handling dirty context is done in the bindBuffer call
- switch (this->glCaps().mapBufferType()) {
- case GrGLCaps::kNone_MapBufferType:
- break;
- case GrGLCaps::kMapBuffer_MapBufferType:
- this->bindBuffer(id, type);
- // Let driver know it can discard the old data
- if (GR_GL_USE_BUFFER_DATA_NULL_HINT || currentSize != requestedSize) {
- GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
- }
- GL_CALL_RET(mapPtr, MapBuffer(type, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
- break;
- case GrGLCaps::kMapBufferRange_MapBufferType: {
- this->bindBuffer(id, type);
- // Make sure the GL buffer size agrees with fDesc before mapping.
- if (currentSize != requestedSize) {
- GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
- }
- GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
- // TODO: allow the client to specify invalidation in the stream draw case
- if (GrGLBufferImpl::kStreamDraw_Usage != usage) {
- writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
- }
- GL_CALL_RET(mapPtr, MapBufferRange(type, 0, requestedSize, readOnly ?
- GR_GL_MAP_READ_BIT :
- writeAccess));
- break;
- }
- case GrGLCaps::kChromium_MapBufferType:
- this->bindBuffer(id, type);
- // Make sure the GL buffer size agrees with fDesc before mapping.
- if (currentSize != requestedSize) {
- GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
- }
- GL_CALL_RET(mapPtr, MapBufferSubData(type, 0, requestedSize, readOnly ?
- GR_GL_READ_ONLY :
- GR_GL_WRITE_ONLY));
- break;
- }
- return mapPtr;
-}
-
-void GrGLGpu::bufferData(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage,
- size_t currentSize, const void* src, size_t srcSizeInBytes) {
- SkASSERT(srcSizeInBytes <= currentSize);
- // bindbuffer handles dirty context
- this->bindBuffer(id, type);
- GrGLenum glUsage = get_gl_usage(usage);
-
-#if GR_GL_USE_BUFFER_DATA_NULL_HINT
- if (currentSize == srcSizeInBytes) {
- GL_CALL(BufferData(type, (GrGLsizeiptr) srcSizeInBytes, src, glUsage));
- } else {
- // Before we call glBufferSubData we give the driver a hint using
- // glBufferData with nullptr. This makes the old buffer contents
- // inaccessible to future draws. The GPU may still be processing
- // draws that reference the old contents. With this hint it can
- // assign a different allocation for the new contents to avoid
- // flushing the gpu past draws consuming the old contents.
- // TODO I think we actually want to try calling bufferData here
- GL_CALL(BufferData(type, currentSize, nullptr, glUsage));
- GL_CALL(BufferSubData(type, 0, (GrGLsizeiptr) srcSizeInBytes, src));
- }
-#else
- // Note that we're cheating on the size here. Currently no methods
- // allow a partial update that preserves contents of non-updated
- // portions of the buffer (map() does a glBufferData(..size, nullptr..))
- GL_CALL(BufferData(type, srcSizeInBytes, src, glUsage));
-#endif
-}
-
-void GrGLGpu::unmapBuffer(GrGLuint id, GrGLenum type, void* mapPtr) {
- // bind buffer handles the dirty context
- switch (this->glCaps().mapBufferType()) {
- case GrGLCaps::kNone_MapBufferType:
- SkDEBUGFAIL("Shouldn't get here.");
- return;
- case GrGLCaps::kMapBuffer_MapBufferType: // fall through
- case GrGLCaps::kMapBufferRange_MapBufferType:
- this->bindBuffer(id, type);
- GL_CALL(UnmapBuffer(type));
- break;
- case GrGLCaps::kChromium_MapBufferType:
- this->bindBuffer(id, type);
- GL_CALL(UnmapBufferSubData(mapPtr));
- break;
- }
-}
-
void GrGLGpu::disableScissor() {
if (kNo_TriState != fHWScissorSettings.fEnabled) {
GL_CALL(Disable(GR_GL_SCISSOR_TEST));
@@ -4351,8 +4140,8 @@ void GrGLGpu::resetShaderCacheForTesting() const {
///////////////////////////////////////////////////////////////////////////////
GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw(
GrGLGpu* gpu,
- const GrGLVertexBuffer* vbuffer,
- const GrGLIndexBuffer* ibuffer) {
+ const GrGLBuffer* vbuffer,
+ const GrGLBuffer* ibuffer) {
SkASSERT(vbuffer);
GrGLuint vbufferID = vbuffer->bufferID();
GrGLuint* ibufferIDPtr = nullptr;
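
The deleted GrGLGpu::bufferData() and the new GrGLBuffer::onUpdateData() share the same update strategy under GR_GL_USE_BUFFER_DATA_NULL_HINT: a full-size upload is a single BufferData call, while a partial upload first orphans the old storage with a null BufferData, so in-flight draws keep their old allocation, and then writes the new range with BufferSubData. A self-contained sketch of that decision, with stand-in callables in place of the GL entry points:

#include <cstddef>
#include <cstdio>

template <typename BufferDataFn, typename BufferSubDataFn>
void update_buffer(std::size_t bufferSize, const void* src, std::size_t srcSize,
                   BufferDataFn bufferData, BufferSubDataFn bufferSubData) {
    if (srcSize == bufferSize) {
        bufferData(bufferSize, src);      // replace the whole allocation in one call
    } else {
        bufferData(bufferSize, nullptr);  // orphan: old contents become inaccessible
        bufferSubData(0, srcSize, src);   // upload only the new range
    }
}

int main() {
    auto bufferData = [](std::size_t size, const void* data) {
        std::printf("BufferData(size=%zu, data=%s)\n", size, data ? "src" : "nullptr");
    };
    auto bufferSubData = [](std::size_t offset, std::size_t size, const void*) {
        std::printf("BufferSubData(offset=%zu, size=%zu)\n", offset, size);
    };
    char src[64] = {};
    update_buffer(256, src, 64, bufferData, bufferSubData);  // partial: orphan + subdata
    update_buffer(64, src, 64, bufferData, bufferSubData);   // full: single BufferData
    return 0;
}
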
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index bcb3c193dc..bc79d2f427 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -10,15 +10,12 @@
#include "GrGLContext.h"
#include "GrGLIRect.h"
-#include "GrGLIndexBuffer.h"
#include "GrGLPathRendering.h"
#include "GrGLProgram.h"
#include "GrGLRenderTarget.h"
#include "GrGLStencilAttachment.h"
#include "GrGLTexture.h"
-#include "GrGLTransferBuffer.h"
#include "GrGLVertexArray.h"
-#include "GrGLVertexBuffer.h"
#include "GrGpu.h"
#include "GrPipelineBuilder.h"
#include "GrTypes.h"
@@ -26,6 +23,7 @@
#include "SkTArray.h"
#include "SkTypes.h"
+class GrGLBuffer;
class GrPipeline;
class GrNonInstancedMesh;
class GrSwizzle;
@@ -101,15 +99,6 @@ public:
void releaseBuffer(GrGLuint id, GrGLenum type);
- // sizes are in bytes
- void* mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage, size_t currentSize,
- size_t requestedSize);
-
- void unmapBuffer(GrGLuint id, GrGLenum type, void* mapPtr);
-
- void bufferData(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage, size_t currentSize,
- const void* src, size_t srcSizeInBytes);
-
const GrGLContext* glContextForTesting() const override {
return &this->glContext();
}
@@ -149,9 +138,7 @@ private:
GrGpuResource::LifeCycle lifeCycle,
const SkTArray<GrMipLevel>& texels) override;
- GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) override;
- GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) override;
- GrTransferBuffer* onCreateTransferBuffer(size_t size, TransferType type) override;
+ GrBuffer* onCreateBuffer(GrBufferType, size_t size, GrAccessPattern) override;
GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) override;
GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&,
GrWrapOwnership) override;
@@ -208,7 +195,7 @@ private:
bool onTransferPixels(GrSurface*,
int left, int top, int width, int height,
- GrPixelConfig config, GrTransferBuffer* buffer,
+ GrPixelConfig config, GrBuffer* transferBuffer,
size_t offset, size_t rowBytes) override;
void onResolveRenderTarget(GrRenderTarget* target) override;
@@ -511,8 +498,8 @@ private:
* returned GrGLAttribArrayState should be used to set vertex attribute arrays.
*/
GrGLAttribArrayState* bindArrayAndBuffersToDraw(GrGLGpu* gpu,
- const GrGLVertexBuffer* vbuffer,
- const GrGLIndexBuffer* ibuffer);
+ const GrGLBuffer* vbuffer,
+ const GrGLBuffer* ibuffer);
/** Variants of the above that take GL buffer IDs. Note that 0 does not imply that a
buffer won't be bound. The "default buffer" will be bound, which is used for client-side
diff --git a/src/gpu/gl/GrGLIndexBuffer.cpp b/src/gpu/gl/GrGLIndexBuffer.cpp
deleted file mode 100644
index 5a794ad824..0000000000
--- a/src/gpu/gl/GrGLIndexBuffer.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2011 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "GrGLIndexBuffer.h"
-#include "GrGLGpu.h"
-#include "SkTraceMemoryDump.h"
-
-GrGLIndexBuffer::GrGLIndexBuffer(GrGLGpu* gpu, const Desc& desc)
- : INHERITED(gpu, desc.fSizeInBytes, GrGLBufferImpl::kDynamicDraw_Usage == desc.fUsage,
- 0 == desc.fID)
- , fImpl(gpu, desc, GR_GL_ELEMENT_ARRAY_BUFFER) {
- this->registerWithCache();
-}
-
-void GrGLIndexBuffer::onRelease() {
- if (!this->wasDestroyed()) {
- fImpl.release(this->getGpuGL());
- }
-
- INHERITED::onRelease();
-}
-
-void GrGLIndexBuffer::onAbandon() {
- fImpl.abandon();
- INHERITED::onAbandon();
-}
-
-void* GrGLIndexBuffer::onMap() {
- if (!this->wasDestroyed()) {
- return fImpl.map(this->getGpuGL());
- } else {
- return nullptr;
- }
-}
-
-void GrGLIndexBuffer::onUnmap() {
- if (!this->wasDestroyed()) {
- fImpl.unmap(this->getGpuGL());
- }
-}
-
-bool GrGLIndexBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
- if (!this->wasDestroyed()) {
- return fImpl.updateData(this->getGpuGL(), src, srcSizeInBytes);
- } else {
- return false;
- }
-}
-
-void GrGLIndexBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
- const SkString& dumpName) const {
- SkString buffer_id;
- buffer_id.appendU32(this->bufferID());
- traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
- buffer_id.c_str());
-}
diff --git a/src/gpu/gl/GrGLIndexBuffer.h b/src/gpu/gl/GrGLIndexBuffer.h
deleted file mode 100644
index 628970a0fa..0000000000
--- a/src/gpu/gl/GrGLIndexBuffer.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2011 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrGLIndexBuffer_DEFINED
-#define GrGLIndexBuffer_DEFINED
-
-#include "GrIndexBuffer.h"
-#include "GrGLBufferImpl.h"
-#include "gl/GrGLInterface.h"
-
-class GrGLGpu;
-
-class GrGLIndexBuffer : public GrIndexBuffer {
-
-public:
- typedef GrGLBufferImpl::Desc Desc;
-
- GrGLIndexBuffer(GrGLGpu* gpu, const Desc& desc);
-
- GrGLuint bufferID() const { return fImpl.bufferID(); }
- size_t baseOffset() const { return fImpl.baseOffset(); }
-
-protected:
- void onAbandon() override;
- void onRelease() override;
- void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
- const SkString& dumpName) const override;
-
-private:
- void* onMap() override;
- void onUnmap() override;
- bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
-
- GrGLGpu* getGpuGL() const {
- SkASSERT(!this->wasDestroyed());
- return (GrGLGpu*)(this->getGpu());
- }
-
- GrGLBufferImpl fImpl;
-
- typedef GrIndexBuffer INHERITED;
-};
-
-#endif
diff --git a/src/gpu/gl/GrGLTransferBuffer.cpp b/src/gpu/gl/GrGLTransferBuffer.cpp
deleted file mode 100755
index b7ee766d92..0000000000
--- a/src/gpu/gl/GrGLTransferBuffer.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "GrGLTransferBuffer.h"
-#include "GrGLGpu.h"
-#include "SkTraceMemoryDump.h"
-
-GrGLTransferBuffer::GrGLTransferBuffer(GrGLGpu* gpu, const Desc& desc, GrGLenum type)
- : INHERITED(gpu, desc.fSizeInBytes)
- , fImpl(gpu, desc, type) {
- this->registerWithCache();
-}
-
-void GrGLTransferBuffer::onRelease() {
- if (!this->wasDestroyed()) {
- fImpl.release(this->getGpuGL());
- }
-
- INHERITED::onRelease();
-}
-
-void GrGLTransferBuffer::onAbandon() {
- fImpl.abandon();
- INHERITED::onAbandon();
-}
-
-void* GrGLTransferBuffer::onMap() {
- if (!this->wasDestroyed()) {
- return fImpl.map(this->getGpuGL());
- } else {
- return nullptr;
- }
-}
-
-void GrGLTransferBuffer::onUnmap() {
- if (!this->wasDestroyed()) {
- fImpl.unmap(this->getGpuGL());
- }
-}
-
-void GrGLTransferBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
- const SkString& dumpName) const {
- SkString buffer_id;
- buffer_id.appendU32(this->bufferID());
- traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
- buffer_id.c_str());
-}
diff --git a/src/gpu/gl/GrGLTransferBuffer.h b/src/gpu/gl/GrGLTransferBuffer.h
deleted file mode 100755
index e01d4447df..0000000000
--- a/src/gpu/gl/GrGLTransferBuffer.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrGLTransferBuffer_DEFINED
-#define GrGLTransferBuffer_DEFINED
-
-#include "GrTransferBuffer.h"
-#include "GrGLBufferImpl.h"
-#include "gl/GrGLInterface.h"
-
-class GrGLGpu;
-
-class GrGLTransferBuffer : public GrTransferBuffer {
-
-public:
- typedef GrGLBufferImpl::Desc Desc;
-
- GrGLTransferBuffer(GrGLGpu* gpu, const Desc& desc, GrGLenum type);
-
- GrGLuint bufferID() const { return fImpl.bufferID(); }
- size_t baseOffset() const { return fImpl.baseOffset(); }
- GrGLenum bufferType() const { return fImpl.bufferType(); }
-
-protected:
- void onAbandon() override;
- void onRelease() override;
- void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
- const SkString& dumpName) const override;
-
-private:
- void* onMap() override;
- void onUnmap() override;
-
- GrGLGpu* getGpuGL() const {
- SkASSERT(!this->wasDestroyed());
- return (GrGLGpu*)(this->getGpu());
- }
-
- GrGLBufferImpl fImpl;
-
- typedef GrTransferBuffer INHERITED;
-};
-
-#endif
diff --git a/src/gpu/gl/GrGLVertexArray.h b/src/gpu/gl/GrGLVertexArray.h
index f5a97672f3..4a99d59058 100644
--- a/src/gpu/gl/GrGLVertexArray.h
+++ b/src/gpu/gl/GrGLVertexArray.h
@@ -13,8 +13,6 @@
#include "gl/GrGLTypes.h"
#include "SkTArray.h"
-class GrGLVertexBuffer;
-class GrGLIndexBuffer;
class GrGLGpu;
/**
diff --git a/src/gpu/gl/GrGLVertexBuffer.cpp b/src/gpu/gl/GrGLVertexBuffer.cpp
deleted file mode 100644
index 2294844fc4..0000000000
--- a/src/gpu/gl/GrGLVertexBuffer.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2011 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "GrGLVertexBuffer.h"
-#include "GrGLGpu.h"
-#include "SkTraceMemoryDump.h"
-
-GrGLVertexBuffer::GrGLVertexBuffer(GrGLGpu* gpu, const Desc& desc)
- : INHERITED(gpu, desc.fSizeInBytes, GrGLBufferImpl::kDynamicDraw_Usage == desc.fUsage,
- 0 == desc.fID)
- , fImpl(gpu, desc, GR_GL_ARRAY_BUFFER) {
- this->registerWithCache();
-}
-
-void GrGLVertexBuffer::onRelease() {
- if (!this->wasDestroyed()) {
- fImpl.release(this->getGpuGL());
- }
-
- INHERITED::onRelease();
-}
-
-void GrGLVertexBuffer::onAbandon() {
- fImpl.abandon();
- INHERITED::onAbandon();
-}
-
-void* GrGLVertexBuffer::onMap() {
- if (!this->wasDestroyed()) {
- return fImpl.map(this->getGpuGL());
- } else {
- return nullptr;
- }
-}
-
-void GrGLVertexBuffer::onUnmap() {
- if (!this->wasDestroyed()) {
- fImpl.unmap(this->getGpuGL());
- }
-}
-
-bool GrGLVertexBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
- if (!this->wasDestroyed()) {
- return fImpl.updateData(this->getGpuGL(), src, srcSizeInBytes);
- } else {
- return false;
- }
-}
-
-void GrGLVertexBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
- const SkString& dumpName) const {
- SkString buffer_id;
- buffer_id.appendU32(this->bufferID());
- traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
- buffer_id.c_str());
-}
diff --git a/src/gpu/gl/GrGLVertexBuffer.h b/src/gpu/gl/GrGLVertexBuffer.h
deleted file mode 100644
index 93c6b2badd..0000000000
--- a/src/gpu/gl/GrGLVertexBuffer.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2011 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrGLVertexBuffer_DEFINED
-#define GrGLVertexBuffer_DEFINED
-
-#include "GrVertexBuffer.h"
-#include "GrGLBufferImpl.h"
-#include "gl/GrGLInterface.h"
-
-class GrGLGpu;
-
-class GrGLVertexBuffer : public GrVertexBuffer {
-
-public:
- typedef GrGLBufferImpl::Desc Desc;
-
- GrGLVertexBuffer(GrGLGpu* gpu, const Desc& desc);
-
- GrGLuint bufferID() const { return fImpl.bufferID(); }
- size_t baseOffset() const { return fImpl.baseOffset(); }
-
-protected:
- void onAbandon() override;
- void onRelease() override;
- void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
- const SkString& dumpName) const override;
-
-private:
- void* onMap() override;
- void onUnmap() override;
- bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
-
- GrGLGpu* getGpuGL() const {
- SkASSERT(!this->wasDestroyed());
- return (GrGLGpu*)(this->getGpu());
- }
-
- GrGLBufferImpl fImpl;
-
- typedef GrVertexBuffer INHERITED;
-};
-
-#endif
diff --git a/src/gpu/vk/GrVkCaps.cpp b/src/gpu/vk/GrVkCaps.cpp
index 7cfddbab2a..35aef7a29a 100644
--- a/src/gpu/vk/GrVkCaps.cpp
+++ b/src/gpu/vk/GrVkCaps.cpp
@@ -29,7 +29,7 @@ GrVkCaps::GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface*
fUseDrawInsteadOfClear = false; //TODO: figure this out
fMapBufferFlags = kNone_MapFlags; //TODO: figure this out
- fGeometryBufferMapThreshold = SK_MaxS32; //TODO: figure this out
+ fBufferMapThreshold = SK_MaxS32; //TODO: figure this out
fMaxRenderTargetSize = 4096; // minimum required by spec
fMaxTextureSize = 4096; // minimum required by spec
@@ -112,7 +112,7 @@ void GrVkCaps::initGrCaps(const VkPhysicalDeviceProperties& properties,
// Assuming since we will always map in the end to upload the data we might as well just map
// from the get go. There is no hard data to suggest this is faster or slower.
- fGeometryBufferMapThreshold = 0;
+ fBufferMapThreshold = 0;
fMapBufferFlags = kCanMap_MapFlag | kSubset_MapFlag;
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 21e4ee358d..5796f11d51 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -171,18 +171,26 @@ void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
}
///////////////////////////////////////////////////////////////////////////////
-GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
- return GrVkVertexBuffer::Create(this, size, dynamic);
-}
-
-GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
- return GrVkIndexBuffer::Create(this, size, dynamic);
-}
-
-GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
- GrVkBuffer::Type bufferType = kCpuToGpu_TransferType ? GrVkBuffer::kCopyRead_Type
- : GrVkBuffer::kCopyWrite_Type;
- return GrVkTransferBuffer::Create(this, size, bufferType);
+GrBuffer* GrVkGpu::onCreateBuffer(GrBufferType type, size_t size, GrAccessPattern accessPattern) {
+ switch (type) {
+ case kVertex_GrBufferType:
+ SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
+ kStatic_GrAccessPattern == accessPattern);
+ return GrVkVertexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
+ case kIndex_GrBufferType:
+ SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
+ kStatic_GrAccessPattern == accessPattern);
+ return GrVkIndexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
+ case kXferCpuToGpu_GrBufferType:
+ SkASSERT(kStream_GrAccessPattern == accessPattern);
+ return GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyRead_Type);
+ case kXferGpuToCpu_GrBufferType:
+ SkASSERT(kStream_GrAccessPattern == accessPattern);
+ return GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyWrite_Type);
+ default:
+ SkFAIL("Unknown buffer type.");
+ return nullptr;
+ }
}
////////////////////////////////////////////////////////////////////////////////
@@ -1217,8 +1225,9 @@ bool GrVkGpu::onReadPixels(GrSurface* surface,
false);
GrVkTransferBuffer* transferBuffer =
- reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
- kGpuToCpu_TransferType));
+ static_cast<GrVkTransferBuffer*>(this->createBuffer(kXferGpuToCpu_GrBufferType,
+ rowBytes * height,
+ kStream_GrAccessPattern));
bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
VkOffset3D offset = {
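
The GrVkGpu.cpp hunk above is the heart of the Vulkan-side change: three per-type factories collapse into a single onCreateBuffer() that dispatches on GrBufferType, and callers now request any flavor of buffer through one createBuffer() call, as the onReadPixels hunk shows for the GPU-to-CPU case. A minimal caller-side sketch, assuming direct access to GrGpu::createBuffer() (most Skia code reaches buffers through GrResourceProvider instead); the local names are illustrative.

    #include "GrBuffer.h"
    #include "GrGpu.h"

    // The four buffer types funnel through one entry point. The access
    // patterns mirror the asserts in GrVkGpu::onCreateBuffer above.
    static void makeBuffers(GrGpu* gpu, size_t vbSize, size_t ibSize, size_t xferSize) {
        GrBuffer* vertexBuf = gpu->createBuffer(kVertex_GrBufferType, vbSize,
                                                kDynamic_GrAccessPattern);
        GrBuffer* indexBuf  = gpu->createBuffer(kIndex_GrBufferType, ibSize,
                                                kStatic_GrAccessPattern);
        // Transfer buffers are stream-access in both directions.
        GrBuffer* upload    = gpu->createBuffer(kXferCpuToGpu_GrBufferType, xferSize,
                                                kStream_GrAccessPattern);
        GrBuffer* readback  = gpu->createBuffer(kXferGpuToCpu_GrBufferType, xferSize,
                                                kStream_GrAccessPattern);
        // ... use the buffers, then release the refs ...
        vertexBuf->unref(); indexBuf->unref(); upload->unref(); readback->unref();
    }
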
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index a6911f773b..a9a8a41d97 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -136,9 +136,7 @@ private:
GrRenderTarget* onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&,
GrWrapOwnership) override { return NULL; }
- GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) override;
- GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) override;
- GrTransferBuffer* onCreateTransferBuffer(size_t size, TransferType type) override;
+ GrBuffer* onCreateBuffer(GrBufferType, size_t size, GrAccessPattern) override;
void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) override;
@@ -161,7 +159,7 @@ private:
bool onTransferPixels(GrSurface*,
int left, int top, int width, int height,
- GrPixelConfig config, GrTransferBuffer* buffer,
+ GrPixelConfig config, GrBuffer* transferBuffer,
size_t offset, size_t rowBytes) override { return false; }
void onResolveRenderTarget(GrRenderTarget* target) override {}
diff --git a/src/gpu/vk/GrVkIndexBuffer.cpp b/src/gpu/vk/GrVkIndexBuffer.cpp
index 52f7bd59b7..6cec856cd4 100644
--- a/src/gpu/vk/GrVkIndexBuffer.cpp
+++ b/src/gpu/vk/GrVkIndexBuffer.cpp
@@ -10,7 +10,8 @@
GrVkIndexBuffer::GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
const GrVkBuffer::Resource* bufferResource)
- : INHERITED(gpu, desc.fSizeInBytes, desc.fDynamic, false)
+ : INHERITED(gpu, kIndex_GrBufferType, desc.fSizeInBytes,
+ desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern, false)
, GrVkBuffer(desc, bufferResource) {
this->registerWithCache();
}
@@ -47,11 +48,9 @@ void GrVkIndexBuffer::onAbandon() {
INHERITED::onAbandon();
}
-void* GrVkIndexBuffer::onMap() {
+void GrVkIndexBuffer::onMap() {
if (!this->wasDestroyed()) {
- return this->vkMap(this->getVkGpu());
- } else {
- return NULL;
+ this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu());
}
}
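
The onMap() change here (and, below, in GrVkVertexBuffer.cpp and GrVkTransferBuffer.h) reflects the new base-class contract: onMap() no longer returns a pointer, it writes into the protected fMapPtr owned by GrBuffer, and the base class hands that cached pointer back to callers. The following simplified sketch is inferred from the diff rather than copied from the GrBuffer.h added earlier in this change; the class name is a placeholder, and the real GrBuffer also carries GrGpuResource bookkeeping and debug-only checks.

    // Placeholder sketch of the map()/onMap() split implied by the overrides above.
    class GrBufferLike {
    public:
        virtual ~GrBufferLike() {}

        void* map() {
            if (!fMapPtr) {
                this->onMap();             // backend fills in fMapPtr (see the Vk overrides above)
            }
            return fMapPtr;
        }
        void unmap() {
            this->onUnmap();
            fMapPtr = nullptr;
        }
        bool isMapped() const { return fMapPtr != nullptr; }

    protected:
        void* fMapPtr = nullptr;           // written by subclasses, e.g. GrVkIndexBuffer::onMap()

    private:
        virtual void onMap() = 0;
        virtual void onUnmap() = 0;
    };
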
diff --git a/src/gpu/vk/GrVkIndexBuffer.h b/src/gpu/vk/GrVkIndexBuffer.h
index 84bbbd3377..4bca5a6b5c 100644
--- a/src/gpu/vk/GrVkIndexBuffer.h
+++ b/src/gpu/vk/GrVkIndexBuffer.h
@@ -8,13 +8,13 @@
#ifndef GrVkIndexBuffer_DEFINED
#define GrVkIndexBuffer_DEFINED
-#include "GrIndexBuffer.h"
+#include "GrBuffer.h"
#include "GrVkBuffer.h"
#include "vk/GrVkInterface.h"
class GrVkGpu;
-class GrVkIndexBuffer : public GrIndexBuffer, public GrVkBuffer {
+class GrVkIndexBuffer : public GrBuffer, public GrVkBuffer {
public:
static GrVkIndexBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
@@ -27,13 +27,13 @@ private:
GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
const GrVkBuffer::Resource* resource);
- void* onMap() override;
+ void onMap() override;
void onUnmap() override;
bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
GrVkGpu* getVkGpu() const;
- typedef GrIndexBuffer INHERITED;
+ typedef GrBuffer INHERITED;
};
#endif
diff --git a/src/gpu/vk/GrVkTransferBuffer.cpp b/src/gpu/vk/GrVkTransferBuffer.cpp
index 3730627764..43fd3af9a9 100644
--- a/src/gpu/vk/GrVkTransferBuffer.cpp
+++ b/src/gpu/vk/GrVkTransferBuffer.cpp
@@ -31,7 +31,9 @@ GrVkTransferBuffer* GrVkTransferBuffer::Create(GrVkGpu* gpu, size_t size, GrVkBu
GrVkTransferBuffer::GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
const GrVkBuffer::Resource* bufferResource)
- : INHERITED(gpu, desc.fSizeInBytes)
+ : INHERITED(gpu, kCopyRead_Type == desc.fType ?
+ kXferCpuToGpu_GrBufferType : kXferGpuToCpu_GrBufferType,
+ desc.fSizeInBytes, kStream_GrAccessPattern, false)
, GrVkBuffer(desc, bufferResource) {
this->registerWithCache();
}
diff --git a/src/gpu/vk/GrVkTransferBuffer.h b/src/gpu/vk/GrVkTransferBuffer.h
index f978df95fd..c6ca2147b3 100644
--- a/src/gpu/vk/GrVkTransferBuffer.h
+++ b/src/gpu/vk/GrVkTransferBuffer.h
@@ -8,13 +8,13 @@
#ifndef GrVkTransferBuffer_DEFINED
#define GrVkTransferBuffer_DEFINED
-#include "GrTransferBuffer.h"
+#include "GrBuffer.h"
#include "GrVkBuffer.h"
#include "vk/GrVkInterface.h"
class GrVkGpu;
-class GrVkTransferBuffer : public GrTransferBuffer, public GrVkBuffer {
+class GrVkTransferBuffer : public GrBuffer, public GrVkBuffer {
public:
static GrVkTransferBuffer* Create(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type);
@@ -29,11 +29,9 @@ private:
void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
const SkString& dumpName) const override;
- void* onMap() override {
+ void onMap() override {
if (!this->wasDestroyed()) {
- return this->vkMap(this->getVkGpu());
- } else {
- return nullptr;
+ this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu());
}
}
@@ -43,12 +41,17 @@ private:
}
}
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override {
+ SkFAIL("Not implemented for transfer buffers.");
+ return false;
+ }
+
GrVkGpu* getVkGpu() const {
SkASSERT(!this->wasDestroyed());
return reinterpret_cast<GrVkGpu*>(this->getGpu());
}
- typedef GrTransferBuffer INHERITED;
+ typedef GrBuffer INHERITED;
};
#endif
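
One behavioral detail worth noting from the GrVkTransferBuffer changes: onUpdateData() now simply fails, so mapping is the only supported way to fill a transfer buffer. A minimal CPU-to-GPU staging sketch under that assumption; the helper name and error handling are illustrative, not Skia code.

    #include <cstring>
    #include "GrBuffer.h"
    #include "GrGpu.h"

    // Illustrative helper: fill an upload (CPU-to-GPU) transfer buffer by
    // mapping it, since updateData() is unsupported for transfer buffers.
    static GrBuffer* makeStagingBuffer(GrGpu* gpu, const void* pixels, size_t byteCount) {
        GrBuffer* staging = gpu->createBuffer(kXferCpuToGpu_GrBufferType, byteCount,
                                              kStream_GrAccessPattern);
        if (!staging) {
            return nullptr;
        }
        if (void* dst = staging->map()) {
            memcpy(dst, pixels, byteCount);
            staging->unmap();
            return staging;
        }
        staging->unref();
        return nullptr;
    }
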
diff --git a/src/gpu/vk/GrVkVertexBuffer.cpp b/src/gpu/vk/GrVkVertexBuffer.cpp
index 46c6d28f23..1d3eadb42c 100644
--- a/src/gpu/vk/GrVkVertexBuffer.cpp
+++ b/src/gpu/vk/GrVkVertexBuffer.cpp
@@ -10,7 +10,8 @@
GrVkVertexBuffer::GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
const GrVkBuffer::Resource* bufferResource)
- : INHERITED(gpu, desc.fSizeInBytes, desc.fDynamic, false)
+ : INHERITED(gpu, kVertex_GrBufferType, desc.fSizeInBytes,
+ desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern, false)
, GrVkBuffer(desc, bufferResource) {
this->registerWithCache();
}
@@ -46,11 +47,9 @@ void GrVkVertexBuffer::onAbandon() {
INHERITED::onAbandon();
}
-void* GrVkVertexBuffer::onMap() {
+void GrVkVertexBuffer::onMap() {
if (!this->wasDestroyed()) {
- return this->vkMap(this->getVkGpu());
- } else {
- return NULL;
+ this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu());
}
}
diff --git a/src/gpu/vk/GrVkVertexBuffer.h b/src/gpu/vk/GrVkVertexBuffer.h
index 82f00597b7..7786f6275c 100644
--- a/src/gpu/vk/GrVkVertexBuffer.h
+++ b/src/gpu/vk/GrVkVertexBuffer.h
@@ -8,13 +8,13 @@
#ifndef GrVkVertexBuffer_DEFINED
#define GrVkVertexBuffer_DEFINED
-#include "GrVertexBuffer.h"
+#include "GrBuffer.h"
#include "GrVkBuffer.h"
#include "vk/GrVkInterface.h"
class GrVkGpu;
-class GrVkVertexBuffer : public GrVertexBuffer, public GrVkBuffer {
+class GrVkVertexBuffer : public GrBuffer, public GrVkBuffer {
public:
static GrVkVertexBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
@@ -26,13 +26,13 @@ private:
GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
const GrVkBuffer::Resource* resource);
- void* onMap() override;
+ void onMap() override;
void onUnmap() override;
bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
GrVkGpu* getVkGpu() const;
- typedef GrVertexBuffer INHERITED;
+ typedef GrBuffer INHERITED;
};
#endif