author     bsalomon@google.com <bsalomon@google.com@2bbb7eff-a529-9590-31e7-b0007b416f81>   2013-02-21 14:33:46 +0000
committer  bsalomon@google.com <bsalomon@google.com@2bbb7eff-a529-9590-31e7-b0007b416f81>   2013-02-21 14:33:46 +0000
commit     ee3bc3b26771a58a78075f11cde8801e0e79f723 (patch)
tree       118d722cc2a318a20e6b778842887f4ac3961941 /src/gpu
parent     d454ec135eeef48edea7ebc47a61ff39bd654576 (diff)
Add support for vertex data rendered from CPU arrays.
Review URL: https://codereview.appspot.com/7380044
git-svn-id: http://skia.googlecode.com/svn/trunk@7807 2bbb7eff-a529-9590-31e7-b0007b416f81
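In outline, the patch lets a GL vertex or index buffer exist without a GL buffer object: when the descriptor's fID is 0, GrGLBufferImpl backs the buffer with a heap allocation, lock() hands back that allocation directly (it always succeeds and costs nothing), and updateData() reduces to a memcpy. The sketch below is a deliberately reduced illustration of that pattern; the class and member names are invented here and are not the Skia types.

#include <cstdlib>
#include <cstring>
#include <cstddef>

// Hypothetical reduction of the CPU-backed buffer idea in this patch: with no
// GL buffer object behind it, "locking" is free and can never fail.
class CpuBackedBuffer {
public:
    explicit CpuBackedBuffer(size_t sizeInBytes)
        : fSize(sizeInBytes)
        , fData(std::malloc(sizeInBytes))
        , fLockPtr(nullptr) {}
    ~CpuBackedBuffer() { std::free(fData); }

    bool isCPUBacked() const { return true; }

    // Always succeeds for a CPU-backed buffer; the caller writes straight into
    // the heap block that will later be handed to the driver.
    void* lock() { fLockPtr = fData; return fLockPtr; }
    void unlock() { fLockPtr = nullptr; }

    bool updateData(const void* src, size_t srcSize) {
        if (srcSize > fSize) {
            return false;                      // the buffer is never grown
        }
        std::memcpy(fData, src, srcSize);      // no GL round trip needed
        return true;
    }

private:
    size_t fSize;
    void*  fData;
    void*  fLockPtr;
};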
Diffstat (limited to 'src/gpu')
-rw-r--r--  src/gpu/GrBufferAllocPool.cpp     18
-rw-r--r--  src/gpu/GrGeometryBuffer.h        20
-rw-r--r--  src/gpu/GrIndexBuffer.h            4
-rw-r--r--  src/gpu/GrVertexBuffer.h           4
-rw-r--r--  src/gpu/gl/GrGLBufferImpl.cpp     60
-rw-r--r--  src/gpu/gl/GrGLBufferImpl.h        7
-rw-r--r--  src/gpu/gl/GrGLIndexBuffer.cpp     2
-rw-r--r--  src/gpu/gl/GrGLVertexBuffer.cpp    2
-rw-r--r--  src/gpu/gl/GrGpuGL.cpp            85
9 files changed, 136 insertions, 66 deletions
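The new locking decision in GrBufferAllocPool::createBlock (first hunk below) can be restated as a small predicate: a CPU-backed buffer is always locked, because doing so is free and saves a copy; otherwise locking is only attempted when the backend supports it and the relevant size clears a threshold, where the relevant size is the requested size under the frequent-reset hint and the whole block size without it. The helper below is an illustrative restatement, not code from the patch; the function and its threshold parameter are invented here.

#include <cstddef>

// Illustrative restatement of the attemptLock heuristic; Skia inlines this
// logic in GrBufferAllocPool::createBlock rather than using a helper.
static bool shouldAttemptLock(bool cpuBacked, bool lockSupported, bool frequentResetHint,
                              size_t requestSize, size_t blockSize, size_t lockThreshold) {
    if (cpuBacked) {
        return true;               // free to lock and saves a copy
    }
    if (!lockSupported) {
        return false;
    }
    // Under the frequent-reset hint only the bytes actually requested matter,
    // since more vertex data is not expected before the next reset.
    size_t relevant = frequentResetHint ? requestSize : blockSize;
    return relevant > lockThreshold;
}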
diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp
index ec8a9c9545..db9b2c8d76 100644
--- a/src/gpu/GrBufferAllocPool.cpp
+++ b/src/gpu/GrBufferAllocPool.cpp
@@ -296,9 +296,21 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
 
     GrAssert(NULL == fBufferPtr);
 
-    if (fGpu->getCaps().bufferLockSupport() &&
-        size > GR_GEOM_BUFFER_LOCK_THRESHOLD &&
-        (!fFrequentResetHint || requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD)) {
+    // If the buffer is CPU-backed we lock it because it is free to do so and saves a copy.
+    // Otherwise when buffer locking is supported:
+    //      a) If the frequently reset hint is set we only lock when the requested size meets a
+    //      threshold (since we don't expect it is likely that we will see more vertex data)
+    //      b) If the hint is not set we lock if the buffer size is greater than the threshold.
+    bool attemptLock = block.fBuffer->isCPUBacked();
+    if (!attemptLock && fGpu->getCaps().bufferLockSupport()) {
+        if (fFrequentResetHint) {
+            attemptLock = requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD;
+        } else {
+            attemptLock = size > GR_GEOM_BUFFER_LOCK_THRESHOLD;
+        }
+    }
+
+    if (attemptLock) {
         fBufferPtr = block.fBuffer->lock();
     }
 
diff --git a/src/gpu/GrGeometryBuffer.h b/src/gpu/GrGeometryBuffer.h
index 52318c1de5..3bb7118faf 100644
--- a/src/gpu/GrGeometryBuffer.h
+++ b/src/gpu/GrGeometryBuffer.h
@@ -29,11 +29,21 @@ public:
     bool dynamic() const { return fDynamic; }
 
     /**
+     * Returns true if the buffer is a wrapper around a CPU array. If true it
+     * indicates that lock will always succeed and will be free.
+     */
+    bool isCPUBacked() const { return fCPUBacked; }
+
+    /**
      * Locks the buffer to be written by the CPU.
      *
      * The previous content of the buffer is invalidated. It is an error
     * to draw from the buffer while it is locked. It is an error to call lock
-     * on an already locked buffer.
+     * on an already locked buffer. It may fail if the backend doesn't support
+     * locking the buffer. If the buffer is CPU backed then it will always
+     * succeed and is a free operation. Must be matched by an unlock() call.
+     * Currently only one lock at a time is supported (no nesting of
+     * lock/unlock).
      *
      * @return a pointer to the data or NULL if the lock fails.
      */
@@ -65,7 +75,7 @@ public:
      * Updates the buffer data.
      *
      * The size of the buffer will be preserved. The src data will be
-     * placed at the begining of the buffer and any remaining contents will
+     * placed at the beginning of the buffer and any remaining contents will
     * be undefined.
      *
      * @return returns true if the update succeeds, false otherwise.
@@ -76,14 +86,16 @@ public:
     virtual size_t sizeInBytes() const { return fSizeInBytes; }
 
 protected:
-    GrGeometryBuffer(GrGpu* gpu, bool isWrapped, size_t sizeInBytes, bool dynamic)
+    GrGeometryBuffer(GrGpu* gpu, bool isWrapped, size_t sizeInBytes, bool dynamic, bool cpuBacked)
         : INHERITED(gpu, isWrapped)
         , fSizeInBytes(sizeInBytes)
-        , fDynamic(dynamic) {}
+        , fDynamic(dynamic)
+        , fCPUBacked(cpuBacked) {}
 
 private:
     size_t fSizeInBytes;
     bool   fDynamic;
+    bool   fCPUBacked;
 
     typedef GrResource INHERITED;
 };
diff --git a/src/gpu/GrIndexBuffer.h b/src/gpu/GrIndexBuffer.h
index 6e556d2b83..69ee86f769 100644
--- a/src/gpu/GrIndexBuffer.h
+++ b/src/gpu/GrIndexBuffer.h
@@ -24,8 +24,8 @@ public:
         return this->sizeInBytes() / (sizeof(uint16_t) * 6);
     }
 protected:
-    GrIndexBuffer(GrGpu* gpu, bool isWrapped, size_t sizeInBytes, bool dynamic)
-        : INHERITED(gpu, isWrapped, sizeInBytes, dynamic) {}
+    GrIndexBuffer(GrGpu* gpu, bool isWrapped, size_t sizeInBytes, bool dynamic, bool cpuBacked)
+        : INHERITED(gpu, isWrapped, sizeInBytes, dynamic, cpuBacked) {}
 private:
     typedef GrGeometryBuffer INHERITED;
 };
diff --git a/src/gpu/GrVertexBuffer.h b/src/gpu/GrVertexBuffer.h
index b53cbf00ae..a2bd5a1b4d 100644
--- a/src/gpu/GrVertexBuffer.h
+++ b/src/gpu/GrVertexBuffer.h
@@ -15,8 +15,8 @@ class GrVertexBuffer : public GrGeometryBuffer {
 protected:
-    GrVertexBuffer(GrGpu* gpu, bool isWrapped, size_t sizeInBytes, bool dynamic)
-        : INHERITED(gpu, isWrapped, sizeInBytes, dynamic) {}
+    GrVertexBuffer(GrGpu* gpu, bool isWrapped, size_t sizeInBytes, bool dynamic, bool cpuBacked)
+        : INHERITED(gpu, isWrapped, sizeInBytes, dynamic, cpuBacked) {}
 private:
     typedef GrGeometryBuffer INHERITED;
 };
diff --git a/src/gpu/gl/GrGLBufferImpl.cpp b/src/gpu/gl/GrGLBufferImpl.cpp
index 0ab83fb77a..d9a8a8eca8 100644
--- a/src/gpu/gl/GrGLBufferImpl.cpp
+++ b/src/gpu/gl/GrGLBufferImpl.cpp
@@ -10,16 +10,32 @@
 
 #define GL_CALL(GPU, X) GR_GL_CALL(GPU->glInterface(), X)
 
+#if GR_DEBUG
+#define VALIDATE() this->validate()
+#else
+#define VALIDATE() do {} while(false)
+#endif
+
 GrGLBufferImpl::GrGLBufferImpl(GrGpuGL* gpu, const Desc& desc, GrGLenum bufferType)
     : fDesc(desc)
     , fBufferType(bufferType)
     , fLockPtr(NULL) {
-    GrAssert(GR_GL_ARRAY_BUFFER == bufferType || GR_GL_ELEMENT_ARRAY_BUFFER == bufferType);
+    if (0 == desc.fID) {
+        fCPUData = sk_malloc_flags(desc.fSizeInBytes, SK_MALLOC_THROW);
+    } else {
+        fCPUData = NULL;
+    }
+    VALIDATE();
 }
 
 void GrGLBufferImpl::release(GrGpuGL* gpu) {
-    // make sure we've not been abandoned
-    if (fDesc.fID && !fDesc.fIsWrapped) {
+    // make sure we've not been abandoned or already released
+    if (NULL != fCPUData) {
+        VALIDATE();
+        sk_free(fCPUData);
+        fCPUData = NULL;
+    } else if (fDesc.fID && !fDesc.fIsWrapped) {
+        VALIDATE();
         GL_CALL(gpu, DeleteBuffers(1, &fDesc.fID));
         if (GR_GL_ARRAY_BUFFER == fBufferType) {
             gpu->notifyVertexBufferDelete(fDesc.fID);
@@ -29,14 +45,18 @@ void GrGLBufferImpl::release(GrGpuGL* gpu) {
         }
         fDesc.fID = 0;
     }
+    fLockPtr = NULL;
 }
 
 void GrGLBufferImpl::abandon() {
     fDesc.fID = 0;
     fLockPtr = NULL;
+    sk_free(fCPUData);
+    fCPUData = NULL;
 }
 
 void GrGLBufferImpl::bind(GrGpuGL* gpu) const {
+    VALIDATE();
     GL_CALL(gpu, BindBuffer(fBufferType, fDesc.fID));
     if (GR_GL_ARRAY_BUFFER == fBufferType) {
         gpu->notifyVertexBufferBind(fDesc.fID);
@@ -47,9 +67,11 @@ void GrGLBufferImpl::bind(GrGpuGL* gpu) const {
 }
 
 void* GrGLBufferImpl::lock(GrGpuGL* gpu) {
-    GrAssert(0 != fDesc.fID);
+    VALIDATE();
     GrAssert(!this->isLocked());
-    if (gpu->getCaps().bufferLockSupport()) {
+    if (0 == fDesc.fID) {
+        fLockPtr = fCPUData;
+    } else if (gpu->getCaps().bufferLockSupport()) {
         this->bind(gpu);
         // Let driver know it can discard the old data
         GL_CALL(gpu, BufferData(fBufferType,
@@ -59,34 +81,35 @@ void* GrGLBufferImpl::lock(GrGpuGL* gpu) {
         GR_GL_CALL_RET(gpu->glInterface(),
                        fLockPtr,
                        MapBuffer(fBufferType, GR_GL_WRITE_ONLY));
-        return fLockPtr;
     }
-    return NULL;
+    return fLockPtr;
 }
 
 void GrGLBufferImpl::unlock(GrGpuGL* gpu) {
-
-    GrAssert(0 != fDesc.fID);
+    VALIDATE();
     GrAssert(this->isLocked());
-    GrAssert(gpu->getCaps().bufferLockSupport());
-
-    this->bind(gpu);
-    GL_CALL(gpu, UnmapBuffer(fBufferType));
+    if (0 != fDesc.fID) {
+        GrAssert(gpu->getCaps().bufferLockSupport());
+        this->bind(gpu);
+        GL_CALL(gpu, UnmapBuffer(fBufferType));
+    }
     fLockPtr = NULL;
 }
 
 bool GrGLBufferImpl::isLocked() const {
-    GrAssert(0 != fDesc.fID);
+    VALIDATE();
     return NULL != fLockPtr;
 }
 
 bool GrGLBufferImpl::updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInBytes) {
     GrAssert(!this->isLocked());
+    VALIDATE();
     if (srcSizeInBytes > fDesc.fSizeInBytes) {
         return false;
     }
     if (0 == fDesc.fID) {
-        return false;
+        memcpy(fCPUData, src, srcSizeInBytes);
+        return true;
     }
     this->bind(gpu);
     GrGLenum usage = fDesc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW;
@@ -129,3 +152,10 @@ bool GrGLBufferImpl::updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInB
 #endif
     return true;
 }
+
+void GrGLBufferImpl::validate() const {
+    GrAssert(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
+    GrAssert((0 == fDesc.fID) == (NULL != fCPUData));
+    GrAssert(0 != fDesc.fID || !fDesc.fIsWrapped);
+    GrAssert(NULL == fCPUData || NULL == fLockPtr || fCPUData == fLockPtr);
+}
diff --git a/src/gpu/gl/GrGLBufferImpl.h b/src/gpu/gl/GrGLBufferImpl.h
index 7b44f14dc1..1fd8ce074f 100644
--- a/src/gpu/gl/GrGLBufferImpl.h
+++ b/src/gpu/gl/GrGLBufferImpl.h
@@ -21,7 +21,7 @@ class GrGLBufferImpl : public GrNoncopyable {
 public:
     struct Desc {
         bool        fIsWrapped;
-        GrGLuint    fID;
+        GrGLuint    fID;            // set to 0 to indicate buffer is CPU-backed and not a VBO.
         size_t      fSizeInBytes;
         bool        fDynamic;
     };
@@ -36,7 +36,7 @@ public:
     void release(GrGpuGL* gpu);
 
     GrGLuint bufferID() const { return fDesc.fID; }
-    size_t baseOffset() const { return 0; }
+    size_t baseOffset() const { return reinterpret_cast<size_t>(fCPUData); }
 
     void bind(GrGpuGL* gpu) const;
 
@@ -47,8 +47,11 @@ public:
     bool updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInBytes);
 
 private:
+    void validate() const;
+
     Desc         fDesc;
     GrGLenum     fBufferType; // GL_ARRAY_BUFFER or GL_ELEMENT_ARRAY_BUFFER
+    void*        fCPUData;
     void*        fLockPtr;
 
     typedef GrNoncopyable INHERITED;
diff --git a/src/gpu/gl/GrGLIndexBuffer.cpp b/src/gpu/gl/GrGLIndexBuffer.cpp
index ff167347f9..b6290b1826 100644
--- a/src/gpu/gl/GrGLIndexBuffer.cpp
+++ b/src/gpu/gl/GrGLIndexBuffer.cpp
@@ -9,7 +9,7 @@
 #include "GrGpuGL.h"
 
 GrGLIndexBuffer::GrGLIndexBuffer(GrGpuGL* gpu, const Desc& desc)
-    : INHERITED(gpu, desc.fIsWrapped, desc.fSizeInBytes, desc.fDynamic)
+    : INHERITED(gpu, desc.fIsWrapped, desc.fSizeInBytes, desc.fDynamic, 0 == desc.fID)
     , fImpl(gpu, desc, GR_GL_ELEMENT_ARRAY_BUFFER) {
 }
diff --git a/src/gpu/gl/GrGLVertexBuffer.cpp b/src/gpu/gl/GrGLVertexBuffer.cpp
index 4152251433..685166c90b 100644
--- a/src/gpu/gl/GrGLVertexBuffer.cpp
+++ b/src/gpu/gl/GrGLVertexBuffer.cpp
@@ -9,7 +9,7 @@
 #include "GrGpuGL.h"
 
 GrGLVertexBuffer::GrGLVertexBuffer(GrGpuGL* gpu, const Desc& desc)
-    : INHERITED(gpu, desc.fIsWrapped, desc.fSizeInBytes, desc.fDynamic)
+    : INHERITED(gpu, desc.fIsWrapped, desc.fSizeInBytes, desc.fDynamic, 0 == desc.fID)
     , fImpl(gpu, desc, GR_GL_ARRAY_BUFFER) {
 }
diff --git a/src/gpu/gl/GrGpuGL.cpp b/src/gpu/gl/GrGpuGL.cpp
index 785b500899..235811c3fb 100644
--- a/src/gpu/gl/GrGpuGL.cpp
+++ b/src/gpu/gl/GrGpuGL.cpp
@@ -1231,27 +1231,33 @@ GrVertexBuffer* GrGpuGL::onCreateVertexBuffer(uint32_t size, bool dynamic) {
     desc.fSizeInBytes = size;
     desc.fIsWrapped = false;
 
-    GL_CALL(GenBuffers(1, &desc.fID));
-    if (desc.fID) {
-        GL_CALL(BindBuffer(GR_GL_ARRAY_BUFFER, desc.fID));
-        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
-        // make sure driver can allocate memory for this buffer
-        GL_ALLOC_CALL(this->glInterface(),
-                      BufferData(GR_GL_ARRAY_BUFFER,
-                                 desc.fSizeInBytes,
-                                 NULL,   // data ptr
-                                 desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
-        if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
-            GL_CALL(DeleteBuffers(1, &desc.fID));
-            // deleting bound buffer does implicit bind to 0
-            fHWGeometryState.setVertexBufferID(0);
-            return NULL;
-        }
+    if (false && desc.fDynamic) {
+        desc.fID = 0;
         GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
-        fHWGeometryState.setVertexBufferID(desc.fID);
         return vertexBuffer;
+    } else {
+        GL_CALL(GenBuffers(1, &desc.fID));
+        if (desc.fID) {
+            GL_CALL(BindBuffer(GR_GL_ARRAY_BUFFER, desc.fID));
+            fHWGeometryState.setVertexBufferID(desc.fID);
+            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
+            // make sure driver can allocate memory for this buffer
+            GL_ALLOC_CALL(this->glInterface(),
+                          BufferData(GR_GL_ARRAY_BUFFER,
+                                     desc.fSizeInBytes,
+                                     NULL,   // data ptr
+                                     desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
+            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
+                GL_CALL(DeleteBuffers(1, &desc.fID));
+                // deleting bound buffer does implicit bind to 0
+                fHWGeometryState.setVertexBufferID(0);
+                return NULL;
+            }
+            GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
+            return vertexBuffer;
+        }
+        return NULL;
     }
-    return NULL;
 }
 
 GrIndexBuffer* GrGpuGL::onCreateIndexBuffer(uint32_t size, bool dynamic) {
@@ -1260,27 +1266,33 @@ GrIndexBuffer* GrGpuGL::onCreateIndexBuffer(uint32_t size, bool dynamic) {
     desc.fSizeInBytes = size;
     desc.fIsWrapped = false;
 
-    GL_CALL(GenBuffers(1, &desc.fID));
-    if (desc.fID) {
-        GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, desc.fID));
-        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
-        // make sure driver can allocate memory for this buffer
-        GL_ALLOC_CALL(this->glInterface(),
-                      BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
-                                 desc.fSizeInBytes,
-                                 NULL,   // data ptr
-                                 desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
-        if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
-            GL_CALL(DeleteBuffers(1, &desc.fID));
-            // deleting bound buffer does implicit bind to 0
-            fHWGeometryState.setIndexBufferID(0);
-            return NULL;
-        }
+    if (false && desc.fDynamic) {
+        desc.fID = 0;
         GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
-        fHWGeometryState.setIndexBufferID(desc.fID);
         return indexBuffer;
+    } else {
+        GL_CALL(GenBuffers(1, &desc.fID));
+        if (desc.fID) {
+            GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, desc.fID));
+            fHWGeometryState.setIndexBufferID(desc.fID);
+            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
+            // make sure driver can allocate memory for this buffer
+            GL_ALLOC_CALL(this->glInterface(),
+                          BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
+                                     desc.fSizeInBytes,
+                                     NULL,   // data ptr
+                                     desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
+            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
+                GL_CALL(DeleteBuffers(1, &desc.fID));
+                // deleting bound buffer does implicit bind to 0
+                fHWGeometryState.setIndexBufferID(0);
+                return NULL;
+            }
+            GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
+            return indexBuffer;
+        }
+        return NULL;
     }
-    return NULL;
 }
 
 GrPath* GrGpuGL::onCreatePath(const SkPath& inPath) {
@@ -2354,6 +2366,7 @@ GrGLVertexBuffer* GrGpuGL::setBuffers(bool indexed,
         GrAssert(NULL != ibuf);
         GrAssert(!ibuf->isLocked());
+        *indexOffsetInBytes += ibuf->baseOffset();
         if (!fHWGeometryState.isIndexBufferIDBound(ibuf->bufferID())) {
             ibuf->bind();
             fHWGeometryState.setIndexBufferID(ibuf->bufferID());
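The baseOffset() change and the new *indexOffsetInBytes += ibuf->baseOffset(); line in setBuffers rely on a standard OpenGL convention: with no buffer object bound, the pointer argument of glVertexAttribPointer (and the index pointer of glDrawElements) is interpreted as a client-memory address, whereas with a VBO bound it is a byte offset into that buffer. Returning the malloc'd address as the buffer's base offset therefore lets the existing offset arithmetic serve both the VBO and the CPU-array case. A rough sketch of such a call site, using plain GLES2 entry points rather than Skia's GrGLInterface wrappers (the function below is invented for illustration):

#include <GLES2/gl2.h>
#include <cstddef>

// bufferID == 0 means no VBO is bound, so the computed "pointer" really is a
// CPU address (baseOffset came from the malloc'd block); with a VBO bound the
// same value is just a byte offset into that buffer.
static void setVertexAttrib(GLuint bufferID, size_t baseOffset, size_t attribOffset) {
    glBindBuffer(GL_ARRAY_BUFFER, bufferID);
    const void* ptr = reinterpret_cast<const void*>(baseOffset + attribOffset);
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(float), ptr);
}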