aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
author Greg Daniel <egdaniel@google.com> 2018-03-02 11:44:22 -0500
committer Skia Commit-Bot <skia-commit-bot@chromium.org> 2018-03-02 17:04:50 +0000
commit e35a99ed706dcd0407c7ca4373ed97d21d988069 (patch)
tree 291a5109bed7a2939c556b948caa74384e3df0ce
parent 8080a6e7050ec38aaa81c700e9db4bc2e3ec9009 (diff)
Update Flush and Invalidate Memory calls in vulkan to take offset and size
Bug: skia:
Change-Id: I4faf9f431422f27096fce4605be281c28935df08
Reviewed-on: https://skia-review.googlesource.com/111782
Reviewed-by: Jim Van Verth <jvanverth@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
-rw-r--r--src/gpu/vk/GrVkBuffer.cpp8
-rw-r--r--src/gpu/vk/GrVkGpu.cpp7
-rw-r--r--src/gpu/vk/GrVkMemory.cpp43
-rw-r--r--src/gpu/vk/GrVkMemory.h6
4 files changed, 47 insertions, 17 deletions
diff --git a/src/gpu/vk/GrVkBuffer.cpp b/src/gpu/vk/GrVkBuffer.cpp
index 54713cb8c9..f65b15ded0 100644
--- a/src/gpu/vk/GrVkBuffer.cpp
+++ b/src/gpu/vk/GrVkBuffer.cpp
@@ -206,7 +206,13 @@ void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
SkASSERT(this->vkIsMapped());
if (fDesc.fDynamic) {
- GrVkMemory::FlushMappedAlloc(gpu, this->alloc(), fMappedSize);
+ // We currently don't use fOffset
+ SkASSERT(0 == fOffset);
+ VkDeviceSize flushOffset = this->alloc().fOffset + fOffset;
+ VkDeviceSize flushSize = gpu->vkCaps().canUseWholeSizeOnFlushMappedMemory() ? VK_WHOLE_SIZE
+ : fMappedSize;
+
+ GrVkMemory::FlushMappedAlloc(gpu, this->alloc(), flushOffset, flushSize);
VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory));
fMapPtr = nullptr;
fMappedSize = 0;
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index e4a980a97a..bfd5593c41 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -615,7 +615,7 @@ bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex, GrSurfaceOrigin texOrigin, i
height);
}
- GrVkMemory::FlushMappedAlloc(this, alloc, size);
+ GrVkMemory::FlushMappedAlloc(this, alloc, offset, size);
GR_VK_CALL(interface, UnmapMemory(fDevice, alloc.fMemory));
return true;
@@ -1169,7 +1169,7 @@ bool copy_testing_data(GrVkGpu* gpu, void* srcData, const GrVkAlloc& alloc, size
}
}
}
- GrVkMemory::FlushMappedAlloc(gpu, alloc, mapSize);
+ GrVkMemory::FlushMappedAlloc(gpu, alloc, mapOffset, mapSize);
GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
return true;
}
@@ -2015,7 +2015,8 @@ bool GrVkGpu::onReadPixels(GrSurface* surface, GrSurfaceOrigin origin, int left,
// we can copy the data out of the buffer.
this->submitCommandBuffer(kForce_SyncQueue);
void* mappedMemory = transferBuffer->map();
- GrVkMemory::InvalidateMappedAlloc(this, transferBuffer->alloc());
+ const GrVkAlloc& transAlloc = transferBuffer->alloc();
+ GrVkMemory::InvalidateMappedAlloc(this, transAlloc, transAlloc.fOffset, VK_WHOLE_SIZE);
if (copyFromOrigin) {
uint32_t skipRows = region.imageExtent.height - height;
diff --git a/src/gpu/vk/GrVkMemory.cpp b/src/gpu/vk/GrVkMemory.cpp
index d744a7a1e3..e391d02aee 100644
--- a/src/gpu/vk/GrVkMemory.cpp
+++ b/src/gpu/vk/GrVkMemory.cpp
@@ -298,32 +298,53 @@ VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
return flags;
}
-void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize size) {
+void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
+ VkDeviceSize size) {
if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
+#ifdef SK_DEBUG
+ SkASSERT(offset >= alloc.fOffset);
+ VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
+ SkASSERT(0 == (offset & (alignment-1)));
+ if (size != VK_WHOLE_SIZE) {
+ SkASSERT(size > 0);
+ SkASSERT(0 == (size & (alignment-1)) ||
+ (offset + size) == (alloc.fOffset + alloc.fSize));
+ SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
+ }
+#endif
+
VkMappedMemoryRange mappedMemoryRange;
memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
mappedMemoryRange.memory = alloc.fMemory;
- mappedMemoryRange.offset = alloc.fOffset;
- if (gpu->vkCaps().canUseWholeSizeOnFlushMappedMemory()) {
- mappedMemoryRange.size = VK_WHOLE_SIZE; // Size of what we mapped
- } else {
- SkASSERT(size > 0);
- mappedMemoryRange.size = size;
- }
+ mappedMemoryRange.offset = offset;
+ mappedMemoryRange.size = size;
GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(),
1, &mappedMemoryRange));
}
}
-void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
+void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
+ VkDeviceSize offset, VkDeviceSize size) {
if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
+#ifdef SK_DEBUG
+ SkASSERT(offset >= alloc.fOffset);
+ VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
+ SkASSERT(0 == (offset & (alignment-1)));
+ if (size != VK_WHOLE_SIZE) {
+ SkASSERT(size > 0);
+ SkASSERT(0 == (size & (alignment-1)) ||
+ (offset + size) == (alloc.fOffset + alloc.fSize));
+ SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
+ }
+#endif
+
VkMappedMemoryRange mappedMemoryRange;
memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
mappedMemoryRange.memory = alloc.fMemory;
- mappedMemoryRange.offset = alloc.fOffset;
- mappedMemoryRange.size = VK_WHOLE_SIZE; // Size of what we mapped
+ mappedMemoryRange.offset = offset;
+ mappedMemoryRange.size = size;
GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(),
1, &mappedMemoryRange));
}
diff --git a/src/gpu/vk/GrVkMemory.h b/src/gpu/vk/GrVkMemory.h
index 8dd43bb4a4..baf843e02c 100644
--- a/src/gpu/vk/GrVkMemory.h
+++ b/src/gpu/vk/GrVkMemory.h
@@ -38,8 +38,10 @@ namespace GrVkMemory {
VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
- void FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize size);
- void InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc);
+ void FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
+ VkDeviceSize size);
+ void InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
+ VkDeviceSize size);
}
class GrVkFreeListAlloc {