aboutsummaryrefslogtreecommitdiffhomepage
path: root/src
diff options
context:
space:
mode:
authorGravatar Greg Daniel <egdaniel@google.com>2018-05-25 11:02:16 -0400
committerGravatar Skia Commit-Bot <skia-commit-bot@chromium.org>2018-05-25 15:24:42 +0000
commit26c0e4c1f54759249c2d61b50fb5430bd73793f2 (patch)
tree42dee870d6db76cc4a532f77914bfbf837918be1 /src
parente7e6e22912cec1bd59348fd3889c7d8ae7b6bdc4 (diff)
Create API for GrVkMemoryAllocator and implement use of AMD VulkanMemoryAllocator on this API.
Bug: skia: Change-Id: I1e122e1b11ab308c2f83cb98c36c81511f4507d0 Reviewed-on: https://skia-review.googlesource.com/129980 Commit-Queue: Greg Daniel <egdaniel@google.com> Reviewed-by: Brian Salomon <bsalomon@google.com> Reviewed-by: Jim Van Verth <jvanverth@google.com>
Diffstat (limited to 'src')
-rw-r--r--src/gpu/vk/GrVkAMDMemoryAllocator.cpp267
-rw-r--r--src/gpu/vk/GrVkAMDMemoryAllocator.h56
2 files changed, 323 insertions, 0 deletions
diff --git a/src/gpu/vk/GrVkAMDMemoryAllocator.cpp b/src/gpu/vk/GrVkAMDMemoryAllocator.cpp
new file mode 100644
index 0000000000..0b838ece3a
--- /dev/null
+++ b/src/gpu/vk/GrVkAMDMemoryAllocator.cpp
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkAMDMemoryAllocator.h"
+
+#include "vk/GrVkInterface.h"
+#include "GrVkUtil.h"
+
+/*
+ * Constructs an allocator backed by AMD's VulkanMemoryAllocator (VMA) library.
+ * VMA issues raw Vulkan calls itself, so we hand it the function pointers that
+ * Skia's GrVkInterface has already resolved instead of linking the loader.
+ */
+GrVkAMDMemoryAllocator::GrVkAMDMemoryAllocator(VkPhysicalDevice physicalDevice,
+ VkDevice device,
+ sk_sp<const GrVkInterface> interface)
+ : fAllocator(VK_NULL_HANDLE)
+ , fInterface(std::move(interface))
+ , fDevice(device) {
+// Copies one Skia-dispatched Vulkan entry point into the VMA function table.
+#define GR_COPY_FUNCTION(NAME) functions.vk##NAME = fInterface->fFunctions.f##NAME;
+
+ VmaVulkanFunctions functions;
+ GR_COPY_FUNCTION(GetPhysicalDeviceProperties);
+ GR_COPY_FUNCTION(GetPhysicalDeviceMemoryProperties);
+ GR_COPY_FUNCTION(AllocateMemory);
+ GR_COPY_FUNCTION(FreeMemory);
+ GR_COPY_FUNCTION(MapMemory);
+ GR_COPY_FUNCTION(UnmapMemory);
+ GR_COPY_FUNCTION(BindBufferMemory);
+ GR_COPY_FUNCTION(BindImageMemory);
+ GR_COPY_FUNCTION(GetBufferMemoryRequirements);
+ GR_COPY_FUNCTION(GetImageMemoryRequirements);
+ GR_COPY_FUNCTION(CreateBuffer);
+ GR_COPY_FUNCTION(DestroyBuffer);
+ GR_COPY_FUNCTION(CreateImage);
+ GR_COPY_FUNCTION(DestroyImage);
+
+ // Skia currently doesn't support VK_KHR_dedicated_allocation
+ functions.vkGetBufferMemoryRequirements2KHR = nullptr;
+ functions.vkGetImageMemoryRequirements2KHR = nullptr;
+
+ VmaAllocatorCreateInfo info;
+ info.flags = 0;
+ info.physicalDevice = physicalDevice;
+ info.device = device;
+ // Zero lets VMA choose its own default block size for large heaps.
+ info.preferredLargeHeapBlockSize = 0;
+ info.pAllocationCallbacks = nullptr;
+ info.pDeviceMemoryCallbacks = nullptr;
+ info.frameInUseCount = 0;
+ info.pHeapSizeLimit = nullptr;
+ // |functions| is copied by vmaCreateAllocator, so a stack local is fine here.
+ info.pVulkanFunctions = &functions;
+
+ // NOTE(review): the VkResult is discarded; on failure fAllocator stays
+ // VK_NULL_HANDLE — confirm callers tolerate a null allocator handle.
+ vmaCreateAllocator(&info, &fAllocator);
+}
+
+// Destroys the underlying VMA allocator and clears the handle.
+GrVkAMDMemoryAllocator::~GrVkAMDMemoryAllocator() {
+ vmaDestroyAllocator(fAllocator);
+ fAllocator = VK_NULL_HANDLE;
+}
+
+// Allocates device-local memory for |image| through VMA. On success the opaque
+// VmaAllocation is stored (cast) into |*backendMemory| and true is returned;
+// on failure returns false and |*backendMemory| is left untouched.
+bool GrVkAMDMemoryAllocator::allocateMemoryForImage(VkImage image, AllocationPropertyFlags flags,
+ GrVkBackendMemory* backendMemory) {
+ VmaAllocationCreateInfo info;
+ info.flags = 0;
+ info.usage = VMA_MEMORY_USAGE_UNKNOWN;
+ // Images always require device-local memory here; |flags| can only add
+ // dedicated-allocation or lazily-allocated preferences below.
+ info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ info.preferredFlags = 0;
+ info.memoryTypeBits = 0;
+ info.pool = VK_NULL_HANDLE;
+ info.pUserData = nullptr;
+
+ if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
+ info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+ }
+
+ if (AllocationPropertyFlags::kLazyAllocation & flags) {
+ info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
+ }
+
+ VmaAllocation allocation;
+ VkResult result = vmaAllocateMemoryForImage(fAllocator, image, &info, &allocation, nullptr);
+ if (VK_SUCCESS != result) {
+ return false;
+ }
+ *backendMemory = (GrVkBackendMemory)allocation;
+ return true;
+}
+
+// Allocates memory for |buffer| through VMA, choosing memory property flags
+// from the buffer's intended CPU/GPU access pattern (|usage|). On success the
+// VmaAllocation is stored (cast) into |*backendMemory| and true is returned.
+bool GrVkAMDMemoryAllocator::allocateMemoryForBuffer(VkBuffer buffer, BufferUsage usage,
+ AllocationPropertyFlags flags,
+ GrVkBackendMemory* backendMemory) {
+ VmaAllocationCreateInfo info;
+ info.flags = 0;
+ info.usage = VMA_MEMORY_USAGE_UNKNOWN;
+ info.memoryTypeBits = 0;
+ info.pool = VK_NULL_HANDLE;
+ info.pUserData = nullptr;
+
+ // requiredFlags must be satisfied; preferredFlags only bias the choice.
+ switch (usage) {
+ case BufferUsage::kGpuOnly:
+ info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ info.preferredFlags = 0;
+ break;
+ case BufferUsage::kCpuOnly:
+ info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+ info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+ break;
+ case BufferUsage::kCpuWritesGpuReads:
+ // First attempt: require memory that is also device local (fast for
+ // GPU reads); the fallback below retries without that requirement.
+ info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+ break;
+ case BufferUsage::kGpuWritesCpuReads:
+ info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ info.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+ break;
+ }
+
+ if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
+ info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+ }
+
+ // Lazily-allocated memory only makes sense for GPU-only buffers.
+ if ((AllocationPropertyFlags::kLazyAllocation & flags) && BufferUsage::kGpuOnly == usage) {
+ info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
+ }
+
+ // Persistent mapping only applies to host-visible (non GPU-only) buffers.
+ if ((AllocationPropertyFlags::kPersistentlyMapped & flags) && BufferUsage::kGpuOnly != usage) {
+ info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
+ }
+
+ VmaAllocation allocation;
+ VkResult result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
+ if (VK_SUCCESS != result) {
+ if (usage == BufferUsage::kCpuWritesGpuReads) {
+ // We try again but this time drop the requirement for device local
+ info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
+ }
+ }
+ if (VK_SUCCESS != result) {
+ return false;
+ }
+ *backendMemory = (GrVkBackendMemory)allocation;
+ return true;
+}
+
+// Returns the allocation behind |memoryHandle| to VMA. The handle must have
+// come from one of the allocate* methods above and must not be used again.
+void GrVkAMDMemoryAllocator::freeMemory(const GrVkBackendMemory& memoryHandle) {
+ const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
+ vmaFreeMemory(fAllocator, allocation);
+}
+
+// Fills |*alloc| with the VkDeviceMemory, offset, size, and Skia flag bits for
+// the allocation behind |memoryHandle|.
+void GrVkAMDMemoryAllocator::getAllocInfo(const GrVkBackendMemory& memoryHandle,
+ GrVkAlloc* alloc) const {
+ const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
+ VmaAllocationInfo vmaInfo;
+ vmaGetAllocationInfo(fAllocator, allocation, &vmaInfo);
+
+ VkMemoryPropertyFlags memFlags;
+ vmaGetMemoryTypeProperties(fAllocator, vmaInfo.memoryType, &memFlags);
+
+ // Translate the Vulkan memory property bits into GrVkAlloc flags:
+ // host-visible -> mappable; lack of host-coherent -> noncoherent (so callers
+ // know explicit flush/invalidate is required).
+ uint32_t flags = 0;
+ if (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT & memFlags) {
+ flags |= GrVkAlloc::kMappable_Flag;
+ }
+ if (!SkToBool(VK_MEMORY_PROPERTY_HOST_COHERENT_BIT & memFlags)) {
+ flags |= GrVkAlloc::kNoncoherent_Flag;
+ }
+
+ alloc->fMemory = vmaInfo.deviceMemory;
+ alloc->fOffset = vmaInfo.offset;
+ alloc->fSize = vmaInfo.size;
+ alloc->fFlags = flags;
+ alloc->fBackendMemory = memoryHandle;
+}
+
+// Maps the allocation behind |memoryHandle| and returns the CPU pointer.
+// NOTE(review): the VkResult from vmaMapMemory is ignored; if mapping fails,
+// |mapPtr| is returned uninitialized — confirm callers can never hit that path.
+void* GrVkAMDMemoryAllocator::mapMemory(const GrVkBackendMemory& memoryHandle) {
+ const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
+ void* mapPtr;
+ vmaMapMemory(fAllocator, allocation, &mapPtr);
+ return mapPtr;
+}
+
+// Unmaps a previously mapped allocation. Must pair with a mapMemory call.
+void GrVkAMDMemoryAllocator::unmapMemory(const GrVkBackendMemory& memoryHandle) {
+ const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
+ vmaUnmapMemory(fAllocator, allocation);
+}
+
+// Flushes CPU writes in [offset, offset+size) of the allocation so the GPU can
+// see them. A no-op for host-coherent memory; for non-coherent memory the range
+// is aligned to the device's nonCoherentAtomSize as Vulkan requires, then
+// vkFlushMappedMemoryRanges is called directly through the GrVkInterface.
+// |offset| and |size| are relative to the start of this allocation.
+void GrVkAMDMemoryAllocator::flushMappedMemory(const GrVkBackendMemory& memoryHandle,
+ VkDeviceSize offset, VkDeviceSize size) {
+ GrVkAlloc info;
+ this->getAllocInfo(memoryHandle, &info);
+
+ if (GrVkAlloc::kNoncoherent_Flag & info.fFlags) {
+ // Non-coherent flush ranges must be aligned to nonCoherentAtomSize.
+ const VkPhysicalDeviceProperties* physDevProps;
+ vmaGetPhysicalDeviceProperties(fAllocator, &physDevProps);
+ VkDeviceSize alignment = physDevProps->limits.nonCoherentAtomSize;
+
+ // Convert to an offset within the whole VkDeviceMemory, round the offset
+ // down and the size up to the atom size.
+ offset = offset + info.fOffset;
+ VkDeviceSize offsetDiff = offset & (alignment -1);
+ offset = offset - offsetDiff;
+ size = (size + alignment - 1) & ~(alignment - 1);
+#ifdef SK_DEBUG
+ // NOTE(review): rounding |size| up can push the range past the end of the
+ // allocation; only these debug asserts guard that — verify callers always
+ // pass atom-aligned allocation sizes.
+ SkASSERT(offset >= info.fOffset);
+ SkASSERT(offset + size <= info.fOffset + info.fSize);
+ SkASSERT(0 == (offset & (alignment-1)));
+ SkASSERT(size > 0);
+ SkASSERT(0 == (size & (alignment-1)));
+#endif
+
+ VkMappedMemoryRange mappedMemoryRange;
+ memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
+ mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
+ mappedMemoryRange.memory = info.fMemory;
+ mappedMemoryRange.offset = offset;
+ mappedMemoryRange.size = size;
+ GR_VK_CALL(fInterface, FlushMappedMemoryRanges(fDevice, 1, &mappedMemoryRange));
+ }
+}
+
+// Invalidates CPU caches for [offset, offset+size) so GPU writes become
+// visible to the host. Mirrors flushMappedMemory: no-op for host-coherent
+// memory, otherwise aligns the range to nonCoherentAtomSize and calls
+// vkInvalidateMappedMemoryRanges through the GrVkInterface.
+// |offset| and |size| are relative to the start of this allocation.
+void GrVkAMDMemoryAllocator::invalidateMappedMemory(const GrVkBackendMemory& memoryHandle,
+ VkDeviceSize offset, VkDeviceSize size) {
+ GrVkAlloc info;
+ this->getAllocInfo(memoryHandle, &info);
+
+ if (GrVkAlloc::kNoncoherent_Flag & info.fFlags) {
+ // Non-coherent invalidate ranges must be aligned to nonCoherentAtomSize.
+ const VkPhysicalDeviceProperties* physDevProps;
+ vmaGetPhysicalDeviceProperties(fAllocator, &physDevProps);
+ VkDeviceSize alignment = physDevProps->limits.nonCoherentAtomSize;
+
+ // Convert to an offset within the whole VkDeviceMemory, round the offset
+ // down and the size up to the atom size.
+ offset = offset + info.fOffset;
+ VkDeviceSize offsetDiff = offset & (alignment -1);
+ offset = offset - offsetDiff;
+ size = (size + alignment - 1) & ~(alignment - 1);
+#ifdef SK_DEBUG
+ // NOTE(review): rounding |size| up can push the range past the end of the
+ // allocation; only these debug asserts guard that — verify callers always
+ // pass atom-aligned allocation sizes.
+ SkASSERT(offset >= info.fOffset);
+ SkASSERT(offset + size <= info.fOffset + info.fSize);
+ SkASSERT(0 == (offset & (alignment-1)));
+ SkASSERT(size > 0);
+ SkASSERT(0 == (size & (alignment-1)));
+#endif
+
+ VkMappedMemoryRange mappedMemoryRange;
+ memset(&mappedMemoryRange, 0, sizeof(VkMappedMemoryRange));
+ mappedMemoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
+ mappedMemoryRange.memory = info.fMemory;
+ mappedMemoryRange.offset = offset;
+ mappedMemoryRange.size = size;
+ GR_VK_CALL(fInterface, InvalidateMappedMemoryRanges(fDevice, 1, &mappedMemoryRange));
+ }
+}
+
+// Returns the number of bytes currently handed out to live allocations,
+// as reported by VMA's statistics.
+uint64_t GrVkAMDMemoryAllocator::totalUsedMemory() const {
+ VmaStats stats;
+ vmaCalculateStats(fAllocator, &stats);
+ return stats.total.usedBytes;
+}
+
+// Returns the total bytes of VkDeviceMemory VMA has allocated from the device:
+// bytes in use plus unused bytes still held in VMA's blocks.
+uint64_t GrVkAMDMemoryAllocator::totalAllocatedMemory() const {
+ VmaStats stats;
+ vmaCalculateStats(fAllocator, &stats);
+ return stats.total.usedBytes + stats.total.unusedBytes;
+}
+
diff --git a/src/gpu/vk/GrVkAMDMemoryAllocator.h b/src/gpu/vk/GrVkAMDMemoryAllocator.h
new file mode 100644
index 0000000000..297b0adaf7
--- /dev/null
+++ b/src/gpu/vk/GrVkAMDMemoryAllocator.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkAMDMemoryAllocator_DEFINED
+#define GrVkAMDMemoryAllocator_DEFINED
+
+#include "vk/GrVkMemoryAllocator.h"
+
+#include "GrVulkanMemoryAllocator.h"
+
+struct GrVkInterface;
+
+/**
+ * GrVkMemoryAllocator implementation built on AMD's VulkanMemoryAllocator
+ * (VMA) library. The GrVkBackendMemory handles this class hands out are
+ * VmaAllocation pointers cast to the opaque handle type.
+ */
+class GrVkAMDMemoryAllocator : public GrVkMemoryAllocator {
+public:
+ // |interface| supplies the Vulkan function pointers VMA will call; it is
+ // retained for the lifetime of this allocator.
+ GrVkAMDMemoryAllocator(VkPhysicalDevice physicalDevice, VkDevice device,
+ sk_sp<const GrVkInterface> interface);
+
+ ~GrVkAMDMemoryAllocator() override;
+
+ bool allocateMemoryForImage(VkImage image, AllocationPropertyFlags flags, GrVkBackendMemory*) override;
+
+ bool allocateMemoryForBuffer(VkBuffer buffer, BufferUsage usage,
+ AllocationPropertyFlags flags, GrVkBackendMemory*) override;
+
+ void freeMemory(const GrVkBackendMemory&) override;
+
+ void getAllocInfo(const GrVkBackendMemory&, GrVkAlloc*) const override;
+
+ void* mapMemory(const GrVkBackendMemory&) override;
+ void unmapMemory(const GrVkBackendMemory&) override;
+
+ // Flush/invalidate take offsets/sizes relative to the allocation; for
+ // non-coherent memory they align the range to nonCoherentAtomSize.
+ void flushMappedMemory(const GrVkBackendMemory&, VkDeviceSize offset,
+ VkDeviceSize size) override;
+ void invalidateMappedMemory(const GrVkBackendMemory&, VkDeviceSize offset,
+ VkDeviceSize size) override;
+
+ uint64_t totalUsedMemory() const override;
+ uint64_t totalAllocatedMemory() const override;
+
+private:
+ // Handle to the VMA allocator; VK_NULL_HANDLE if creation failed.
+ VmaAllocator fAllocator;
+
+ // If a future version of the AMD allocator has helper functions for flushing and invalidating
+ // memory, then we won't need to save the GrVkInterface here since we won't need to make direct
+ // vulkan calls.
+ sk_sp<const GrVkInterface> fInterface;
+ VkDevice fDevice;
+
+ typedef GrVkMemoryAllocator INHERITED;
+};
+
+#endif