path: root/src/gpu/vk/GrVkMemory.cpp
author Greg Daniel <egdaniel@google.com> 2016-02-22 09:56:40 -0500
committer Greg Daniel <egdaniel@google.com> 2016-02-22 09:56:40 -0500
commit 164a9f061c5186ae931cc23a3c73f32472e80ff5 (patch)
tree 79929f22803c622a2e0c8dbc333717c275ca0405 /src/gpu/vk/GrVkMemory.cpp
parent 129ed1cd6d792f3f6cf563aefa9756fc6308289d (diff)
Add vulkan files into skia repo. This is an incomplete backend with only partial functionality at this time.
Diffstat (limited to 'src/gpu/vk/GrVkMemory.cpp')
-rw-r--r-- src/gpu/vk/GrVkMemory.cpp | 157
1 file changed, 157 insertions(+), 0 deletions(-)
diff --git a/src/gpu/vk/GrVkMemory.cpp b/src/gpu/vk/GrVkMemory.cpp
new file mode 100644
index 0000000000..276fd4b0a2
--- /dev/null
+++ b/src/gpu/vk/GrVkMemory.cpp
@@ -0,0 +1,157 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkMemory.h"
+
+#include "GrVkGpu.h"
+#include "GrVkUtil.h"
+
+static bool get_valid_memory_type_index(VkPhysicalDeviceMemoryProperties physDevMemProps,
+ uint32_t typeBits,
+ VkMemoryPropertyFlags requestedMemFlags,
+ uint32_t* typeIndex) {
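+    // Bit i of typeBits being set means memory type i is allowed for this resource; pick the
+    // first allowed type that also supports all of the requested property flags.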
+ uint32_t checkBit = 1;
+ for (uint32_t i = 0; i < 32; ++i) {
+ if (typeBits & checkBit) {
+ uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
+ requestedMemFlags;
+ if (supportedFlags == requestedMemFlags) {
+ *typeIndex = i;
+ return true;
+ }
+ }
+ checkBit <<= 1;
+ }
+ return false;
+}
+
+static bool alloc_device_memory(const GrVkGpu* gpu,
+ VkMemoryRequirements* memReqs,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory) {
+ uint32_t typeIndex;
+ if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
+ memReqs->memoryTypeBits,
+ flags,
+ &typeIndex)) {
+ return false;
+ }
+
+ VkMemoryAllocateInfo allocInfo = {
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
+ NULL, // pNext
+ memReqs->size, // allocationSize
+ typeIndex, // memoryTypeIndex
+ };
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
+ &allocInfo,
+ nullptr,
+ memory));
+ if (err) {
+ return false;
+ }
+ return true;
+}
+
+bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
+ VkBuffer buffer,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory) {
+ const GrVkInterface* interface = gpu->vkInterface();
+ VkDevice device = gpu->device();
+
+ VkMemoryRequirements memReqs;
+ GR_VK_CALL(interface, GetBufferMemoryRequirements(device, buffer, &memReqs));
+
+ if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
+ return false;
+ }
+
+    // Bind the allocated memory to the buffer.
+ VkResult err = GR_VK_CALL(interface, BindBufferMemory(device, buffer, *memory, 0));
+ if (err) {
+ GR_VK_CALL(interface, FreeMemory(device, *memory, nullptr));
+ return false;
+ }
+ return true;
+}
+
+bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
+ VkImage image,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory) {
+ const GrVkInterface* interface = gpu->vkInterface();
+ VkDevice device = gpu->device();
+
+ VkMemoryRequirements memReqs;
+ GR_VK_CALL(interface, GetImageMemoryRequirements(device, image, &memReqs));
+
+ if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
+ return false;
+ }
+
+    // Bind the allocated memory to the image.
+ VkResult err = GR_VK_CALL(interface, BindImageMemory(device, image, *memory, 0));
+ if (err) {
+ GR_VK_CALL(interface, FreeMemory(device, *memory, nullptr));
+ return false;
+ }
+ return true;
+}
+
+VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_TRANSFER_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ return VK_PIPELINE_STAGE_HOST_BIT;
+ }
+
+ SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
+ return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+}
+
+VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
+    // Currently we assume we will never be doing any explicit shader writes (this doesn't
+    // include color attachment or depth/stencil writes), so we ignore
+    // VK_ACCESS_SHADER_WRITE_BIT.
+
+ // We can only directly access the host memory if we are in preinitialized or general layout,
+ // and the image is linear.
+    // TODO: Add a check for linear tiling here so we are not always adding host access to the
+    // general layout; we should only be in preinitialized if the image is linear.
+    VkAccessFlags flags = 0;
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ flags = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ flags = VK_ACCESS_SHADER_READ_BIT;
+ }
+ return flags;
+}
+
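
The allocation helpers above follow the standard Vulkan pattern: query the resource's VkMemoryRequirements, match memoryTypeBits against the physical device's memory types, allocate, and bind. A minimal standalone sketch of the same flow with raw Vulkan calls, without the GrVkGpu/GR_VK_CALL wrappers (the device, physicalDevice, and buffer handles are hypothetical and assumed to be created elsewhere):

#include <vulkan/vulkan.h>

// Hypothetical standalone equivalent of GrVkMemory::AllocAndBindBufferMemory.
// 'device', 'physicalDevice', and 'buffer' are assumed to be valid handles.
bool allocAndBindBufferMemory(VkDevice device, VkPhysicalDevice physicalDevice,
                              VkBuffer buffer, VkMemoryPropertyFlags requestedFlags,
                              VkDeviceMemory* memory) {
    VkMemoryRequirements memReqs;
    vkGetBufferMemoryRequirements(device, buffer, &memReqs);

    VkPhysicalDeviceMemoryProperties memProps;
    vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);

    // Bit i of memoryTypeBits set means memory type i may back this buffer;
    // take the first such type that also has all the requested properties.
    uint32_t typeIndex = UINT32_MAX;
    for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {
        if ((memReqs.memoryTypeBits & (1u << i)) &&
            (memProps.memoryTypes[i].propertyFlags & requestedFlags) == requestedFlags) {
            typeIndex = i;
            break;
        }
    }
    if (UINT32_MAX == typeIndex) {
        return false;
    }

    VkMemoryAllocateInfo allocInfo = {};
    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocInfo.allocationSize = memReqs.size;
    allocInfo.memoryTypeIndex = typeIndex;
    if (vkAllocateMemory(device, &allocInfo, nullptr, memory) != VK_SUCCESS) {
        return false;
    }

    // Bind the allocation to the buffer at offset 0; clean up on failure.
    if (vkBindBufferMemory(device, buffer, *memory, 0) != VK_SUCCESS) {
        vkFreeMemory(device, *memory, nullptr);
        return false;
    }
    return true;
}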
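
The two layout-mapping helpers at the end of the file supply the source halves of image memory barriers. A hedged sketch of how a caller might use them when transitioning an image to the shader-read layout (cmdBuffer, image, and oldLayout are assumed to exist; the destination stage and access masks here are the caller's choice, not something these helpers compute):

// Hypothetical usage of the mapping helpers when recording a layout transition.
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(oldLayout);
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
barrier.oldLayout = oldLayout;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };

vkCmdPipelineBarrier(cmdBuffer,
                     GrVkMemory::LayoutToPipelineStageFlags(oldLayout), // src stage
                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,             // dst stage
                     0, 0, nullptr, 0, nullptr, 1, &barrier);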