aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
-rw-r--r--src/gpu/vk/GrVkGpu.cpp8
-rw-r--r--src/gpu/vk/GrVkImage.cpp56
-rw-r--r--src/gpu/vk/GrVkImage.h4
-rw-r--r--src/gpu/vk/GrVkMemory.cpp52
-rw-r--r--src/gpu/vk/GrVkMemory.h4
-rw-r--r--tools/sk_app/VulkanWindowContext.cpp6
6 files changed, 65 insertions, 65 deletions
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 428a74bae7..56d0b95bd0 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -1391,7 +1391,7 @@ bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool
memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = nullptr;
- barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
+ barrier.srcAccessMask = GrVkImage::LayoutToSrcAccessMask(initialLayout);
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.oldLayout = initialLayout;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
@@ -1400,7 +1400,7 @@ bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool
barrier.image = image;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
- VK_CALL(CmdPipelineBarrier(cmdBuffer, GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
+ VK_CALL(CmdPipelineBarrier(cmdBuffer, GrVkImage::LayoutToPipelineStageFlags(initialLayout),
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1,
&barrier));
initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
@@ -1432,7 +1432,7 @@ bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool
memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = nullptr;
- barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
+ barrier.srcAccessMask = GrVkImage::LayoutToSrcAccessMask(initialLayout);
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
barrier.oldLayout = initialLayout;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
@@ -1441,7 +1441,7 @@ bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool
barrier.image = image;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
VK_CALL(CmdPipelineBarrier(cmdBuffer,
- GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
+ GrVkImage::LayoutToPipelineStageFlags(initialLayout),
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
0,
0, nullptr,
diff --git a/src/gpu/vk/GrVkImage.cpp b/src/gpu/vk/GrVkImage.cpp
index 556a088fb1..9480a7b9cc 100644
--- a/src/gpu/vk/GrVkImage.cpp
+++ b/src/gpu/vk/GrVkImage.cpp
@@ -12,6 +12,58 @@
#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+VkPipelineStageFlags GrVkImage::LayoutToPipelineStageFlags(const VkImageLayout layout) {
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_TRANSFER_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ return VK_PIPELINE_STAGE_HOST_BIT;
+ }
+
+ SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
+ return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+}
+
+VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
+ // Currently we assume we will never be doing any explicit shader writes (this doesn't include
+ // color attachment or depth/stencil writes). So we will ignore the
+ // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.
+
+ // We can only directly access the host memory if we are in preinitialized or general layout,
+ // and the image is linear.
+ // TODO: Add check for linear here so we are not always adding host to general, and we should
+ // only be in preinitialized if we are linear
+ VkAccessFlags flags = 0;
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_TRANSFER_READ_BIT |
+ VK_ACCESS_SHADER_READ_BIT |
+ VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ flags = VK_ACCESS_HOST_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ flags = VK_ACCESS_SHADER_READ_BIT;
+ }
+ return flags;
+}
+
VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
switch (format) {
case VK_FORMAT_S8_UINT:
@@ -42,8 +94,8 @@ void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
return;
}
- VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(currentLayout);
- VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(currentLayout);
+ VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
+ VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineStageFlags(currentLayout);
VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);
VkImageMemoryBarrier imageMemoryBarrier = {
diff --git a/src/gpu/vk/GrVkImage.h b/src/gpu/vk/GrVkImage.h
index 038b0ee86d..e19228ca11 100644
--- a/src/gpu/vk/GrVkImage.h
+++ b/src/gpu/vk/GrVkImage.h
@@ -100,6 +100,10 @@ public:
void setResourceRelease(sk_sp<GrReleaseProcHelper> releaseHelper);
+ // Helpers to use for setting the layout of the VkImage
+ static VkPipelineStageFlags LayoutToPipelineStageFlags(const VkImageLayout layout);
+ static VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
+
protected:
void releaseImage(const GrVkGpu* gpu);
void abandonImage();
diff --git a/src/gpu/vk/GrVkMemory.cpp b/src/gpu/vk/GrVkMemory.cpp
index e391d02aee..4f619a3ef3 100644
--- a/src/gpu/vk/GrVkMemory.cpp
+++ b/src/gpu/vk/GrVkMemory.cpp
@@ -246,58 +246,6 @@ void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
}
}
-VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
- if (VK_IMAGE_LAYOUT_GENERAL == layout) {
- return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
- } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
- return VK_PIPELINE_STAGE_TRANSFER_BIT;
- } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
- VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
- return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
- } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
- return VK_PIPELINE_STAGE_HOST_BIT;
- }
-
- SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
- return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
-}
-
-VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
- // Currently we assume we will never being doing any explict shader writes (this doesn't include
- // color attachment or depth/stencil writes). So we will ignore the
- // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.
-
- // We can only directly access the host memory if we are in preinitialized or general layout,
- // and the image is linear.
- // TODO: Add check for linear here so we are not always adding host to general, and we should
- // only be in preinitialized if we are linear
- VkAccessFlags flags = 0;;
- if (VK_IMAGE_LAYOUT_GENERAL == layout) {
- flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_TRANSFER_WRITE_BIT |
- VK_ACCESS_TRANSFER_READ_BIT |
- VK_ACCESS_SHADER_READ_BIT |
- VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
- } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
- flags = VK_ACCESS_HOST_WRITE_BIT;
- } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
- flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
- } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
- flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
- } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
- flags = VK_ACCESS_TRANSFER_WRITE_BIT;
- } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
- flags = VK_ACCESS_TRANSFER_READ_BIT;
- } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
- flags = VK_ACCESS_SHADER_READ_BIT;
- }
- return flags;
-}
-
void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
VkDeviceSize size) {
if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
diff --git a/src/gpu/vk/GrVkMemory.h b/src/gpu/vk/GrVkMemory.h
index baf843e02c..bb6681435f 100644
--- a/src/gpu/vk/GrVkMemory.h
+++ b/src/gpu/vk/GrVkMemory.h
@@ -34,10 +34,6 @@ namespace GrVkMemory {
GrVkAlloc* alloc);
void FreeImageMemory(const GrVkGpu* gpu, bool linearTiling, const GrVkAlloc& alloc);
- VkPipelineStageFlags LayoutToPipelineStageFlags(const VkImageLayout layout);
-
- VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
-
void FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
VkDeviceSize size);
void InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
diff --git a/tools/sk_app/VulkanWindowContext.cpp b/tools/sk_app/VulkanWindowContext.cpp
index 7021504f2b..c145fd2410 100644
--- a/tools/sk_app/VulkanWindowContext.cpp
+++ b/tools/sk_app/VulkanWindowContext.cpp
@@ -12,8 +12,8 @@
#include "SkSurface.h"
#include "VulkanWindowContext.h"
+#include "vk/GrVkImage.h"
#include "vk/GrVkInterface.h"
-#include "vk/GrVkMemory.h"
#include "vk/GrVkUtil.h"
#include "vk/GrVkTypes.h"
@@ -565,9 +565,9 @@ void VulkanWindowContext::swapBuffers() {
SkASSERT(imageInfo.fImage == fImages[backbuffer->fImageIndex]);
VkImageLayout layout = imageInfo.fImageLayout;
- VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineStageFlags(layout);
VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
- VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(layout);
VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
VkImageMemoryBarrier imageMemoryBarrier = {