author      Greg Daniel <egdaniel@google.com>  2018-05-24 12:34:29 -0400
committer   Skia Commit-Bot <skia-commit-bot@chromium.org>  2018-05-24 16:58:38 +0000
commit      6ddbafcc898d1f487f147c651a3b6519b7210802 (patch)
tree        69cee6d08d7b83818d5731a9228dfe48cfd3c635 /src
parent      4ade54d81ba2cde9cc14c85a25ef710bdc236c39 (diff)
Move vulkan layout helpers from GrVkMemory to GrVkImage.
Bug: skia:
Change-Id: Iebcf5844a0b469dea1e96e351f91239ff512f708
Reviewed-on: https://skia-review.googlesource.com/129934
Reviewed-by: Jim Van Verth <jvanverth@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
Diffstat (limited to 'src')
-rw-r--r--  src/gpu/vk/GrVkGpu.cpp     8
-rw-r--r--  src/gpu/vk/GrVkImage.cpp  56
-rw-r--r--  src/gpu/vk/GrVkImage.h     4
-rw-r--r--  src/gpu/vk/GrVkMemory.cpp 52
-rw-r--r--  src/gpu/vk/GrVkMemory.h    4
5 files changed, 62 insertions(+), 62 deletions(-)
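
For context, the relocated helpers are used when building layout-transition barriers. Below is a minimal sketch of the call pattern after this change; cmdBuffer and image are hypothetical stand-ins for real state, and the sketch mirrors the GrVkGpu.cpp hunks that follow rather than reproducing code from this commit.

    // Transition a color image from its current layout to TRANSFER_DST.
    VkImageLayout initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VkImageMemoryBarrier barrier;
    memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.pNext = nullptr;
    // The relocated helpers map the current layout to the access mask and
    // pipeline stage that must be synchronized before the transition.
    barrier.srcAccessMask = GrVkImage::LayoutToSrcAccessMask(initialLayout);
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.oldLayout = initialLayout;
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;  // hypothetical VkImage
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};

    vkCmdPipelineBarrier(cmdBuffer,  // hypothetical VkCommandBuffer
                         GrVkImage::LayoutToPipelineStageFlags(initialLayout),
                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                         0, 0, nullptr, 0, nullptr, 1, &barrier);
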
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 428a74bae7..56d0b95bd0 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -1391,7 +1391,7 @@ bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool
memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = nullptr;
- barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
+ barrier.srcAccessMask = GrVkImage::LayoutToSrcAccessMask(initialLayout);
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.oldLayout = initialLayout;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
@@ -1400,7 +1400,7 @@ bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool
barrier.image = image;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
- VK_CALL(CmdPipelineBarrier(cmdBuffer, GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
+ VK_CALL(CmdPipelineBarrier(cmdBuffer, GrVkImage::LayoutToPipelineStageFlags(initialLayout),
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1,
&barrier));
initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
@@ -1432,7 +1432,7 @@ bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool
memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = nullptr;
- barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
+ barrier.srcAccessMask = GrVkImage::LayoutToSrcAccessMask(initialLayout);
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
barrier.oldLayout = initialLayout;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
@@ -1441,7 +1441,7 @@ bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool
barrier.image = image;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
VK_CALL(CmdPipelineBarrier(cmdBuffer,
- GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
+ GrVkImage::LayoutToPipelineStageFlags(initialLayout),
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
0,
0, nullptr,
diff --git a/src/gpu/vk/GrVkImage.cpp b/src/gpu/vk/GrVkImage.cpp
index 556a088fb1..9480a7b9cc 100644
--- a/src/gpu/vk/GrVkImage.cpp
+++ b/src/gpu/vk/GrVkImage.cpp
@@ -12,6 +12,58 @@
#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+VkPipelineStageFlags GrVkImage::LayoutToPipelineStageFlags(const VkImageLayout layout) {
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_TRANSFER_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ return VK_PIPELINE_STAGE_HOST_BIT;
+ }
+
+ SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
+ return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+}
+
+VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
+ // Currently we assume we will never be doing any explicit shader writes (this doesn't include
+ // color attachment or depth/stencil writes). So we will ignore the
+ // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.
+
+ // We can only directly access the host memory if we are in preinitialized or general layout,
+ // and the image is linear.
+ // TODO: Add a check for linear tiling here so we are not always adding host access to general
+ // layout; we should only be in preinitialized if the image is linear.
+ VkAccessFlags flags = 0;
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_TRANSFER_READ_BIT |
+ VK_ACCESS_SHADER_READ_BIT |
+ VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ flags = VK_ACCESS_HOST_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ flags = VK_ACCESS_SHADER_READ_BIT;
+ }
+ return flags;
+}
+
VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
switch (format) {
case VK_FORMAT_S8_UINT:
@@ -42,8 +94,8 @@ void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
return;
}
- VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(currentLayout);
- VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(currentLayout);
+ VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
+ VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineStageFlags(currentLayout);
VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);
VkImageMemoryBarrier imageMemoryBarrier = {
diff --git a/src/gpu/vk/GrVkImage.h b/src/gpu/vk/GrVkImage.h
index 038b0ee86d..e19228ca11 100644
--- a/src/gpu/vk/GrVkImage.h
+++ b/src/gpu/vk/GrVkImage.h
@@ -100,6 +100,10 @@ public:
void setResourceRelease(sk_sp<GrReleaseProcHelper> releaseHelper);
+ // Helpers to use for setting the layout of the VkImage
+ static VkPipelineStageFlags LayoutToPipelineStageFlags(const VkImageLayout layout);
+ static VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
+
protected:
void releaseImage(const GrVkGpu* gpu);
void abandonImage();
diff --git a/src/gpu/vk/GrVkMemory.cpp b/src/gpu/vk/GrVkMemory.cpp
index e391d02aee..4f619a3ef3 100644
--- a/src/gpu/vk/GrVkMemory.cpp
+++ b/src/gpu/vk/GrVkMemory.cpp
@@ -246,58 +246,6 @@ void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
}
}
-VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
- if (VK_IMAGE_LAYOUT_GENERAL == layout) {
- return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
- } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
- return VK_PIPELINE_STAGE_TRANSFER_BIT;
- } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
- VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
- return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
- } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
- return VK_PIPELINE_STAGE_HOST_BIT;
- }
-
- SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
- return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
-}
-
-VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
- // Currently we assume we will never being doing any explict shader writes (this doesn't include
- // color attachment or depth/stencil writes). So we will ignore the
- // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.
-
- // We can only directly access the host memory if we are in preinitialized or general layout,
- // and the image is linear.
- // TODO: Add check for linear here so we are not always adding host to general, and we should
- // only be in preinitialized if we are linear
- VkAccessFlags flags = 0;;
- if (VK_IMAGE_LAYOUT_GENERAL == layout) {
- flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_TRANSFER_WRITE_BIT |
- VK_ACCESS_TRANSFER_READ_BIT |
- VK_ACCESS_SHADER_READ_BIT |
- VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
- } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
- flags = VK_ACCESS_HOST_WRITE_BIT;
- } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
- flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
- } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
- flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
- } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
- flags = VK_ACCESS_TRANSFER_WRITE_BIT;
- } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
- flags = VK_ACCESS_TRANSFER_READ_BIT;
- } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
- flags = VK_ACCESS_SHADER_READ_BIT;
- }
- return flags;
-}
-
void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
VkDeviceSize size) {
if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
diff --git a/src/gpu/vk/GrVkMemory.h b/src/gpu/vk/GrVkMemory.h
index baf843e02c..bb6681435f 100644
--- a/src/gpu/vk/GrVkMemory.h
+++ b/src/gpu/vk/GrVkMemory.h
@@ -34,10 +34,6 @@ namespace GrVkMemory {
GrVkAlloc* alloc);
void FreeImageMemory(const GrVkGpu* gpu, bool linearTiling, const GrVkAlloc& alloc);
- VkPipelineStageFlags LayoutToPipelineStageFlags(const VkImageLayout layout);
-
- VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
-
void FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
VkDeviceSize size);
void InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,