Diffstat (limited to 'src/gpu')
-rw-r--r--  src/gpu/vk/GrVkGpuCommandBuffer.cpp | 37
-rw-r--r--  src/gpu/vk/GrVkGpuCommandBuffer.h   |  4
2 files changed, 23 insertions(+), 18 deletions(-)
diff --git a/src/gpu/vk/GrVkGpuCommandBuffer.cpp b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
index fe9585f9df..7c20843279 100644
--- a/src/gpu/vk/GrVkGpuCommandBuffer.cpp
+++ b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
@@ -192,6 +192,15 @@ void GrVkGpuRTCommandBuffer::submit() {
false);
}
+ // If we have any sampled images, set their layout now.
+ for (int j = 0; j < cbInfo.fSampledImages.count(); ++j) {
+ cbInfo.fSampledImages[j]->setImageLayout(fGpu,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ VK_ACCESS_SHADER_READ_BIT,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ false);
+ }
+
// TODO: We can't add this optimization yet since many things create a scratch texture which
// adds the discard immediately, but then don't draw to it right away. This causes the
// discard to be ignored and we get yelled at for loading uninitialized data. However, once
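
This new loop is the delayed half of the change: it implements the TODO carried by the deleted set_texture_layout() below, transitioning every collected image once submit() runs, after the pre-draw uploads and copies and ahead of the secondary command buffers. For reference, a minimal sketch in plain Vulkan of what such a transition records; the helper transition_to_sampled and its parameters are hypothetical, not Skia's API:

    #include <vulkan/vulkan.h>

    // Hypothetical helper (not Skia's API): the barrier that a transition
    // into the sampled layout amounts to. The caller supplies the image's
    // current layout/access/stage, which Skia tracks per GrVkImage.
    static void transition_to_sampled(VkCommandBuffer cmd, VkImage image,
                                      VkImageLayout oldLayout,
                                      VkAccessFlags srcAccess,
                                      VkPipelineStageFlags srcStage,
                                      uint32_t mipLevels) {
        VkImageMemoryBarrier barrier = {};
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.oldLayout = oldLayout;
        barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        barrier.srcAccessMask = srcAccess;
        barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.image = image;
        barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
        // Wait on prior writes (e.g. the pre-draw uploads/copies recorded
        // above), then make the image readable from any graphics stage.
        vkCmdPipelineBarrier(cmd, srcStage, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                             0 /*dependencyFlags*/, 0, nullptr, 0, nullptr,
                             1, &barrier);
    }
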
@@ -545,18 +554,9 @@ GrVkPipelineState* GrVkGpuRTCommandBuffer::prepareDrawState(const GrPipeline& pi
return pipelineState;
}
-static void set_texture_layout(GrVkTexture* vkTexture, GrVkGpu* gpu) {
- // TODO: If we ever decide to create the secondary command buffers ahead of time before we
- // are actually going to submit them, we will need to track the sampled images and delay
- // adding the layout change/barrier until we are ready to submit.
- vkTexture->setImageLayout(gpu,
- VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
- VK_ACCESS_SHADER_READ_BIT,
- VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
- false);
-}
-
-static void prepare_sampled_images(const GrResourceIOProcessor& processor, GrVkGpu* gpu) {
+static void prepare_sampled_images(const GrResourceIOProcessor& processor,
+ SkTArray<GrVkImage*>* sampledImages,
+ GrVkGpu* gpu) {
for (int i = 0; i < processor.numTextureSamplers(); ++i) {
const GrResourceIOProcessor::TextureSampler& sampler = processor.textureSampler(i);
GrVkTexture* vkTexture = static_cast<GrVkTexture*>(sampler.peekTexture());
@@ -574,7 +574,7 @@ static void prepare_sampled_images(const GrResourceIOProcessor& processor, GrVkG
vkTexture->texturePriv().markMipMapsClean();
}
}
- set_texture_layout(vkTexture, gpu);
+ sampledImages->push_back(vkTexture);
}
}
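
With the barrier deferred, prepare_sampled_images() becomes a pure collector: it still deals with dirty mipmaps up front, but instead of recording a layout change it appends the texture to the caller's array. A self-contained sketch of that shape, with stub types standing in for GrVkGpu, GrVkTexture, and SkTArray (all simplifications, not Skia's real interfaces):

    #include <vector>

    // Stubs standing in for Skia's types; hypothetical, illustration only.
    struct Texture;
    struct Gpu {
        void regenerateMipMapLevels(Texture*) { /* blit chain, etc. */ }
    };
    struct Texture {
        bool fMipMapsDirty = false;
    };

    // Collect-then-flush: do work that is safe to record now (mipmap
    // regeneration), but defer the layout barrier by remembering the
    // texture; the caller drains the array at submit time.
    static void prepare_sampled_images(Gpu* gpu, Texture* tex,
                                       std::vector<Texture*>* sampledImages) {
        if (tex->fMipMapsDirty) {
            gpu->regenerateMipMapLevels(tex);
            tex->fMipMapsDirty = false;    // cf. markMipMapsClean()
        }
        sampledImages->push_back(tex);     // barrier happens in submit()
    }

The dst-copy texture in onDraw() below goes through the same array, so every image a draw reads is transitioned along one code path.
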
@@ -589,13 +589,16 @@ void GrVkGpuRTCommandBuffer::onDraw(const GrPipeline& pipeline,
if (!meshCount) {
return;
}
- prepare_sampled_images(primProc, fGpu);
+
+ CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
+
+ prepare_sampled_images(primProc, &cbInfo.fSampledImages, fGpu);
GrFragmentProcessor::Iter iter(pipeline);
while (const GrFragmentProcessor* fp = iter.next()) {
- prepare_sampled_images(*fp, fGpu);
+ prepare_sampled_images(*fp, &cbInfo.fSampledImages, fGpu);
}
if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
- set_texture_layout(static_cast<GrVkTexture*>(dstTexture), fGpu);
+ cbInfo.fSampledImages.push_back(static_cast<GrVkTexture*>(dstTexture));
}
GrPrimitiveType primitiveType = meshes[0].primitiveType();
@@ -607,8 +610,6 @@ void GrVkGpuRTCommandBuffer::onDraw(const GrPipeline& pipeline,
return;
}
- CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
-
for (int i = 0; i < meshCount; ++i) {
const GrMesh& mesh = meshes[i];
if (mesh.primitiveType() != primitiveType) {
diff --git a/src/gpu/vk/GrVkGpuCommandBuffer.h b/src/gpu/vk/GrVkGpuCommandBuffer.h
index 2aa457bad0..0254a3fe3b 100644
--- a/src/gpu/vk/GrVkGpuCommandBuffer.h
+++ b/src/gpu/vk/GrVkGpuCommandBuffer.h
@@ -163,6 +163,10 @@ private:
// command buffer.
SkTArray<InlineUploadInfo> fPreDrawUploads;
SkTArray<CopyInfo> fPreCopies;
+ // Array of images that will be sampled and thus need to be transferred to the sampled
+ // layout before submitting the secondary command buffers. This must happen after we do
+ // any pre-draw uploads or copies.
+ SkTArray<GrVkImage*> fSampledImages;
GrVkSecondaryCommandBuffer* currentCmdBuf() {
return fCommandBuffers.back();
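
Taken together with the .cpp changes, the new member's lifecycle is: filled with push_back() while draws are recorded, drained with the count()/operator[] loop in submit(). The comment's ordering requirement is load-bearing, since Vulkan transfer commands such as vkCmdCopyBufferToImage require the destination image to be in TRANSFER_DST_OPTIMAL (or GENERAL), not SHADER_READ_ONLY_OPTIMAL. A toy, purely hypothetical illustration of the order submit() must record things in:

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy stand-in for the primary command buffer: just logs step names.
    struct ToyPrimaryCB {
        std::vector<std::string> ops;
        void record(std::string op) { ops.push_back(std::move(op)); }
    };

    int main() {
        ToyPrimaryCB cmd;
        // 1. Pre-draw uploads/copies, while images are still writable.
        cmd.record("flush fPreDrawUploads / fPreCopies");
        // 2. The deferred barriers collected in fSampledImages.
        cmd.record("transition sampled images -> SHADER_READ_ONLY_OPTIMAL");
        // 3. The secondary command buffers whose draws sample those images.
        cmd.record("execute secondary command buffers");
        assert(cmd.ops.size() == 3);
        return 0;
    }
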