| author | egdaniel <egdaniel@google.com> | 2016-05-02 06:50:36 -0700 |
|---|---|---|
| committer | Commit bot <commit-bot@chromium.org> | 2016-05-02 06:50:36 -0700 |
| commit | 778555cfda267eee031b9fc8530f988cd270fbf0 | |
| tree | 9f8376ed12ce1862a031b81525f393e0ec8a27a3 | |
| parent | aad1f8f70b0e79467165df7c9839489f40088c09 | |
Unify DescriptorPools for vulkan uniform descriptor sets
BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1927943002
Review-Url: https://codereview.chromium.org/1927943002
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/gpu/vk/GrVkDescriptorPool.h | 2 |
| -rw-r--r-- | src/gpu/vk/GrVkPipelineState.cpp | 40 |
| -rw-r--r-- | src/gpu/vk/GrVkPipelineState.h | 4 |
| -rw-r--r-- | src/gpu/vk/GrVkPipelineStateBuilder.cpp | 42 |
| -rw-r--r-- | src/gpu/vk/GrVkResourceProvider.cpp | 83 |
| -rw-r--r-- | src/gpu/vk/GrVkResourceProvider.h | 28 |

6 files changed, 147 insertions, 52 deletions
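The patch moves uniform-buffer descriptor-set allocation out of each GrVkPipelineState's private DescriptorPoolManager and into a single pool owned by GrVkResourceProvider, which is replaced by a pool of double the size whenever it fills up (capped at kMaxUniformDescriptors). The standalone sketch below is not Skia code; PoolModel, allocateSet, and main are made-up names, and only the three constants mirror the enum added to GrVkResourceProvider.h. It shows how the shared, doubling pool amortizes pool creation across many descriptor-set allocations.

```cpp
// Standalone sketch (not Skia code): models the pool-growth policy used by
// GrVkResourceProvider::getUniformDescriptorSet() in this patch. Only the
// constants come from the patch; PoolModel and allocateSet are hypothetical.
#include <cstdio>

namespace {

constexpr int kMaxUniformDescriptors      = 1024;  // hard cap on pool capacity
constexpr int kNumUniformDescPerSet       = 2;     // vertex UBO + fragment UBO per set
constexpr int kStartNumUniformDescriptors = 16;    // first pool's capacity

struct PoolModel {
    int fCurrMaxUniDescriptors = 0;    // capacity of the current pool
    int fCurrentUniformDescCount = 0;  // descriptors handed out from it
    int fPoolsCreated = 0;             // how many pools we have cycled through

    void init() {
        fCurrMaxUniDescriptors = kStartNumUniformDescriptors;
        ++fPoolsCreated;  // stands in for creating the first descriptor pool
    }

    // Mirrors the "allocate one uniform descriptor set" path: if the current
    // pool cannot hold another set, replace it with one up to twice as large,
    // capped at kMaxUniformDescriptors.
    void allocateSet() {
        fCurrentUniformDescCount += kNumUniformDescPerSet;
        if (fCurrentUniformDescCount > fCurrMaxUniDescriptors) {
            if (fCurrMaxUniDescriptors < (kMaxUniformDescriptors >> 1)) {
                fCurrMaxUniDescriptors <<= 1;
            } else {
                fCurrMaxUniDescriptors = kMaxUniformDescriptors;
            }
            ++fPoolsCreated;  // old pool is dropped, a bigger one is created
            fCurrentUniformDescCount = kNumUniformDescPerSet;
        }
    }
};

}  // namespace

int main() {
    PoolModel model;
    model.init();
    for (int i = 0; i < 10000; ++i) {
        model.allocateSet();
    }
    // With capacity doubling up to 1024 descriptors (512 sets per pool),
    // thousands of sets need only a few dozen pool replacements instead of
    // one dedicated pool per pipeline state.
    std::printf("pools created: %d, final capacity: %d descriptors\n",
                model.fPoolsCreated, model.fCurrMaxUniDescriptors);
    return 0;
}
```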
diff --git a/src/gpu/vk/GrVkDescriptorPool.h b/src/gpu/vk/GrVkDescriptorPool.h
index bb5697c26f..8fcc67086c 100644
--- a/src/gpu/vk/GrVkDescriptorPool.h
+++ b/src/gpu/vk/GrVkDescriptorPool.h
@@ -21,7 +21,7 @@ class GrVkGpu;
  */
 class GrVkDescriptorPool : public GrVkResource {
 public:
-    explicit GrVkDescriptorPool(const GrVkGpu* gpu, VkDescriptorType type, uint32_t count);
+    GrVkDescriptorPool(const GrVkGpu* gpu, VkDescriptorType type, uint32_t count);

     VkDescriptorPool descPool() const { return fDescPool; }

diff --git a/src/gpu/vk/GrVkPipelineState.cpp b/src/gpu/vk/GrVkPipelineState.cpp
index e416069458..b101adc0fd 100644
--- a/src/gpu/vk/GrVkPipelineState.cpp
+++ b/src/gpu/vk/GrVkPipelineState.cpp
@@ -28,7 +28,7 @@ GrVkPipelineState::GrVkPipelineState(GrVkGpu* gpu,
                                      const GrVkPipelineState::Desc& desc,
                                      GrVkPipeline* pipeline,
                                      VkPipelineLayout layout,
-                                     VkDescriptorSetLayout dsLayout[2],
+                                     VkDescriptorSetLayout dsSamplerLayout,
                                      const BuiltinUniformHandles& builtinUniformHandles,
                                      const UniformInfoArray& uniforms,
                                      uint32_t vertexUniformSize,
@@ -47,11 +47,9 @@ GrVkPipelineState::GrVkPipelineState(GrVkGpu* gpu,
     , fFragmentProcessors(fragmentProcessors)
     , fDesc(desc)
     , fDataManager(uniforms, vertexUniformSize, fragmentUniformSize)
-    , fSamplerPoolManager(dsLayout[GrVkUniformHandler::kSamplerDescSet],
-                          VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, numSamplers, gpu)
-    , fUniformPoolManager(dsLayout[GrVkUniformHandler::kUniformBufferDescSet],
-                          VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
-                          (vertexUniformSize || fragmentUniformSize) ? 2 : 0, gpu) {
+    , fSamplerPoolManager(dsSamplerLayout, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+                          numSamplers, gpu)
+    , fCurrentUniformDescPool(nullptr) {
     fSamplers.setReserve(numSamplers);
     fTextureViews.setReserve(numSamplers);
     fTextures.setReserve(numSamplers);
@@ -126,7 +124,10 @@ void GrVkPipelineState::freeGPUResources(const GrVkGpu* gpu) {
     }

     fSamplerPoolManager.freeGPUResources(gpu);
-    fUniformPoolManager.freeGPUResources(gpu);
+    if (fCurrentUniformDescPool) {
+        fCurrentUniformDescPool->unref(gpu);
+        fCurrentUniformDescPool = nullptr;
+    }

     this->freeTempResources(gpu);
 }
@@ -156,7 +157,10 @@ void GrVkPipelineState::abandonGPUResources() {
     fTextures.rewind();

     fSamplerPoolManager.abandonGPUResources();
-    fUniformPoolManager.abandonGPUResources();
+    if (fCurrentUniformDescPool) {
+        fCurrentUniformDescPool->unrefAndAbandon();
+        fCurrentUniformDescPool = nullptr;
+    }
 }

 static void append_texture_bindings(const GrProcessor& processor,
@@ -205,8 +209,17 @@ void GrVkPipelineState::setData(GrVkGpu* gpu,
     if (fVertexUniformBuffer.get() || fFragmentUniformBuffer.get()) {
         if (fDataManager.uploadUniformBuffers(gpu, fVertexUniformBuffer, fFragmentUniformBuffer) ||
             VK_NULL_HANDLE == fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet]) {
-            fUniformPoolManager.getNewDescriptorSet(gpu,
-                &fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet]);
+            const GrVkDescriptorPool* pool;
+            int uniformDSIdx = GrVkUniformHandler::kUniformBufferDescSet;
+            gpu->resourceProvider().getUniformDescriptorSet(&fDescriptorSets[uniformDSIdx],
+                                                            &pool);
+            if (pool != fCurrentUniformDescPool) {
+                if (fCurrentUniformDescPool) {
+                    fCurrentUniformDescPool->unref(gpu);
+                }
+                fCurrentUniformDescPool = pool;
+                fCurrentUniformDescPool->ref();
+            }
             this->writeUniformBuffers(gpu);
         }
     }
@@ -376,8 +389,8 @@ void GrVkPipelineState::addUniformResources(GrVkCommandBuffer& commandBuffer) {
     if (fSamplerPoolManager.fPool) {
         commandBuffer.addResource(fSamplerPoolManager.fPool);
     }
-    if (fUniformPoolManager.fPool) {
-        commandBuffer.addResource(fUniformPoolManager.fPool);
+    if (fCurrentUniformDescPool) {
+        commandBuffer.addResource(fCurrentUniformDescPool);
     }

     if (fVertexUniformBuffer.get()) {
@@ -418,7 +431,8 @@ void GrVkPipelineState::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
     SkASSERT(fPool || !fMaxDescriptors);
 }

-void GrVkPipelineState::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu, VkDescriptorSet* ds) {
+void GrVkPipelineState::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
+                                                                   VkDescriptorSet* ds) {
     if (!fMaxDescriptors) {
         return;
     }
diff --git a/src/gpu/vk/GrVkPipelineState.h b/src/gpu/vk/GrVkPipelineState.h
index ad3afa7314..b7f954ccbd 100644
--- a/src/gpu/vk/GrVkPipelineState.h
+++ b/src/gpu/vk/GrVkPipelineState.h
@@ -149,7 +149,7 @@ private:
                       const GrVkPipelineState::Desc&,
                       GrVkPipeline* pipeline,
                       VkPipelineLayout layout,
-                      VkDescriptorSetLayout dsLayout[2],
+                      VkDescriptorSetLayout dsSamplerLayout,
                       const BuiltinUniformHandles& builtinUniformHandles,
                       const UniformInfoArray& uniforms,
                       uint32_t vertexUniformSize,
@@ -280,7 +280,7 @@ private:
     GrVkPipelineStateDataManager fDataManager;

     DescriptorPoolManager fSamplerPoolManager;
-    DescriptorPoolManager fUniformPoolManager;
+    const GrVkDescriptorPool* fCurrentUniformDescPool;

     int fNumSamplers;

diff --git a/src/gpu/vk/GrVkPipelineStateBuilder.cpp b/src/gpu/vk/GrVkPipelineStateBuilder.cpp
index be30bb3e4d..a191505a38 100644
--- a/src/gpu/vk/GrVkPipelineStateBuilder.cpp
+++ b/src/gpu/vk/GrVkPipelineStateBuilder.cpp
@@ -193,35 +193,8 @@ GrVkPipelineState* GrVkPipelineStateBuilder::finalize(GrPrimitiveType primitiveT
                                                       nullptr,
                                                       &dsLayout[GrVkUniformHandler::kSamplerDescSet]));

-    // Create Uniform Buffer Descriptor
-    // We always attach uniform buffers to descriptor set 1. The vertex uniform buffer will have
-    // binding 0 and the fragment binding 1.
-    VkDescriptorSetLayoutBinding dsUniBindings[2];
-    memset(&dsUniBindings, 0, 2 * sizeof(VkDescriptorSetLayoutBinding));
-    dsUniBindings[0].binding = GrVkUniformHandler::kVertexBinding;
-    dsUniBindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
-    dsUniBindings[0].descriptorCount = 1;
-    dsUniBindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
-    dsUniBindings[0].pImmutableSamplers = nullptr;
-    dsUniBindings[1].binding = GrVkUniformHandler::kFragBinding;
-    dsUniBindings[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
-    dsUniBindings[1].descriptorCount = 1;
-    dsUniBindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
-    dsUniBindings[1].pImmutableSamplers = nullptr;
-
-    VkDescriptorSetLayoutCreateInfo dsUniformLayoutCreateInfo;
-    memset(&dsUniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
-    dsUniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
-    dsUniformLayoutCreateInfo.pNext = nullptr;
-    dsUniformLayoutCreateInfo.flags = 0;
-    dsUniformLayoutCreateInfo.bindingCount = 2;
-    dsUniformLayoutCreateInfo.pBindings = dsUniBindings;
-
-    GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), CreateDescriptorSetLayout(
-                        fGpu->device(),
-                        &dsUniformLayoutCreateInfo,
-                        nullptr,
-                        &dsLayout[GrVkUniformHandler::kUniformBufferDescSet]));
+    // This layout is not owned by the PipelineStateBuilder and thus should not be destroyed.
+    dsLayout[GrVkUniformHandler::kUniformBufferDescSet] = fGpu->resourceProvider().getUniDSLayout();

     // Create the VkPipelineLayout
     VkPipelineLayoutCreateInfo layoutCreateInfo;
@@ -277,10 +250,11 @@ GrVkPipelineState* GrVkPipelineStateBuilder::finalize(GrPrimitiveType primitiveT
     if (!pipeline) {
         GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineLayout(fGpu->device(), pipelineLayout,
                                                               nullptr));
-        GR_VK_CALL(fGpu->vkInterface(), DestroyDescriptorSetLayout(fGpu->device(), dsLayout[0],
-                                                                   nullptr));
-        GR_VK_CALL(fGpu->vkInterface(), DestroyDescriptorSetLayout(fGpu->device(), dsLayout[1],
-                                                                   nullptr));
+        GR_VK_CALL(fGpu->vkInterface(),
+                   DestroyDescriptorSetLayout(fGpu->device(),
+                                              dsLayout[GrVkUniformHandler::kSamplerDescSet],
+                                              nullptr));
+
         this->cleanupFragmentProcessors();
         return nullptr;
     }
@@ -289,7 +263,7 @@ GrVkPipelineState* GrVkPipelineStateBuilder::finalize(GrPrimitiveType primitiveT
                                      desc,
                                      pipeline,
                                      pipelineLayout,
-                                     dsLayout,
+                                     dsLayout[GrVkUniformHandler::kSamplerDescSet],
                                      fUniformHandles,
                                      fUniformHandler.fUniforms,
                                      fUniformHandler.fCurrentVertexUBOOffset,
diff --git a/src/gpu/vk/GrVkResourceProvider.cpp b/src/gpu/vk/GrVkResourceProvider.cpp
index 8ba5ade333..bf283fc512 100644
--- a/src/gpu/vk/GrVkResourceProvider.cpp
+++ b/src/gpu/vk/GrVkResourceProvider.cpp
@@ -19,8 +19,11 @@ SkTDynamicHash<GrVkResource, uint32_t> GrVkResource::fTrace;
 SkRandom GrVkResource::fRandom;
 #endif

-GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu) : fGpu(gpu)
-    , fPipelineCache(VK_NULL_HANDLE) {
+GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
+    : fGpu(gpu)
+    , fPipelineCache(VK_NULL_HANDLE)
+    , fUniformDescPool(nullptr)
+    , fCurrentUniformDescCount(0) {
     fPipelineStateCache = new PipelineStateCache(gpu);
 }

@@ -30,6 +33,39 @@ GrVkResourceProvider::~GrVkResourceProvider() {
     delete fPipelineStateCache;
 }

+void GrVkResourceProvider::initUniformDescObjects() {
+    // Create Uniform Buffer Descriptor
+    // The vertex uniform buffer will have binding 0 and the fragment binding 1.
+    VkDescriptorSetLayoutBinding dsUniBindings[2];
+    memset(&dsUniBindings, 0, 2 * sizeof(VkDescriptorSetLayoutBinding));
+    dsUniBindings[0].binding = GrVkUniformHandler::kVertexBinding;
+    dsUniBindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+    dsUniBindings[0].descriptorCount = 1;
+    dsUniBindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
+    dsUniBindings[0].pImmutableSamplers = nullptr;
+    dsUniBindings[1].binding = GrVkUniformHandler::kFragBinding;
+    dsUniBindings[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+    dsUniBindings[1].descriptorCount = 1;
+    dsUniBindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
+    dsUniBindings[1].pImmutableSamplers = nullptr;
+
+    VkDescriptorSetLayoutCreateInfo dsUniformLayoutCreateInfo;
+    memset(&dsUniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+    dsUniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+    dsUniformLayoutCreateInfo.pNext = nullptr;
+    dsUniformLayoutCreateInfo.flags = 0;
+    dsUniformLayoutCreateInfo.bindingCount = 2;
+    dsUniformLayoutCreateInfo.pBindings = dsUniBindings;
+
+    GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), CreateDescriptorSetLayout(fGpu->device(),
+                                                                       &dsUniformLayoutCreateInfo,
+                                                                       nullptr,
+                                                                       &fUniformDescLayout));
+    fCurrMaxUniDescriptors = kStartNumUniformDescriptors;
+    fUniformDescPool = this->findOrCreateCompatibleDescriptorPool(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+                                                                  fCurrMaxUniDescriptors);
+}
+
 void GrVkResourceProvider::init() {
     VkPipelineCacheCreateInfo createInfo;
     memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
@@ -45,6 +81,8 @@ void GrVkResourceProvider::init() {
     if (VK_SUCCESS != result) {
         fPipelineCache = VK_NULL_HANDLE;
     }
+
+    this->initUniformDescObjects();
 }

 GrVkPipeline* GrVkResourceProvider::createPipeline(const GrPipeline& pipeline,
@@ -105,6 +143,36 @@ sk_sp<GrVkPipelineState> GrVkResourceProvider::findOrCreateCompatiblePipelineSta
     return fPipelineStateCache->refPipelineState(pipeline, proc, primitiveType, renderPass);
 }

+void GrVkResourceProvider::getUniformDescriptorSet(VkDescriptorSet* ds,
+                                                   const GrVkDescriptorPool** outPool) {
+    fCurrentUniformDescCount += kNumUniformDescPerSet;
+    if (fCurrentUniformDescCount > fCurrMaxUniDescriptors) {
+        fUniformDescPool->unref(fGpu);
+        if (fCurrMaxUniDescriptors < kMaxUniformDescriptors >> 1) {
+            fCurrMaxUniDescriptors = fCurrMaxUniDescriptors << 1;
+        } else {
+            fCurrMaxUniDescriptors = kMaxUniformDescriptors;
+        }
+        fUniformDescPool =
+            this->findOrCreateCompatibleDescriptorPool(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+                                                       fCurrMaxUniDescriptors);
+        fCurrentUniformDescCount = kNumUniformDescPerSet;
+    }
+    SkASSERT(fUniformDescPool);
+
+    VkDescriptorSetAllocateInfo dsAllocateInfo;
+    memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));
+    dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+    dsAllocateInfo.pNext = nullptr;
+    dsAllocateInfo.descriptorPool = fUniformDescPool->descPool();
+    dsAllocateInfo.descriptorSetCount = 1;
+    dsAllocateInfo.pSetLayouts = &fUniformDescLayout;
+    GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), AllocateDescriptorSets(fGpu->device(),
+                                                                    &dsAllocateInfo,
+                                                                    ds));
+    *outPool = fUniformDescPool;
+}
+
 GrVkCommandBuffer* GrVkResourceProvider::createCommandBuffer() {
     GrVkCommandBuffer* cmdBuffer = GrVkCommandBuffer::Create(fGpu, fGpu->cmdPool());
     fActiveCommandBuffers.push_back(cmdBuffer);
@@ -151,6 +219,14 @@ void GrVkResourceProvider::destroyResources() {
     GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
     fPipelineCache = VK_NULL_HANDLE;
+
+    if (fUniformDescLayout) {
+        GR_VK_CALL(fGpu->vkInterface(), DestroyDescriptorSetLayout(fGpu->device(),
+                                                                   fUniformDescLayout,
+                                                                   nullptr));
+        fUniformDescLayout = VK_NULL_HANDLE;
+    }
+    fUniformDescPool->unref(fGpu);
 }

 void GrVkResourceProvider::abandonResources() {
@@ -179,4 +255,7 @@ void GrVkResourceProvider::abandonResources() {
     SkASSERT(0 == GrVkResource::fTrace.count());
 #endif
     fPipelineCache = VK_NULL_HANDLE;
+
+    fUniformDescLayout = VK_NULL_HANDLE;
+    fUniformDescPool->unrefAndAbandon();
 }
diff --git a/src/gpu/vk/GrVkResourceProvider.h b/src/gpu/vk/GrVkResourceProvider.h
index 86aa5bce36..37ec6fdee5 100644
--- a/src/gpu/vk/GrVkResourceProvider.h
+++ b/src/gpu/vk/GrVkResourceProvider.h
@@ -70,6 +70,18 @@ public:
                                                            GrPrimitiveType,
                                                            const GrVkRenderPass& renderPass);

+    // For all our GrVkPipelineState objects, we require a layout where the first set contains two
+    // uniform buffers, one for the vertex shader and one for the fragment shader. Thus it is
+    // possible for us to use a shared descriptor pool to allocate all these similar descriptor
+    // sets. The caller is responsible for reffing the outPool for as long as the returned
+    // VkDescriptorSet is in use.
+    void getUniformDescriptorSet(VkDescriptorSet*, const GrVkDescriptorPool** outPool);
+
+    // Returns the compatible VkDescriptorSetLayout to use for uniform buffers. The caller does not
+    // own the VkDescriptorSetLayout and thus should not delete it. This function should be used
+    // when the caller needs the layout to create a VkPipelineLayout.
+    VkDescriptorSetLayout getUniDSLayout() const { return fUniformDescLayout; }
+
     // Destroy any cached resources. To be called before destroying the VkDevice.
     // The assumption is that all queues are idle and all command buffers are finished.
     // For resource tracing to work properly, this should be called after unrefing all other
@@ -122,6 +134,9 @@ private:
 #endif
     };

+    // Initialize the VkDescriptorSetLayout used for allocating new uniform buffer descriptor sets.
+    void initUniformDescObjects();
+
     GrVkGpu* fGpu;

     // Central cache for creating pipelines
@@ -140,6 +155,19 @@ private:

     // Cache of GrVkPipelineStates
     PipelineStateCache* fPipelineStateCache;
+
+    // Current pool to allocate uniform descriptor sets from
+    const GrVkDescriptorPool* fUniformDescPool;
+    VkDescriptorSetLayout fUniformDescLayout;
+    // Current number of uniform descriptors allocated from the pool
+    int fCurrentUniformDescCount;
+    int fCurrMaxUniDescriptors;
+
+    enum {
+        kMaxUniformDescriptors = 1024,
+        kNumUniformDescPerSet = 2,
+        kStartNumUniformDescriptors = 16,  // must be less than kMaxUniformDescriptors
+    };
 };

 #endif
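The header comment above states the caller-side contract: whoever receives a descriptor set from getUniformDescriptorSet must keep a ref on the pool it came from for as long as the set is in use. The sketch below is hypothetical code, not Skia code; FakePool and FakeCaller are stand-ins for GrVkDescriptorPool and GrVkPipelineState, and only model the ref-count bookkeeping that setData, freeGPUResources, and abandonGPUResources perform with fCurrentUniformDescPool.

```cpp
// Hypothetical caller-side sketch (not Skia code): shows the ref-counting
// contract from GrVkResourceProvider.h. The caller tracks the pool backing
// its current descriptor set and swaps refs whenever the provider hands back
// a different pool, as GrVkPipelineState::setData() now does.
#include <cassert>
#include <cstdio>

// Minimal stand-in for GrVkDescriptorPool's GrVkResource ref counting.
struct FakePool {
    int fRefCnt = 1;  // the provider holds the initial ref
    void ref() { ++fRefCnt; }
    void unref() { assert(fRefCnt > 0); --fRefCnt; }
};

struct FakeCaller {
    FakePool* fCurrentUniformDescPool = nullptr;  // pool backing our current set

    // Called each time the provider returns a (set, pool) pair; if the pool
    // changed since last time, drop our ref on the old one and ref the new one.
    void updatePool(FakePool* pool) {
        if (pool != fCurrentUniformDescPool) {
            if (fCurrentUniformDescPool) {
                fCurrentUniformDescPool->unref();
            }
            fCurrentUniformDescPool = pool;
            fCurrentUniformDescPool->ref();
        }
    }

    // Mirrors freeGPUResources(): release the pool when the set is no longer needed.
    void release() {
        if (fCurrentUniformDescPool) {
            fCurrentUniformDescPool->unref();
            fCurrentUniformDescPool = nullptr;
        }
    }
};

int main() {
    FakePool poolA, poolB;  // the provider starts with A, later replaces it with B
    FakeCaller caller;

    caller.updatePool(&poolA);  // first allocation comes from pool A
    caller.updatePool(&poolA);  // same pool again: no ref-count churn
    caller.updatePool(&poolB);  // provider grew the pool: swap refs from A to B
    caller.release();

    // Only the provider's original references remain on each pool.
    std::printf("poolA refs: %d, poolB refs: %d\n", poolA.fRefCnt, poolB.fRefCnt);
    return 0;
}
```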