path: root/src/gpu/vk/GrVkUniformBuffer.cpp
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/

#include "GrVkUniformBuffer.h"
#include "GrVkGpu.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

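// Creates a uniform buffer of the requested size. Requests at or below
// kStandardSize reuse pooled resources from the GrVkGpu's resource provider;
// larger requests get a dedicated buffer resource via CreateResource() below.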
GrVkUniformBuffer* GrVkUniformBuffer::Create(GrVkGpu* gpu, size_t size) {
    if (0 == size) {
        return nullptr;
    }
    const GrVkResource* resource = nullptr;
    if (size <= GrVkUniformBuffer::kStandardSize) {
        resource = gpu->resourceProvider().findOrCreateStandardUniformBufferResource();
    } else {
        resource = CreateResource(gpu, size);
    }
    if (!resource) {
        return nullptr;
    }

    GrVkBuffer::Desc desc;
    desc.fDynamic = true;
    desc.fType = GrVkBuffer::kUniform_Type;
    desc.fSizeInBytes = size;
    GrVkUniformBuffer* buffer = new GrVkUniformBuffer(gpu, desc,
                                                      (const GrVkUniformBuffer::Resource*) resource);
    if (!buffer) {
        // unref will destroy the resource even if it came from the resource
        // provider, but it keeps this failure path free of an extra conditional
        resource->unref(gpu);
    }
    return buffer;
}

// We implement our own creation function for this special buffer resource type
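// It creates a standalone VkBuffer, binds dynamic memory to it, and wraps both
// in a ref-counted Resource for uniform buffers larger than kStandardSize.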
const GrVkResource* GrVkUniformBuffer::CreateResource(GrVkGpu* gpu, size_t size) {
    if (0 == size) {
        return nullptr;
    }

    VkBuffer       buffer;
    GrVkAlloc      alloc;

    // create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = size;
    bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err;
    err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

    if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
                                              buffer,
                                              kUniform_Type,
                                              true,  // dynamic
                                              &alloc)) {
        // Memory allocation or binding failed; destroy the VkBuffer so it does
        // not leak before bailing out.
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    const GrVkResource* resource = new GrVkUniformBuffer::Resource(buffer, alloc);
    if (!resource) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        GrVkMemory::FreeBufferMemory(gpu, kUniform_Type, alloc);
        return nullptr;
    }

    return resource;
}

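// Override used by the GrVkBuffer base class when this buffer needs a new
// backing resource; it applies the same pooled-vs-dedicated selection as
// Create() above.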
const GrVkBuffer::Resource* GrVkUniformBuffer::createResource(GrVkGpu* gpu,
                                                              const GrVkBuffer::Desc& descriptor) {
    const GrVkResource* vkResource;
    if (descriptor.fSizeInBytes <= GrVkUniformBuffer::kStandardSize) {
        GrVkResourceProvider& provider = gpu->resourceProvider();
        vkResource = provider.findOrCreateStandardUniformBufferResource();
    } else {
        vkResource = CreateResource(gpu, descriptor.fSizeInBytes);
    }
    return (const GrVkBuffer::Resource*) vkResource;
}

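// Standard-size resources are returned to the resource provider's pool for
// reuse when recycled; larger one-off resources are simply unreffed (and
// destroyed when their last reference goes away).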
void GrVkUniformBuffer::Resource::onRecycle(GrVkGpu* gpu) const {
    if (fAlloc.fSize <= GrVkUniformBuffer::kStandardSize) {
        gpu->resourceProvider().recycleStandardUniformBufferResource(this);
    } else {
        this->unref(gpu);
    }
}
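
// A minimal usage sketch (hypothetical call site; the 'gpu' pointer and the
// update/bind steps are assumptions, not part of this file):
//
//     GrVkUniformBuffer* buffer = GrVkUniformBuffer::Create(gpu, 256);
//     if (buffer) {
//         // ... upload uniform data, bind the buffer in a descriptor set, and
//         // release it once the GPU has finished with it ...
//     }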