author    jvanverth <jvanverth@google.com>    2016-09-23 10:30:04 -0700
committer Commit bot <commit-bot@chromium.org>    2016-09-23 10:30:04 -0700
commit    68c3d30702c8af5ae249c78b30a9ea2351f875ed (patch)
tree      65b328b41a8cd4729e200cb554950f866bcc26d7 /tests
parent    667a007ab3a0e46c78cece172d8720ee62c99cdf (diff)
Some Vulkan memory fixes and cleanup
* Switch back to not setting transfer_dst on all buffers
* Add some missing unit tests
* Add tracking of heap usage for debugging purposes
* Fall back to non-device-local memory if device-local allocation fails

BUG=skia:5031
GOLD_TRYBOT_URL= https://gold.skia.org/search?issue=2356343003

Committed: https://skia.googlesource.com/skia/+/c5850e9fdb62cc4ae5ed2b6af51aea92cac07455
Review-Url: https://codereview.chromium.org/2356343003
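A rough sketch of the device-local-with-fallback allocation described in the last bullet (illustrative only; the helper names and the exact flag handling are assumptions, not Skia's GrVkMemory code):

#include <vulkan/vulkan.h>

// Sketch only: find a memory type index that satisfies the requested
// property flags and is allowed by the resource's memoryTypeBits.
static bool find_memory_type(const VkPhysicalDeviceMemoryProperties& props,
                             uint32_t typeBits, VkMemoryPropertyFlags flags,
                             uint32_t* typeIndex) {
    for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
        if ((typeBits & (1u << i)) &&
            (props.memoryTypes[i].propertyFlags & flags) == flags) {
            *typeIndex = i;
            return true;
        }
    }
    return false;
}

// Sketch only: try a device-local allocation first, then fall back to any
// compatible (non-device-local) memory type if that allocation fails.
static VkResult alloc_with_fallback(VkDevice device,
                                    const VkPhysicalDeviceMemoryProperties& props,
                                    const VkMemoryRequirements& reqs,
                                    VkDeviceMemory* memory) {
    VkMemoryAllocateInfo info = {};
    info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    info.allocationSize = reqs.size;

    uint32_t typeIndex;
    if (find_memory_type(props, reqs.memoryTypeBits,
                         VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &typeIndex)) {
        info.memoryTypeIndex = typeIndex;
        if (VK_SUCCESS == vkAllocateMemory(device, &info, nullptr, memory)) {
            return VK_SUCCESS;
        }
    }
    // Device-local allocation failed (or no device-local type exists);
    // retry with no property requirement, i.e. any compatible type.
    if (!find_memory_type(props, reqs.memoryTypeBits, 0, &typeIndex)) {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    info.memoryTypeIndex = typeIndex;
    return vkAllocateMemory(device, &info, nullptr, memory);
}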
Diffstat (limited to 'tests')
-rwxr-xr-x  tests/VkHeapTests.cpp  64
1 file changed, 45 insertions, 19 deletions
diff --git a/tests/VkHeapTests.cpp b/tests/VkHeapTests.cpp
index c4a9beb5f2..4561c90803 100755
--- a/tests/VkHeapTests.cpp
+++ b/tests/VkHeapTests.cpp
@@ -21,8 +21,8 @@ using sk_gpu_test::GrContextFactory;
void subheap_test(skiatest::Reporter* reporter, GrContext* context) {
GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());
- // heap index doesn't matter, we're just testing the suballocation algorithm so we'll use 0
- GrVkSubHeap heap(gpu, 0, 64 * 1024, 32);
+ // memtype doesn't matter, we're just testing the suballocation algorithm so we'll use 0
+ GrVkSubHeap heap(gpu, 0, 0, 64 * 1024, 32);
GrVkAlloc alloc0, alloc1, alloc2, alloc3;
// test full allocation and free
REPORTER_ASSERT(reporter, heap.alloc(64 * 1024, &alloc0));
@@ -118,34 +118,35 @@ void subheap_test(skiatest::Reporter* reporter, GrContext* context) {
void suballoc_test(skiatest::Reporter* reporter, GrContext* context) {
GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());
- // heap index doesn't matter, we're just testing the allocation algorithm so we'll use 0
+ // memtype/heap index don't matter, we're just testing the allocation algorithm so we'll use 0
GrVkHeap heap(gpu, GrVkHeap::kSubAlloc_Strategy, 64 * 1024);
GrVkAlloc alloc0, alloc1, alloc2, alloc3;
const VkDeviceSize kAlignment = 16;
+ const uint32_t kMemType = 0;
const uint32_t kHeapIndex = 0;
REPORTER_ASSERT(reporter, heap.allocSize() == 0 && heap.usedSize() == 0);
// fragment allocations so we need to grow heap
- REPORTER_ASSERT(reporter, heap.alloc(19 * 1024 - 3, kAlignment, kHeapIndex, &alloc0));
- REPORTER_ASSERT(reporter, heap.alloc(5 * 1024 - 9, kAlignment, kHeapIndex, &alloc1));
- REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 15, kAlignment, kHeapIndex, &alloc2));
- REPORTER_ASSERT(reporter, heap.alloc(3 * 1024 - 6, kAlignment, kHeapIndex, &alloc3));
+ REPORTER_ASSERT(reporter, heap.alloc(19 * 1024 - 3, kAlignment, kMemType, kHeapIndex, &alloc0));
+ REPORTER_ASSERT(reporter, heap.alloc(5 * 1024 - 9, kAlignment, kMemType, kHeapIndex, &alloc1));
+ REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 15, kAlignment, kMemType, kHeapIndex, &alloc2));
+ REPORTER_ASSERT(reporter, heap.alloc(3 * 1024 - 6, kAlignment, kMemType, kHeapIndex, &alloc3));
REPORTER_ASSERT(reporter, heap.allocSize() == 64 * 1024 && heap.usedSize() == 42 * 1024);
heap.free(alloc0);
REPORTER_ASSERT(reporter, heap.allocSize() == 64 * 1024 && heap.usedSize() == 23 * 1024);
heap.free(alloc2);
REPORTER_ASSERT(reporter, heap.allocSize() == 64 * 1024 && heap.usedSize() == 8 * 1024);
// we expect the heap to grow here
- REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kHeapIndex, &alloc0));
+ REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kMemType, kHeapIndex, &alloc0));
REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 48 * 1024);
heap.free(alloc3);
REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 45 * 1024);
// heap should not grow here (first subheap has exactly enough room)
- REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kHeapIndex, &alloc3));
+ REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kMemType, kHeapIndex, &alloc3));
REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 85 * 1024);
// heap should not grow here (second subheap has room)
- REPORTER_ASSERT(reporter, heap.alloc(22 * 1024, kAlignment, kHeapIndex, &alloc2));
+ REPORTER_ASSERT(reporter, heap.alloc(22 * 1024, kAlignment, kMemType, kHeapIndex, &alloc2));
REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 107 * 1024);
heap.free(alloc1);
REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 102 * 1024);
@@ -156,45 +157,58 @@ void suballoc_test(skiatest::Reporter* reporter, GrContext* context) {
heap.free(alloc3);
REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 0 * 1024);
// heap should not grow here (allocating more than subheap size)
- REPORTER_ASSERT(reporter, heap.alloc(128 * 1024, kAlignment, kHeapIndex, &alloc0));
+ REPORTER_ASSERT(reporter, heap.alloc(128 * 1024, kAlignment, kMemType, kHeapIndex, &alloc0));
REPORTER_ASSERT(reporter, 0 == alloc0.fSize);
REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 0 * 1024);
heap.free(alloc0);
+ REPORTER_ASSERT(reporter, heap.alloc(24 * 1024, kAlignment, kMemType, kHeapIndex, &alloc0));
+ REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize() == 24 * 1024);
+ // heap should alloc a new subheap because the memory type is different
+ REPORTER_ASSERT(reporter, heap.alloc(24 * 1024, kAlignment, kMemType+1, kHeapIndex, &alloc1));
+ REPORTER_ASSERT(reporter, heap.allocSize() == 192 * 1024 && heap.usedSize() == 48 * 1024);
+ // heap should alloc a new subheap because the alignment is different
+ REPORTER_ASSERT(reporter, heap.alloc(24 * 1024, 128, kMemType, kHeapIndex, &alloc2));
+ REPORTER_ASSERT(reporter, heap.allocSize() == 256 * 1024 && heap.usedSize() == 72 * 1024);
+ heap.free(alloc2);
+ heap.free(alloc0);
+ heap.free(alloc1);
+ REPORTER_ASSERT(reporter, heap.allocSize() == 256 * 1024 && heap.usedSize() == 0 * 1024);
}
void singlealloc_test(skiatest::Reporter* reporter, GrContext* context) {
GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu());
- // heap index doesn't matter, we're just testing the allocation algorithm so we'll use 0
+ // memtype/heap index don't matter, we're just testing the allocation algorithm so we'll use 0
GrVkHeap heap(gpu, GrVkHeap::kSingleAlloc_Strategy, 64 * 1024);
GrVkAlloc alloc0, alloc1, alloc2, alloc3;
const VkDeviceSize kAlignment = 64;
+ const uint32_t kMemType = 0;
const uint32_t kHeapIndex = 0;
REPORTER_ASSERT(reporter, heap.allocSize() == 0 && heap.usedSize() == 0);
// make a few allocations
- REPORTER_ASSERT(reporter, heap.alloc(49 * 1024 - 3, kAlignment, kHeapIndex, &alloc0));
- REPORTER_ASSERT(reporter, heap.alloc(5 * 1024 - 37, kAlignment, kHeapIndex, &alloc1));
- REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 11, kAlignment, kHeapIndex, &alloc2));
- REPORTER_ASSERT(reporter, heap.alloc(3 * 1024 - 29, kAlignment, kHeapIndex, &alloc3));
+ REPORTER_ASSERT(reporter, heap.alloc(49 * 1024 - 3, kAlignment, kMemType, kHeapIndex, &alloc0));
+ REPORTER_ASSERT(reporter, heap.alloc(5 * 1024 - 37, kAlignment, kMemType, kHeapIndex, &alloc1));
+ REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 11, kAlignment, kMemType, kHeapIndex, &alloc2));
+ REPORTER_ASSERT(reporter, heap.alloc(3 * 1024 - 29, kAlignment, kMemType, kHeapIndex, &alloc3));
REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 72 * 1024);
heap.free(alloc0);
REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 23 * 1024);
heap.free(alloc2);
REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 8 * 1024);
// heap should not grow here (first subheap has room)
- REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kHeapIndex, &alloc0));
+ REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kMemType, kHeapIndex, &alloc0));
REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 48 * 1024);
heap.free(alloc3);
REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 45 * 1024);
// check for exact fit -- heap should not grow here (third subheap has room)
- REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 63, kAlignment, kHeapIndex, &alloc2));
+ REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 63, kAlignment, kMemType, kHeapIndex, &alloc2));
REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 60 * 1024);
heap.free(alloc2);
REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() == 45 * 1024);
// heap should grow here (no subheap has room)
- REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kHeapIndex, &alloc3));
+ REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kMemType, kHeapIndex, &alloc3));
REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize() == 85 * 1024);
heap.free(alloc1);
REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize() == 80 * 1024);
@@ -202,6 +216,18 @@ void singlealloc_test(skiatest::Reporter* reporter, GrContext* context) {
REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize() == 40 * 1024);
heap.free(alloc3);
REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize() == 0 * 1024);
+ REPORTER_ASSERT(reporter, heap.alloc(24 * 1024, kAlignment, kMemType, kHeapIndex, &alloc0));
+ REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize() == 24 * 1024);
+ // heap should alloc a new subheap because the memory type is different
+ REPORTER_ASSERT(reporter, heap.alloc(24 * 1024, kAlignment, kMemType + 1, kHeapIndex, &alloc1));
+ REPORTER_ASSERT(reporter, heap.allocSize() == 136 * 1024 && heap.usedSize() == 48 * 1024);
+ // heap should alloc a new subheap because the alignment is different
+ REPORTER_ASSERT(reporter, heap.alloc(24 * 1024, 128, kMemType, kHeapIndex, &alloc2));
+ REPORTER_ASSERT(reporter, heap.allocSize() == 160 * 1024 && heap.usedSize() == 72 * 1024);
+ heap.free(alloc1);
+ heap.free(alloc2);
+ heap.free(alloc0);
+ REPORTER_ASSERT(reporter, heap.allocSize() == 160 * 1024 && heap.usedSize() == 0 * 1024);
}
DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkHeapTests, reporter, ctxInfo) {
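The allocSize()/usedSize() assertions above exercise the heap-usage tracking mentioned in the commit message. As a minimal sketch of that bookkeeping (the class and member names here are hypothetical and it ignores the free-list and fragmentation handling a real suballocator needs), the heap only has to update two counters:

#include <cstdint>

// Hypothetical usage-tracking sketch: fAllocSize is the total size of all
// subheaps created so far, fUsedSize is the sum of live suballocations.
class HeapUsageTracker {
public:
    void onSubheapAdded(uint64_t subheapSize) { fAllocSize += subheapSize; }
    void onAlloc(uint64_t size)               { fUsedSize += size; }
    void onFree(uint64_t size)                { fUsedSize -= size; }

    uint64_t allocSize() const { return fAllocSize; }
    uint64_t usedSize() const  { return fUsedSize; }

private:
    uint64_t fAllocSize = 0;
    uint64_t fUsedSize = 0;
};

With the kSubAlloc_Strategy used in suballoc_test, allocSize() moves in 64 * 1024 steps each time a new subheap is created, while usedSize() mirrors the individual alloc/free calls, which is what the REPORTER_ASSERT checks verify.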