author    dskiba <dskiba@chromium.org> 2016-11-29 06:50:35 -0800
committer Commit bot <commit-bot@chromium.org> 2016-11-29 06:50:35 -0800
commit    e4cd00699167cefde9abedbd49ede64f82d552c7 (patch)
tree      4921bc19c367263ce66532675cb846ee3bc383ee /tests/GrMemoryPoolTest.cpp
parent    c51c18fd783737391e26541cbb5a72af2396ef7b (diff)
Make GrMemoryPool play nice with bucketing allocators.
Some memory allocators have very coarse size buckets, so for example on Android (jemalloc) an attempt to allocate 32 KiB + 1 byte ends up allocating 40 KiB, wasting 8 KiB. GrMemoryPool's constructor takes two arguments that specify prealloc / block sizes and then inflates them to accommodate some bookkeeping structures. Since most call sites create GrMemoryPools with power-of-two sizes (which have matching buckets in most allocators), the inflation causes the allocator to select the next size bucket, wasting memory.

This CL makes GrMemoryPool stop inflating the sizes it was created with and allocate the specified amounts exactly. Part of the allocated memory is then used for the bookkeeping structures.

Additionally, a GrObjectMemoryPool template is provided, which takes prealloc / block object counts (instead of sizes) and guarantees that the specified number of objects will fit in the prealloc / block spaces.

BUG=651872
GOLD_TRYBOT_URL= https://gold.skia.org/search?issue=2525773002
Review-Url: https://codereview.chromium.org/2525773002
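As a rough illustration of the count-to-size idea only: the sketch below shows how a count-based wrapper could translate object counts into byte budgets before delegating to a size-based pool. The wrapper name, the per-object overhead constant, and the delegation interface are assumptions for illustration, not GrObjectMemoryPool's actual internals.

    // Hypothetical sketch: translate object counts into byte budgets so that a
    // requested number of objects is guaranteed to fit, then delegate to a
    // size-based pool. The per-object overhead constant is an assumption.
    #include <cstddef>

    template <typename T, typename SizeBasedPool>
    class ObjectCountPoolSketch {
    public:
        ObjectCountPoolSketch(size_t preallocCount, size_t minAllocCount)
            : fPool(bytesFor(preallocCount), bytesFor(minAllocCount)) {}

        T* allocate() { return static_cast<T*>(fPool.allocate(sizeof(T))); }
        void release(T* obj) { fPool.release(obj); }

    private:
        // Assumed per-allocation bookkeeping overhead; the real pool does its
        // own internal accounting.
        static constexpr size_t kAssumedPerObjectOverhead = sizeof(void*);

        static constexpr size_t bytesFor(size_t count) {
            return count * (sizeof(T) + kAssumedPerObjectOverhead);
        }

        SizeBasedPool fPool;
    };

The actual change goes the other way around: GrMemoryPool stops inflating the sizes it is given and carves the bookkeeping out of the requested amounts, so the count-based template can size its blocks exactly.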
Diffstat (limited to 'tests/GrMemoryPoolTest.cpp')
-rw-r--r--  tests/GrMemoryPoolTest.cpp | 171
1 file changed, 171 insertions, 0 deletions
diff --git a/tests/GrMemoryPoolTest.cpp b/tests/GrMemoryPoolTest.cpp
index 863574a35f..4eb5ca786f 100644
--- a/tests/GrMemoryPoolTest.cpp
+++ b/tests/GrMemoryPoolTest.cpp
@@ -10,6 +10,7 @@
#if SK_SUPPORT_GPU
#include "GrMemoryPool.h"
#include "SkRandom.h"
+#include "SkTArray.h"
#include "SkTDArray.h"
#include "SkTemplates.h"
@@ -227,4 +228,174 @@ DEF_TEST(GrMemoryPool, reporter) {
}
}
+// GrMemoryPool requires that it is empty at the point of destruction. This helps
+// achieve that by releasing all added memory in the destructor.
+class AutoPoolReleaser {
+public:
+ AutoPoolReleaser(GrMemoryPool& pool): fPool(pool) {
+ }
+ ~AutoPoolReleaser() {
+ for (void* ptr: fAllocated) {
+ fPool.release(ptr);
+ }
+ }
+ void add(void* ptr) {
+ fAllocated.push_back(ptr);
+ }
+private:
+ GrMemoryPool& fPool;
+ SkTArray<void*> fAllocated;
+};
+
+DEF_TEST(GrMemoryPoolAPI, reporter) {
+ constexpr size_t kSmallestMinAllocSize = GrMemoryPool::kSmallestMinAllocSize;
+
+ // Allocates memory until the pool adds a new block (pool.size() changes).
+ auto allocateMemory = [](GrMemoryPool& pool, AutoPoolReleaser& r) {
+ size_t origPoolSize = pool.size();
+ while (pool.size() == origPoolSize) {
+ r.add(pool.allocate(31));
+ }
+ };
+
+ // Effective prealloc space capacity is >= kSmallestMinAllocSize.
+ {
+ GrMemoryPool pool(0, 0);
+ REPORTER_ASSERT(reporter, pool.preallocSize() == kSmallestMinAllocSize);
+ }
+
+ // Effective prealloc space capacity is >= minAllocSize.
+ {
+ constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 2;
+ GrMemoryPool pool(kSmallestMinAllocSize, kMinAllocSize);
+ REPORTER_ASSERT(reporter, pool.preallocSize() == kMinAllocSize);
+ }
+
+ // Effective block size capacity >= kSmallestMinAllocSize.
+ {
+ GrMemoryPool pool(kSmallestMinAllocSize, kSmallestMinAllocSize / 2);
+ AutoPoolReleaser r(pool);
+
+ allocateMemory(pool, r);
+ REPORTER_ASSERT(reporter, pool.size() == kSmallestMinAllocSize);
+ }
+
+ // Pool allocates exactly preallocSize on creation.
+ {
+ constexpr size_t kPreallocSize = kSmallestMinAllocSize * 5;
+ GrMemoryPool pool(kPreallocSize, 0);
+ REPORTER_ASSERT(reporter, pool.preallocSize() == kPreallocSize);
+ }
+
+ // Pool allocates exactly minAllocSize when it expands.
+ {
+ constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 7;
+ GrMemoryPool pool(0, kMinAllocSize);
+ AutoPoolReleaser r(pool);
+
+ allocateMemory(pool, r);
+ REPORTER_ASSERT(reporter, pool.size() == kMinAllocSize);
+
+ allocateMemory(pool, r);
+ REPORTER_ASSERT(reporter, pool.size() == 2 * kMinAllocSize);
+ }
+
+ // When asked to allocate an amount > minAllocSize, the pool allocates a larger block
+ // to accommodate all internal structures.
+ {
+ constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 2;
+ GrMemoryPool pool(kSmallestMinAllocSize, kMinAllocSize);
+ AutoPoolReleaser r(pool);
+
+ REPORTER_ASSERT(reporter, pool.size() == 0);
+
+ constexpr size_t hugeSize = 10 * kMinAllocSize;
+ r.add(pool.allocate(hugeSize));
+ REPORTER_ASSERT(reporter, pool.size() > hugeSize);
+
+ // The block allocated to accommodate the huge request doesn't include any extra
+ // space, so the next allocation request allocates a new block.
+ size_t hugeBlockSize = pool.size();
+ r.add(pool.allocate(0));
+ REPORTER_ASSERT(reporter, pool.size() == hugeBlockSize + kMinAllocSize);
+ }
+}
+
+DEF_TEST(GrObjectMemoryPoolAPI, reporter) {
+ struct Data {
+ int value[5];
+ };
+ using DataObjectPool = GrObjectMemoryPool<Data>;
+ constexpr size_t kSmallestMinAllocCount = DataObjectPool::kSmallestMinAllocCount;
+
+ // Allocates objects until the pool adds a new block (pool.size() changes).
+ // Returns the number of objects that fit into the current block (i.e. the count before
+ // pool.size() changed; a newly allocated block always ends up with one object in it).
+ auto allocateObjects = [](DataObjectPool& pool, AutoPoolReleaser& r) -> size_t {
+ size_t count = 0;
+ size_t origPoolSize = pool.size();
+ while (pool.size() == origPoolSize) {
+ r.add(pool.allocate());
+ count++;
+ }
+ return count - 1;
+ };
+
+ // Effective prealloc space capacity is >= kSmallestMinAllocCount.
+ {
+ DataObjectPool pool(kSmallestMinAllocCount / 3, 0);
+ AutoPoolReleaser r(pool);
+
+ size_t preallocCount = allocateObjects(pool, r);
+ REPORTER_ASSERT(reporter, preallocCount == kSmallestMinAllocCount);
+ }
+
+ // Effective prealloc space capacity is >= minAllocCount.
+ {
+ DataObjectPool pool(kSmallestMinAllocCount, 2 * kSmallestMinAllocCount);
+ AutoPoolReleaser r(pool);
+
+ size_t preallocCount = allocateObjects(pool, r);
+ REPORTER_ASSERT(reporter, preallocCount == 2 * kSmallestMinAllocCount);
+ }
+
+ // Effective block capacity is >= kSmallestMinAllocCount.
+ {
+ DataObjectPool pool(kSmallestMinAllocCount, kSmallestMinAllocCount / 2);
+ AutoPoolReleaser r(pool);
+
+ // Fill prealloc space
+ allocateObjects(pool, r);
+
+ size_t minAllocCount = 1 + allocateObjects(pool, r);
+ REPORTER_ASSERT(reporter, minAllocCount == kSmallestMinAllocCount);
+ }
+
+ // Pool allocates space for exactly preallocCount objects on creation.
+ {
+ constexpr size_t kPreallocCount = kSmallestMinAllocCount * 7 / 3;
+ DataObjectPool pool(kPreallocCount, 0);
+ AutoPoolReleaser r(pool);
+
+ size_t preallocCount = allocateObjects(pool, r);
+ REPORTER_ASSERT(reporter, preallocCount == kPreallocCount);
+ }
+
+ // Pool allocates space for minAllocCount objects when it adds a new block.
+ {
+ constexpr size_t kMinAllocCount = kSmallestMinAllocCount * 11 / 3;
+ DataObjectPool pool(0, kMinAllocCount);
+ AutoPoolReleaser r(pool);
+
+ // Fill prealloc space
+ allocateObjects(pool, r);
+
+ size_t firstBlockCount = 1 + allocateObjects(pool, r);
+ REPORTER_ASSERT(reporter, firstBlockCount == kMinAllocCount);
+
+ size_t secondBlockCount = 1 + allocateObjects(pool, r);
+ REPORTER_ASSERT(reporter, secondBlockCount == kMinAllocCount);
+ }
+}
+
#endif