author     mtklein <mtklein@chromium.org>  2014-11-13 12:41:14 -0800
committer  Commit bot <commit-bot@chromium.org>  2014-11-13 12:41:14 -0800
commit     f2950b1c4538fa638a594af6beacd3d1434eb74d (patch)
tree       ae71e205e2ff3f74cf5a12b412d83958df99356d
parent     b2db898573e3cdcc8234eebf51961bfc4977ebbc (diff)
Deparameterize SkVarAlloc.
SkRecord performance is not sensitive to these values, so we can cut some storage.

This rearranges the space-remaining logic to use a count of bytes left rather than a pointer to the end, cutting memory usage a little more. An SkVarAlloc used to weigh 20 or 32 bytes; it now weighs 16 or 24. With a bit more thought I can probably trim off that Block* too, getting us to 12 or 16 bytes.

Because we now always grow by doubling, this CL switches from storing fSmallest to its log base 2. This has the nice effect that it can never overflow, and means we can probably squeeze the field down into a byte if we want, even 6 bits.

BUG=skia:

Committed: https://skia.googlesource.com/skia/+/bc415389855888af5a1282ca4b6bee30afa3d69d

CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu12-ShuttleA-GTX660-x86-Debug-Trybot

Review URL: https://codereview.chromium.org/721313002
-rw-r--r--  src/core/SkRecord.h      2
-rw-r--r--  src/core/SkVarAlloc.cpp  13
-rw-r--r--  src/core/SkVarAlloc.h    15
-rw-r--r--  tests/RecordTest.cpp     16
4 files changed, 17 insertions, 29 deletions
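
To make the size claim in the message concrete, here is a rough before/after sketch of the data members (field names and types are taken from the diff below; the struct names are placeholders, and exact sizes depend on compiler padding, so the byte counts are approximate):

// Before: about 20 bytes on 32-bit, 32 bytes on 64-bit.
struct BeforeLayout {
    char*       fByte;
    const char* fLimit;
    unsigned    fSmallest;
    float       fGrowth;
    void*       fBlock;     // really Block*
};

// After: about 16 bytes on 32-bit, 24 bytes on 64-bit.
struct AfterLayout {
    char*    fByte;
    unsigned fRemaining;
    unsigned fLgMinSize;
    void*    fBlock;        // really Block*
};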
diff --git a/src/core/SkRecord.h b/src/core/SkRecord.h
index 8302db4505..6019e03406 100644
--- a/src/core/SkRecord.h
+++ b/src/core/SkRecord.h
@@ -30,7 +30,7 @@ class SkRecord : SkNoncopyable {
kFirstReserveCount = 64 / sizeof(void*),
};
public:
- SkRecord() : fAlloc(1024, 2.0f), fCount(0), fReserved(0) {}
+ SkRecord() : fCount(0), fReserved(0) {}
~SkRecord() {
Destroyer destroyer;
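
With the constructor parameters gone, call sites like the one above simply default-construct the allocator. A hypothetical usage sketch (the 0 passed for flags is an assumption standing in for "no special sk_malloc flags"):

#include "SkVarAlloc.h"

static void example() {
    SkVarAlloc alloc;                 // was: SkVarAlloc alloc(1024, 2.0f);
    char* p = alloc.alloc(32, 0);     // 32 pointer-aligned bytes, no malloc flags
    (void)p;                          // sketch only; the bytes live as long as alloc does
}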
diff --git a/src/core/SkVarAlloc.cpp b/src/core/SkVarAlloc.cpp
index 5c3a41c6cf..cc1fc1c734 100644
--- a/src/core/SkVarAlloc.cpp
+++ b/src/core/SkVarAlloc.cpp
@@ -1,5 +1,4 @@
#include "SkVarAlloc.h"
-#include "SkScalar.h"
// We use non-standard malloc diagnostic methods to make sure our allocations are sized well.
#if defined(SK_BUILD_FOR_MAC)
@@ -20,11 +19,10 @@ struct SkVarAlloc::Block {
}
};
-SkVarAlloc::SkVarAlloc(size_t smallest, float growth)
+SkVarAlloc::SkVarAlloc()
: fByte(NULL)
- , fLimit(NULL)
- , fSmallest(SkToUInt(smallest))
- , fGrowth(growth)
+ , fRemaining(0)
+ , fLgMinSize(4)
, fBlock(NULL) {}
SkVarAlloc::~SkVarAlloc() {
@@ -39,14 +37,13 @@ SkVarAlloc::~SkVarAlloc() {
void SkVarAlloc::makeSpace(size_t bytes, unsigned flags) {
SkASSERT(SkIsAlignPtr(bytes));
- size_t alloc = fSmallest;
+ size_t alloc = 1<<(fLgMinSize++);
while (alloc < bytes + sizeof(Block)) {
alloc *= 2;
}
fBlock = Block::Alloc(fBlock, alloc, flags);
fByte = fBlock->data();
- fLimit = fByte + alloc - sizeof(Block);
- fSmallest = SkToUInt(SkScalarTruncToInt(fSmallest * fGrowth));
+ fRemaining = alloc - sizeof(Block);
#if defined(SK_BUILD_FOR_MAC)
SkASSERT(alloc == malloc_good_size(alloc));
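
As a rough illustration of the sizing policy in the new makeSpace() above, this standalone trace (illustrative only; it assumes fLgMinSize starts at 4 as in the constructor and uses a stand-in 16-byte header for sizeof(Block)) prints the block size each call would pick:

#include <cstddef>
#include <cstdio>

int main() {
    unsigned lgMinSize = 4;                    // mirrors fLgMinSize's starting value
    const size_t header = 16;                  // stand-in for sizeof(Block)
    const size_t requests[] = {8, 8, 200, 8};  // hypothetical makeSpace() requests
    for (size_t bytes : requests) {
        size_t alloc = size_t(1) << (lgMinSize++);  // doubles with every new block
        while (alloc < bytes + header) {
            alloc *= 2;                        // keep doubling until the request fits
        }
        std::printf("request %zu -> block of %zu bytes\n", bytes, alloc);
    }
    return 0;
}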
diff --git a/src/core/SkVarAlloc.h b/src/core/SkVarAlloc.h
index 0a7864b37a..19b2d75e76 100644
--- a/src/core/SkVarAlloc.h
+++ b/src/core/SkVarAlloc.h
@@ -5,22 +5,21 @@
class SkVarAlloc : SkNoncopyable {
public:
- // SkVarAlloc will never allocate less than smallest bytes at a time.
- // When it allocates a new block, it will be at least growth times bigger than the last.
- SkVarAlloc(size_t smallest, float growth);
+ SkVarAlloc();
~SkVarAlloc();
// Returns contiguous bytes aligned at least for pointers. You may pass SK_MALLOC_THROW, etc.
char* alloc(size_t bytes, unsigned sk_malloc_flags) {
bytes = SkAlignPtr(bytes);
- if (fByte + bytes > fLimit) {
+ if (bytes > fRemaining) {
this->makeSpace(bytes, sk_malloc_flags);
}
- SkASSERT(fByte + bytes <= fLimit);
+ SkASSERT(bytes <= fRemaining);
char* ptr = fByte;
fByte += bytes;
+ fRemaining -= bytes;
return ptr;
}
@@ -28,10 +27,8 @@ private:
void makeSpace(size_t bytes, unsigned flags);
char* fByte;
- const char* fLimit;
-
- unsigned fSmallest;
- const float fGrowth;
+ unsigned fRemaining;
+ unsigned fLgMinSize;
struct Block;
Block* fBlock;
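
Putting the header's alloc() and the doubling makeSpace() together, a stripped-down, illustrative bump allocator might look like the sketch below. It is not SkVarAlloc itself (no Block chain, no sk_malloc flags, and blocks are deliberately leaked), but it shows the bytes-remaining bookkeeping that replaces the old end-pointer, and why a small fLgMinSize-style counter never overflows the way fSmallest * fGrowth could:

#include <cstddef>
#include <cstdlib>

class BumpSketch {
public:
    char* alloc(size_t bytes) {
        // Round up to pointer alignment, as SkAlignPtr() does.
        bytes = (bytes + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
        if (bytes > fRemaining) {
            this->refill(bytes);
        }
        char* ptr = fByte;
        fByte      += bytes;
        fRemaining -= bytes;
        return ptr;
    }

private:
    void refill(size_t bytes) {
        // The next block is a power of two that doubles on every refill;
        // only the small log2 counter is stored.
        size_t alloc = size_t(1) << (fLgMinSize++);
        while (alloc < bytes) {
            alloc *= 2;
        }
        fByte      = static_cast<char*>(std::malloc(alloc));  // previous blocks leak: sketch only
        fRemaining = alloc;
    }

    char*    fByte      = nullptr;
    size_t   fRemaining = 0;
    unsigned fLgMinSize = 4;
};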
diff --git a/tests/RecordTest.cpp b/tests/RecordTest.cpp
index 2a0e615516..49efc28d2c 100644
--- a/tests/RecordTest.cpp
+++ b/tests/RecordTest.cpp
@@ -85,21 +85,15 @@ static bool is_aligned(const T* p) {
DEF_TEST(Record_Alignment, r) {
SkRecord record;
-
- // Of course a byte's always aligned.
REPORTER_ASSERT(r, is_aligned(record.alloc<uint8_t>()));
-
- // (If packed tightly, the rest below here would be off by one.)
-
- // It happens that the first implementation always aligned to 4 bytes,
- // so these two were always correct.
REPORTER_ASSERT(r, is_aligned(record.alloc<uint16_t>()));
REPORTER_ASSERT(r, is_aligned(record.alloc<uint32_t>()));
-
- // These two are regression tests (void* only on 64-bit machines).
- REPORTER_ASSERT(r, is_aligned(record.alloc<uint64_t>()));
REPORTER_ASSERT(r, is_aligned(record.alloc<void*>()));
- // We're not testing beyond sizeof(void*), which is where the current implementation will break.
+ // It's not clear if we care that 8-byte values are aligned on 32-bit machines.
+ if (sizeof(void*) == 8) {
+ REPORTER_ASSERT(r, is_aligned(record.alloc<double>()));
+ REPORTER_ASSERT(r, is_aligned(record.alloc<uint64_t>()));
+ }
}
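
The checks above rely on an is_aligned() helper defined earlier in tests/RecordTest.cpp; its assumed shape (shown here only for context, the real definition may differ) treats a pointer as aligned when its address is a multiple of the type's size:

#include <cstdint>

template <typename T>
static bool is_aligned(const T* p) {
    return (reinterpret_cast<uintptr_t>(p) & (sizeof(T) - 1)) == 0;
}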