-rw-r--r--  src/core/SkRecord.h      |  2
-rw-r--r--  src/core/SkVarAlloc.cpp  | 13
-rw-r--r--  src/core/SkVarAlloc.h    | 15
-rw-r--r--  tests/RecordTest.cpp     | 16
4 files changed, 17 insertions(+), 29 deletions(-)
diff --git a/src/core/SkRecord.h b/src/core/SkRecord.h
index 8302db4505..6019e03406 100644
--- a/src/core/SkRecord.h
+++ b/src/core/SkRecord.h
@@ -30,7 +30,7 @@ class SkRecord : SkNoncopyable {
kFirstReserveCount = 64 / sizeof(void*),
};
public:
- SkRecord() : fAlloc(1024, 2.0f), fCount(0), fReserved(0) {}
+ SkRecord() : fCount(0), fReserved(0) {}
~SkRecord() {
Destroyer destroyer;
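Note: with SkVarAlloc now self-tuning, SkRecord's constructor no longer passes (smallest, growth) tuning arguments; the fAlloc member named in the old initializer list simply default-constructs. A minimal sketch of the resulting shape (the rest of SkRecord is elided above, so the member layout here is an assumption):

#include "SkVarAlloc.h"

class SkRecordSketch {   // hypothetical stand-in for the real SkRecord
public:
    SkRecordSketch() : fCount(0), fReserved(0) {}  // fAlloc default-constructs
private:
    SkVarAlloc fAlloc;      // no longer needs (smallest, growth) arguments
    unsigned fCount, fReserved;
};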
diff --git a/src/core/SkVarAlloc.cpp b/src/core/SkVarAlloc.cpp
index 5c3a41c6cf..cc1fc1c734 100644
--- a/src/core/SkVarAlloc.cpp
+++ b/src/core/SkVarAlloc.cpp
@@ -1,5 +1,4 @@
#include "SkVarAlloc.h"
-#include "SkScalar.h"
// We use non-standard malloc diagnostic methods to make sure our allocations are sized well.
#if defined(SK_BUILD_FOR_MAC)
@@ -20,11 +19,10 @@ struct SkVarAlloc::Block {
}
};
-SkVarAlloc::SkVarAlloc(size_t smallest, float growth)
+SkVarAlloc::SkVarAlloc()
: fByte(NULL)
- , fLimit(NULL)
- , fSmallest(SkToUInt(smallest))
- , fGrowth(growth)
+ , fRemaining(0)
+ , fLgMinSize(4)
, fBlock(NULL) {}
SkVarAlloc::~SkVarAlloc() {
@@ -39,14 +37,13 @@ SkVarAlloc::~SkVarAlloc() {
void SkVarAlloc::makeSpace(size_t bytes, unsigned flags) {
SkASSERT(SkIsAlignPtr(bytes));
- size_t alloc = fSmallest;
+ size_t alloc = 1<<(fLgMinSize++);
while (alloc < bytes + sizeof(Block)) {
alloc *= 2;
}
fBlock = Block::Alloc(fBlock, alloc, flags);
fByte = fBlock->data();
- fLimit = fByte + alloc - sizeof(Block);
- fSmallest = SkToUInt(SkScalarTruncToInt(fSmallest * fGrowth));
+ fRemaining = alloc - sizeof(Block);
#if defined(SK_BUILD_FOR_MAC)
SkASSERT(alloc == malloc_good_size(alloc));
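Note on the new growth policy in makeSpace(): each refill starts at 1 << fLgMinSize and post-increments the exponent, so successive blocks double in size even before the fit loop runs; the while loop then doubles further until the request plus the Block header fits. A standalone sketch of just that arithmetic (kHeader stands in for sizeof(Block), and the request sizes are hypothetical):

#include <cstddef>
#include <cstdio>

int main() {
    unsigned lgMinSize = 4;             // matches the constructor default above
    const size_t kHeader = 16;          // stand-in for sizeof(Block)
    const size_t requests[] = {8, 8, 200, 8};
    for (size_t bytes : requests) {
        size_t alloc = (size_t)1 << (lgMinSize++);
        while (alloc < bytes + kHeader) {
            alloc *= 2;                 // grow until request + header fits
        }
        printf("request %zu -> block of %zu bytes\n", bytes, alloc);
    }
    return 0;
}

With these numbers the blocks come out to 32, 32, 256, and 128 bytes: doubling by default, and jumping further only when a single request demands it.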
diff --git a/src/core/SkVarAlloc.h b/src/core/SkVarAlloc.h
index 0a7864b37a..19b2d75e76 100644
--- a/src/core/SkVarAlloc.h
+++ b/src/core/SkVarAlloc.h
@@ -5,22 +5,21 @@
class SkVarAlloc : SkNoncopyable {
public:
- // SkVarAlloc will never allocate less than smallest bytes at a time.
- // When it allocates a new block, it will be at least growth times bigger than the last.
- SkVarAlloc(size_t smallest, float growth);
+ SkVarAlloc();
~SkVarAlloc();
// Returns contiguous bytes aligned at least for pointers. You may pass SK_MALLOC_THROW, etc.
char* alloc(size_t bytes, unsigned sk_malloc_flags) {
bytes = SkAlignPtr(bytes);
- if (fByte + bytes > fLimit) {
+ if (bytes > fRemaining) {
this->makeSpace(bytes, sk_malloc_flags);
}
- SkASSERT(fByte + bytes <= fLimit);
+ SkASSERT(bytes <= fRemaining);
char* ptr = fByte;
fByte += bytes;
+ fRemaining -= bytes;
return ptr;
}
@@ -28,10 +27,8 @@ private:
void makeSpace(size_t bytes, unsigned flags);
char* fByte;
- const char* fLimit;
-
- unsigned fSmallest;
- const float fGrowth;
+ unsigned fRemaining;
+ unsigned fLgMinSize;
struct Block;
Block* fBlock;
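Note on the header change: tracking a byte count (fRemaining) instead of an end pointer (fLimit) makes the fast path a single unsigned comparison and shrinks the allocator's state, replacing a pointer, an unsigned, and a float with two unsigned counters. Usage is unchanged apart from the constructor; a hedged sketch, assuming SkVarAlloc.h is on the include path and passing 0 for the malloc flags:

#include "SkVarAlloc.h"

static void example() {
    SkVarAlloc alloc;                        // no (smallest, growth) arguments now
    char* a = alloc.alloc(24, 0 /*flags*/);  // refills via makeSpace, bumps fByte
    char* b = alloc.alloc(8, 0);             // likely served from the same block
    (void)a; (void)b;
}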
diff --git a/tests/RecordTest.cpp b/tests/RecordTest.cpp
index 2a0e615516..49efc28d2c 100644
--- a/tests/RecordTest.cpp
+++ b/tests/RecordTest.cpp
@@ -85,21 +85,15 @@ static bool is_aligned(const T* p) {
DEF_TEST(Record_Alignment, r) {
SkRecord record;
-
- // Of course a byte's always aligned.
REPORTER_ASSERT(r, is_aligned(record.alloc<uint8_t>()));
-
- // (If packed tightly, the rest below here would be off by one.)
-
- // It happens that the first implementation always aligned to 4 bytes,
- // so these two were always correct.
REPORTER_ASSERT(r, is_aligned(record.alloc<uint16_t>()));
REPORTER_ASSERT(r, is_aligned(record.alloc<uint32_t>()));
-
- // These two are regression tests (void* only on 64-bit machines).
- REPORTER_ASSERT(r, is_aligned(record.alloc<uint64_t>()));
REPORTER_ASSERT(r, is_aligned(record.alloc<void*>()));
- // We're not testing beyond sizeof(void*), which is where the current implementation will break.
+ // It's not clear if we care that 8-byte values are aligned on 32-bit machines.
+ if (sizeof(void*) == 8) {
+ REPORTER_ASSERT(r, is_aligned(record.alloc<double>()));
+ REPORTER_ASSERT(r, is_aligned(record.alloc<uint64_t>()));
+ }
}
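Note on the test change: the 8-byte alignment checks are now gated on 64-bit pointers, since the allocator only guarantees pointer alignment. The is_aligned template body is elided above; one plausible definition, given as an assumption rather than the actual test helper:

#include <cstdint>

template <typename T>
static bool is_aligned(const T* p) {
    // True when p is aligned to sizeof(T), assuming sizeof(T) is a power of two.
    return (reinterpret_cast<uintptr_t>(p) & (sizeof(T) - 1)) == 0;
}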