diff options
author | 2015-11-16 11:01:18 -0800 | |
---|---|---|
committer | 2015-11-16 11:01:18 -0800 | |
commit | 3c2d32b8e27820a6e149d9ded67cbdf2411cc5c9 (patch) | |
tree | be93bf1035eb199e1c9df4ffbe8b8961eb9ecba0 /include/private/SkChecksum.h | |
parent | 05b48e2a24ae78312c80ed94195f0b705fdb0547 (diff) |
Revert of Switch uses of SkChecksum::Compute to Murmur3. (patchset #2 id:20001 of https://codereview.chromium.org/1436973003/ )
Reason for revert:
gotta put back *compute = 0.
Original issue's description:
> Switch uses of SkChecksum::Compute to Murmur3.
>
> SkChecksum::Compute is a very, very poorly distributed hash function.
> This replaces all remaining uses with Murmur3.
>
> The only interesting stuff is in src/gpu.
>
> BUG=skia:
>
> Committed: https://skia.googlesource.com/skia/+/1d024a3c909ae5cefa5e8b339e2b52dc73ee85ac
>
> Committed: https://skia.googlesource.com/skia/+/540e95483d285b555e9b1a73d18c16e7d7c0deba
TBR=bsalomon@google.com,mtklein@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=skia:
Review URL: https://codereview.chromium.org/1448023005
Diffstat (limited to 'include/private/SkChecksum.h')
-rw-r--r-- | include/private/SkChecksum.h | 89 |
1 files changed, 89 insertions, 0 deletions
diff --git a/include/private/SkChecksum.h b/include/private/SkChecksum.h index 6289a444ae..4526416fc1 100644 --- a/include/private/SkChecksum.h +++ b/include/private/SkChecksum.h @@ -12,7 +12,31 @@ #include "SkTLogic.h" #include "SkTypes.h" +/** + * Computes a 32bit checksum from a blob of 32bit aligned data. This is meant + * to be very very fast, as it is used internally by the font cache, in + * conjunction with the entire raw key. This algorithm does not generate + * unique values as well as others (e.g. MD5) but it performs much faster. + * Skia's use cases can survive non-unique values (since the entire key is + * always available). This should only be used in circumstances where speed + * over uniqueness is at a premium. + */ class SkChecksum : SkNoncopyable { +private: + /* + * Our Rotate and Mash helpers are meant to automatically do the right + * thing depending if sizeof(uintptr_t) is 4 or 8. + */ + enum { + ROTR = 17, + ROTL = sizeof(uintptr_t) * 8 - ROTR, + HALFBITS = sizeof(uintptr_t) * 4 + }; + + static inline uintptr_t Mash(uintptr_t total, uintptr_t value) { + return ((total >> ROTR) | (total << ROTL)) ^ value; + } + public: /** * uint32_t -> uint32_t hash, useful for when you're about to truncate this hash but you @@ -44,6 +68,7 @@ public: /** * Calculate 32-bit Murmur hash (murmur3). + * This should take 2-3x longer than SkChecksum::Compute, but is a considerably better hash. * See en.wikipedia.org/wiki/MurmurHash. * * @param data Memory address of the data block to be processed. @@ -52,6 +77,70 @@ public: * @return hash result */ static uint32_t Murmur3(const void* data, size_t bytes, uint32_t seed=0); + + /** + * Compute a 32-bit checksum for a given data block + * + * WARNING: this algorithm is tuned for efficiency, not backward/forward + * compatibility. It may change at any time, so a checksum generated with + * one version of the Skia code may not match a checksum generated with + * a different version of the Skia code.
+ * + * @param data Memory address of the data block to be processed. Must be + * 32-bit aligned. + * @param size Size of the data block in bytes. Must be a multiple of 4. + * @return checksum result + */ + static uint32_t Compute(const uint32_t* data, size_t size) { + // Use may_alias to remind the compiler we're intentionally violating strict aliasing, + // and so not to apply strict-aliasing-based optimizations. + typedef uint32_t SK_ATTRIBUTE(may_alias) aliased_uint32_t; + const aliased_uint32_t* safe_data = (const aliased_uint32_t*)data; + + SkASSERT(SkIsAlign4(size)); + + /* + * We want to let the compiler use 32bit or 64bit addressing and math + * so we use uintptr_t as our magic type. This makes the code a little + * more obscure (we can't hard-code 32 or 64 anywhere, but have to use + * sizeof()). + */ + uintptr_t result = 0; + const uintptr_t* ptr = reinterpret_cast<const uintptr_t*>(safe_data); + + /* + * count the number of quad element chunks. This takes into account + * if we're on a 32bit or 64bit arch, since we use sizeof(uintptr_t) + * to compute how much to shift-down the size. + */ + size_t n4 = size / (sizeof(uintptr_t) << 2); + for (size_t i = 0; i < n4; ++i) { + result = Mash(result, *ptr++); + result = Mash(result, *ptr++); + result = Mash(result, *ptr++); + result = Mash(result, *ptr++); + } + size &= ((sizeof(uintptr_t) << 2) - 1); + + safe_data = reinterpret_cast<const aliased_uint32_t*>(ptr); + const aliased_uint32_t* stop = safe_data + (size >> 2); + while (safe_data < stop) { + result = Mash(result, *safe_data++); + } + + /* + * smash us down to 32bits if we were 64. Note that when uintptr_t is + * 32bits, this code-path should go away, but I still got a warning + * when I wrote + * result ^= result >> 32; + * since >>32 is undefined for 32bit ints, hence the wacky HALFBITS + * define. 
+ */ + if (8 == sizeof(result)) { + result ^= result >> HALFBITS; + } + return static_cast<uint32_t>(result); + } }; // SkGoodHash should usually be your first choice in hashing data. |