OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2012 Google Inc. | 2 * Copyright 2012 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkChecksum_DEFINED | 8 #ifndef SkChecksum_DEFINED |
9 #define SkChecksum_DEFINED | 9 #define SkChecksum_DEFINED |
10 | 10 |
11 #include "SkString.h" | 11 #include "SkString.h" |
12 #include "SkTLogic.h" | 12 #include "SkTLogic.h" |
13 #include "SkTypes.h" | 13 #include "SkTypes.h" |
14 | 14 |
| 15 /** |
| 16 * Computes a 32bit checksum from a blob of 32bit aligned data. This is meant |
| 17 * to be very very fast, as it is used internally by the font cache, in |
| 18 * conjunction with the entire raw key. This algorithm does not generate |
| 19 * unique values as well as others (e.g. MD5) but it performs much faster. |
| 20 * Skia's use cases can survive non-unique values (since the entire key is |
| 21 * always available). Clients should use this only in circumstances where |
| 22 * speed matters more than uniqueness. |
| 23 */ |
15 class SkChecksum : SkNoncopyable { | 24 class SkChecksum : SkNoncopyable { |
| 25 private: |
| 26 /* |
| 27 * Our Rotate and Mash helpers are meant to automatically do the right |
| 28 * thing depending if sizeof(uintptr_t) is 4 or 8. |
| 29 */ |
| 30 enum { |
| 31 ROTR = 17, |
| 32 ROTL = sizeof(uintptr_t) * 8 - ROTR, |
| 33 HALFBITS = sizeof(uintptr_t) * 4 |
| 34 }; |
| 35 |
| 36 static inline uintptr_t Mash(uintptr_t total, uintptr_t value) { |
| 37 return ((total >> ROTR) | (total << ROTL)) ^ value; |
| 38 } |
| 39 |
16 public: | 40 public: |
17 /** | 41 /** |
18 * uint32_t -> uint32_t hash, useful for when you're about to truncate this hash but you | 42 * uint32_t -> uint32_t hash, useful for when you're about to truncate this hash but you |
19 * suspect its low bits aren't well mixed. | 43 * suspect its low bits aren't well mixed. |
20 * | 44 * |
21 * This is the Murmur3 finalizer. | 45 * This is the Murmur3 finalizer. |
22 */ | 46 */ |
23 static uint32_t Mix(uint32_t hash) { | 47 static uint32_t Mix(uint32_t hash) { |
24 hash ^= hash >> 16; | 48 hash ^= hash >> 16; |
25 hash *= 0x85ebca6b; | 49 hash *= 0x85ebca6b; |
(...skipping 11 matching lines...)
37 */ | 61 */ |
38 static uint32_t CheapMix(uint32_t hash) { | 62 static uint32_t CheapMix(uint32_t hash) { |
39 hash ^= hash >> 16; | 63 hash ^= hash >> 16; |
40 hash *= 0x85ebca6b; | 64 hash *= 0x85ebca6b; |
41 hash ^= hash >> 16; | 65 hash ^= hash >> 16; |
42 return hash; | 66 return hash; |
43 } | 67 } |
44 | 68 |
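
A minimal usage sketch for the two finalizers above (not part of the header under review; the bucket count and key are invented for illustration). Mix() is the one to reach for before truncating a hash with a mask, since it scrambles the low bits; CheapMix() is a cheaper variant that skips part of that finalizer.

#include "SkChecksum.h"

// Hypothetical bucket count; any power of two works with mask-based truncation.
static const uint32_t kNumBuckets = 1 << 6;

static uint32_t bucket_for(uint32_t key) {
    // Mix() (the Murmur3 finalizer) scrambles the low bits, so masking them off
    // is safe even when the raw key's low bits are poorly distributed
    // (e.g. word-aligned sizes or pointers).
    return SkChecksum::Mix(key) & (kNumBuckets - 1);
}
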
45 /** | 69 /** |
46 * Calculate 32-bit Murmur hash (murmur3). | 70 * Calculate 32-bit Murmur hash (murmur3). |
| 71 * This should take 2-3x longer than SkChecksum::Compute, but is a considerably better hash. |
47 * See en.wikipedia.org/wiki/MurmurHash. | 72 * See en.wikipedia.org/wiki/MurmurHash. |
48 * | 73 * |
49 * @param data Memory address of the data block to be processed. | 74 * @param data Memory address of the data block to be processed. |
50 * @param size Size of the data block in bytes. | 75 * @param size Size of the data block in bytes. |
51 * @param seed Initial hash seed. (optional) | 76 * @param seed Initial hash seed. (optional) |
52 * @return hash result | 77 * @return hash result |
53 */ | 78 */ |
54 static uint32_t Murmur3(const void* data, size_t bytes, uint32_t seed=0); | 79 static uint32_t Murmur3(const void* data, size_t bytes, uint32_t seed=0); |
| 80 |
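
A hedged sketch of calling Murmur3() (the key struct below is hypothetical): unlike Compute() further down, Murmur3() accepts any byte count and alignment, so a plain-old-data key can be hashed in place. Choose field sizes that leave no padding, otherwise uninitialized padding bytes would feed into the hash.

#include "SkChecksum.h"

// Hypothetical key; three 4-byte fields, so there is no padding to worry about.
struct BitmapKey {
    uint32_t fGenID;
    int32_t  fWidth;
    int32_t  fHeight;
};

static uint32_t hash_bitmap_key(const BitmapKey& key) {
    // The seed parameter defaults to 0; a different seed gives an independent
    // hash over the same bytes.
    return SkChecksum::Murmur3(&key, sizeof(key));
}
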
| 81 /** |
| 82 * Compute a 32-bit checksum for a given data block |
| 83 * |
| 84 * WARNING: this algorithm is tuned for efficiency, not backward/forward |
| 85 * compatibility. It may change at any time, so a checksum generated with |
| 86 * one version of the Skia code may not match a checksum generated with |
| 87 * a different version of the Skia code. |
| 88 * |
| 89 * @param data Memory address of the data block to be processed. Must be |
| 90 * 32-bit aligned. |
| 91 * @param size Size of the data block in bytes. Must be a multiple of 4. |
| 92 * @return checksum result |
| 93 */ |
| 94 static uint32_t Compute(const uint32_t* data, size_t size) { |
| 95 // Use may_alias to remind the compiler we're intentionally violating strict aliasing, |
| 96 // and so not to apply strict-aliasing-based optimizations. |
| 97 typedef uint32_t SK_ATTRIBUTE(may_alias) aliased_uint32_t; |
| 98 const aliased_uint32_t* safe_data = (const aliased_uint32_t*)data; |
| 99 |
| 100 SkASSERT(SkIsAlign4(size)); |
| 101 |
| 102 /* |
| 103 * We want to let the compiler use 32bit or 64bit addressing and math |
| 104 * so we use uintptr_t as our magic type. This makes the code a little |
| 105 * more obscure (we can't hard-code 32 or 64 anywhere, but have to use |
| 106 * sizeof()). |
| 107 */ |
| 108 uintptr_t result = 0; |
| 109 const uintptr_t* ptr = reinterpret_cast<const uintptr_t*>(safe_data); |
| 110 |
| 111 /* |
| 112 * count the number of quad element chunks. This takes into account |
| 113 * if we're on a 32bit or 64bit arch, since we use sizeof(uintptr_t) |
| 114 * to compute how much to shift-down the size. |
| 115 */ |
| 116 size_t n4 = size / (sizeof(uintptr_t) << 2); |
| 117 for (size_t i = 0; i < n4; ++i) { |
| 118 result = Mash(result, *ptr++); |
| 119 result = Mash(result, *ptr++); |
| 120 result = Mash(result, *ptr++); |
| 121 result = Mash(result, *ptr++); |
| 122 } |
| 123 size &= ((sizeof(uintptr_t) << 2) - 1); |
| 124 |
| 125 safe_data = reinterpret_cast<const aliased_uint32_t*>(ptr); |
| 126 const aliased_uint32_t* stop = safe_data + (size >> 2); |
| 127 while (safe_data < stop) { |
| 128 result = Mash(result, *safe_data++); |
| 129 } |
| 130 |
| 131 /* |
| 132 * smash us down to 32bits if we were 64. Note that when uintptr_t is |
| 133 * 32bits, this code-path should go away, but I still got a warning |
| 134 * when I wrote |
| 135 * result ^= result >> 32; |
| 136 * since >>32 is undefined for 32bit ints, hence the wacky HALFBITS |
| 137 * define. |
| 138 */ |
| 139 if (8 == sizeof(result)) { |
| 140 result ^= result >> HALFBITS; |
| 141 } |
| 142 return static_cast<uint32_t>(result); |
| 143 } |
55 }; | 144 }; |
56 | 145 |
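
To make Compute()'s contract concrete, here is a small sketch under stated assumptions (the caller and key layout are invented): the data pointer must be 32-bit aligned and the byte count a multiple of 4, which the SkASSERT inside Compute() checks. Storing the key as uint32_t words satisfies both by construction.

#include "SkChecksum.h"

// Sketch: a caller that already keeps its raw key as uint32_t words, in the
// spirit of the font-cache usage described in the class comment.
static uint32_t checksum_key(const uint32_t* key32, size_t wordCount) {
    // Compute() takes a byte count; multiplying by sizeof(uint32_t) keeps it a
    // multiple of 4, and uint32_t storage guarantees 32-bit alignment.
    return SkChecksum::Compute(key32, wordCount * sizeof(uint32_t));
}
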
57 // SkGoodHash should usually be your first choice in hashing data. | 146 // SkGoodHash should usually be your first choice in hashing data. |
58 // It should be both reasonably fast and high quality. | 147 // It should be both reasonably fast and high quality. |
59 struct SkGoodHash { | 148 struct SkGoodHash { |
60 template <typename K> | 149 template <typename K> |
61 SK_WHEN(sizeof(K) == 4, uint32_t) operator()(const K& k) const { | 150 SK_WHEN(sizeof(K) == 4, uint32_t) operator()(const K& k) const { |
62 return SkChecksum::Mix(*(const uint32_t*)&k); | 151 return SkChecksum::Mix(*(const uint32_t*)&k); |
63 } | 152 } |
64 | 153 |
65 template <typename K> | 154 template <typename K> |
66 SK_WHEN(sizeof(K) != 4, uint32_t) operator()(const K& k) const { | 155 SK_WHEN(sizeof(K) != 4, uint32_t) operator()(const K& k) const { |
67 return SkChecksum::Murmur3(&k, sizeof(K)); | 156 return SkChecksum::Murmur3(&k, sizeof(K)); |
68 } | 157 } |
69 | 158 |
70 uint32_t operator()(const SkString& k) const { | 159 uint32_t operator()(const SkString& k) const { |
71 return SkChecksum::Murmur3(k.c_str(), k.size()); | 160 return SkChecksum::Murmur3(k.c_str(), k.size()); |
72 } | 161 } |
73 }; | 162 }; |
74 | 163 |
75 #endif | 164 #endif |
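
For completeness, a sketch of how SkGoodHash dispatches (the WideKey type is invented for illustration): SK_WHEN enables the Mix() overload only when the key is exactly 4 bytes, any other size falls through to Murmur3(), and SkString keys hash their character data.

#include "SkChecksum.h"
#include "SkString.h"

struct WideKey { uint64_t fLo; uint64_t fHi; };  // sizeof != 4, so the Murmur3 overload

static void good_hash_demo() {
    SkGoodHash hash;
    uint32_t a = hash(uint32_t(42));           // sizeof(K) == 4: SkChecksum::Mix
    WideKey wide = { 1, 2 };
    uint32_t b = hash(wide);                   // sizeof(K) != 4: SkChecksum::Murmur3
    uint32_t c = hash(SkString("glyph-key"));  // SkString overload: Murmur3 over c_str()/size()
    (void)a; (void)b; (void)c;
}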