/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkChecksum_DEFINED
#define SkChecksum_DEFINED

#include "SkString.h"
#include "SkTLogic.h"
#include "SkTypes.h"

/**
 * Computes a 32-bit checksum from a blob of 32-bit aligned data. This is meant
 * to be very, very fast, as it is used internally by the font cache, in
 * conjunction with the entire raw key. This algorithm does not generate
 * unique values as well as others (e.g. MD5), but it performs much faster.
 * Skia's use cases can survive non-unique values (since the entire key is
 * always available). Clients should only use it in circumstances where speed
 * over uniqueness is at a premium.
 */
class SkChecksum : SkNoncopyable {
private:
    /*
     * Our Rotate and Mash helpers are meant to automatically do the right
     * thing depending on whether sizeof(uintptr_t) is 4 or 8.
     */
    enum {
        ROTR = 17,
        ROTL = sizeof(uintptr_t) * 8 - ROTR,
        HALFBITS = sizeof(uintptr_t) * 4
    };

    static inline uintptr_t Mash(uintptr_t total, uintptr_t value) {
        return ((total >> ROTR) | (total << ROTL)) ^ value;
    }
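    // Concretely: with 8-byte pointers ROTR/ROTL are 17/47 and HALFBITS is 32;
    // with 4-byte pointers they are 17/15 and HALFBITS is 16. Mash() rotates the
    // running total right by ROTR bits and xors in the next word of data.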

public:
    /**
     * uint32_t -> uint32_t hash, useful for when you're about to truncate this
     * hash but you suspect its low bits aren't well mixed.
     *
     * This is the Murmur3 finalizer.
     */
    static uint32_t Mix(uint32_t hash) {
        hash ^= hash >> 16;
        hash *= 0x85ebca6b;
        hash ^= hash >> 13;
        hash *= 0xc2b2ae35;
        hash ^= hash >> 16;
        return hash;
    }
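    // Illustrative use, with a hypothetical power-of-two kNumBuckets:
    //     uint32_t bucket = SkChecksum::Mix(id) & (kNumBuckets - 1);
    // Mixing first keeps the mask from seeing only the raw low bits of id.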

    /**
     * uint32_t -> uint32_t hash, useful for when you're about to truncate this
     * hash but you suspect its low bits aren't well mixed.
     *
     * This version is two lines cheaper than Mix, but seems to be sufficient for
     * the font cache.
     */
    static uint32_t CheapMix(uint32_t hash) {
        hash ^= hash >> 16;
        hash *= 0x85ebca6b;
        hash ^= hash >> 16;
        return hash;
    }
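    // CheapMix omits Mix's second xor-shift and multiply (the two dropped lines),
    // trading some bit diffusion for speed; per the comment above, that appears to
    // be acceptable when the full key is compared on lookup anyway.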

    /**
     * Calculate 32-bit Murmur hash (murmur3).
     * This should take 2-3x longer than SkChecksum::Compute, but is a considerably
     * better hash. See en.wikipedia.org/wiki/MurmurHash.
     *
     * @param data  Memory address of the data block to be processed.
     * @param bytes Size of the data block in bytes.
     * @param seed  Initial hash seed. (optional)
     * @return hash result
     */
    static uint32_t Murmur3(const void* data, size_t bytes, uint32_t seed = 0) {
        // Use may_alias to remind the compiler we're intentionally violating strict
        // aliasing, and so not to apply strict-aliasing-based optimizations.
        typedef uint32_t SK_ATTRIBUTE(may_alias) aliased_uint32_t;
        typedef uint8_t SK_ATTRIBUTE(may_alias) aliased_uint8_t;

        // Handle 4 bytes at a time while possible.
        const aliased_uint32_t* safe_data = (const aliased_uint32_t*)data;
        const size_t words = bytes / 4;
        uint32_t hash = seed;
        for (size_t i = 0; i < words; i++) {
            uint32_t k = safe_data[i];
            k *= 0xcc9e2d51;
            k = (k << 15) | (k >> 17);
            k *= 0x1b873593;

            hash ^= k;
            hash = (hash << 13) | (hash >> 19);
            hash *= 5;
            hash += 0xe6546b64;
        }

        // Handle last 0-3 bytes.
        const aliased_uint8_t* safe_tail = (const uint8_t*)(safe_data + words);
        uint32_t k = 0;
        switch (bytes & 3) {
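            // Each case intentionally falls through to the one below it, so a
            // 3-byte tail picks up bytes 2, 1, and 0 before the final scramble.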
            case 3: k ^= safe_tail[2] << 16;
            case 2: k ^= safe_tail[1] <<  8;
            case 1: k ^= safe_tail[0] <<  0;
                    k *= 0xcc9e2d51;
                    k = (k << 15) | (k >> 17);
                    k *= 0x1b873593;
                    hash ^= k;
        }

        hash ^= bytes;
        return Mix(hash);
    }
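    // Illustrative use (hypothetical struct), hashing a small POD by value:
    //     struct Key { uint32_t fA; float fB; };
    //     Key key = { 1, 2.0f };
    //     uint32_t h = SkChecksum::Murmur3(&key, sizeof(key));
    // Note that any padding bytes in such a struct would be hashed too, so they
    // should be zeroed first.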

    /**
     * Compute a 32-bit checksum for a given data block.
     *
     * WARNING: this algorithm is tuned for efficiency, not backward/forward
     * compatibility. It may change at any time, so a checksum generated with
     * one version of the Skia code may not match a checksum generated with
     * a different version of the Skia code.
     *
     * @param data Memory address of the data block to be processed. Must be
     *             32-bit aligned.
     * @param size Size of the data block in bytes. Must be a multiple of 4.
     * @return checksum result
     */
    static uint32_t Compute(const uint32_t* data, size_t size) {
        // Use may_alias to remind the compiler we're intentionally violating strict
        // aliasing, and so not to apply strict-aliasing-based optimizations.
        typedef uint32_t SK_ATTRIBUTE(may_alias) aliased_uint32_t;
        const aliased_uint32_t* safe_data = (const aliased_uint32_t*)data;

        SkASSERT(SkIsAlign4(size));

        /*
         * We want to let the compiler use 32bit or 64bit addressing and math
         * so we use uintptr_t as our magic type. This makes the code a little
         * more obscure (we can't hard-code 32 or 64 anywhere, but have to use
         * sizeof()).
         */
        uintptr_t result = 0;
        const uintptr_t* ptr = reinterpret_cast<const uintptr_t*>(safe_data);

        /*
         * Count the number of quad element chunks. This takes into account
         * if we're on a 32bit or 64bit arch, since we use sizeof(uintptr_t)
         * to compute how much to shift-down the size.
         */
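        // e.g. each unrolled pass of the loop below consumes 4 * sizeof(uintptr_t)
        // bytes: 32 bytes per iteration on a 64-bit build, 16 on a 32-bit build.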
        size_t n4 = size / (sizeof(uintptr_t) << 2);
        for (size_t i = 0; i < n4; ++i) {
            result = Mash(result, *ptr++);
            result = Mash(result, *ptr++);
            result = Mash(result, *ptr++);
            result = Mash(result, *ptr++);
        }
        size &= ((sizeof(uintptr_t) << 2) - 1);

        safe_data = reinterpret_cast<const aliased_uint32_t*>(ptr);
        const aliased_uint32_t* stop = safe_data + (size >> 2);
        while (safe_data < stop) {
            result = Mash(result, *safe_data++);
        }

        /*
         * Smash us down to 32bits if we were 64. Note that when uintptr_t is
         * 32bits, this code-path should go away, but I still got a warning
         * when I wrote
         *     result ^= result >> 32;
         * since >>32 is undefined for 32bit ints, hence the wacky HALFBITS
         * define.
         */
        if (8 == sizeof(result)) {
            result ^= result >> HALFBITS;
        }
        return static_cast<uint32_t>(result);
    }
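    // Illustrative use (hypothetical names), hashing a 32-bit-aligned key blob
    // whose size is a multiple of 4:
    //     uint32_t checksum = SkChecksum::Compute(keyData, keyBytes);
    // Per the warning above, the result should not be persisted across Skia versions.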
};

// SkGoodHash should usually be your first choice in hashing data.
// It should be both reasonably fast and high quality.

template <typename K>
uint32_t SkGoodHash(const K& k) {
    if (sizeof(K) == 4) {
        return SkChecksum::Mix(*(const uint32_t*)&k);
    }
    return SkChecksum::Murmur3(&k, sizeof(K));
}

inline uint32_t SkGoodHash(const SkString& k) {
    return SkChecksum::Murmur3(k.c_str(), k.size());
}
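// Illustrative use of SkGoodHash (hypothetical values):
//     uint32_t a = SkGoodHash(42);                // 4-byte key: Mix() path
//     uint32_t b = SkGoodHash(SkString("skia"));  // string contents: Murmur3 path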

#endif