Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/wtf/HashTable.h

Issue 1611343002: wtf reformat test
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: pydent (created 4 years, 11 months ago)
/*
 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights reserved.
 * Copyright (C) 2008 David Levin <levin@chromium.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 (...skipping 18 matching lines...)

#define DUMP_HASHTABLE_STATS 0
#define DUMP_HASHTABLE_STATS_PER_TABLE 0

#if DUMP_HASHTABLE_STATS_PER_TABLE
#include "wtf/DataLog.h"
#endif

#if DUMP_HASHTABLE_STATS
#if DUMP_HASHTABLE_STATS_PER_TABLE
#define UPDATE_PROBE_COUNTS()                         \
  ++probeCount;                                       \
  HashTableStats::recordCollisionAtCount(probeCount); \
  ++perTableProbeCount;                               \
  m_stats->recordCollisionAtCount(perTableProbeCount)
#define UPDATE_ACCESS_COUNTS()                     \
  atomicIncrement(&HashTableStats::numAccesses);   \
  int probeCount = 0;                              \
  ++m_stats->numAccesses;                          \
  int perTableProbeCount = 0
#else
#define UPDATE_PROBE_COUNTS() \
  ++probeCount;               \
  HashTableStats::recordCollisionAtCount(probeCount)
#define UPDATE_ACCESS_COUNTS()                   \
  atomicIncrement(&HashTableStats::numAccesses); \
  int probeCount = 0
#endif
#else
#if DUMP_HASHTABLE_STATS_PER_TABLE
#define UPDATE_PROBE_COUNTS() \
  ++perTableProbeCount;       \
  m_stats->recordCollisionAtCount(perTableProbeCount)
#define UPDATE_ACCESS_COUNTS() \
  ++m_stats->numAccesses;      \
  int perTableProbeCount = 0
#else
#define UPDATE_PROBE_COUNTS() \
  do {                        \
  } while (0)
#define UPDATE_ACCESS_COUNTS() \
  do {                         \
  } while (0)
#endif
#endif
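
// Illustrative note (not part of this CL): the no-op variants expand to
// `do { } while (0)` instead of nothing so that an invocation like
// `UPDATE_ACCESS_COUNTS();` stays a single real statement. With a truly
// empty expansion, the trailing semicolon becomes an empty statement, which
// compilers flag at call sites like if-bodies (GCC/Clang -Wempty-body):
//
//   if (dumpingEnabled)        // hypothetical call site
//     UPDATE_PROBE_COUNTS();   // empty expansion leaves `if (...) ;`
//
// The do/while(0) form avoids the stray-semicolon warning while still
// requiring the terminating semicolon.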

namespace WTF {

#if DUMP_HASHTABLE_STATS

struct HashTableStats {
  STATIC_ONLY(HashTableStats);
  // The following variables are all atomically incremented when modified.
  static int numAccesses;
  static int numRehashes;
  static int numRemoves;
  static int numReinserts;

  // The following variables are only modified in the recordCollisionAtCount
  // method within a mutex.
  static int maxCollisions;
  static int numCollisions;
  static int collisionGraph[4096];

  static void recordCollisionAtCount(int count);
  static void dumpStats();
};

#endif

template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
class HashTable;
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
class HashTableIterator;
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
class HashTableConstIterator;
template <typename Value,
          typename HashFunctions,
          typename HashTraits,
          typename Allocator>
class LinkedHashSet;
template <WeakHandlingFlag x,
          typename T,
          typename U,
          typename V,
          typename W,
          typename X,
          typename Y,
          typename Z>
struct WeakProcessingHashTableHelper;

typedef enum { HashItemKnownGood } HashItemKnownGoodTag;

template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
class HashTableConstIterator final {
  DISALLOW_NEW();

 private:
  typedef HashTable<Key,
                    Value,
                    Extractor,
                    HashFunctions,
                    Traits,
                    KeyTraits,
                    Allocator>
      HashTableType;
  typedef HashTableIterator<Key,
                            Value,
                            Extractor,
                            HashFunctions,
                            Traits,
                            KeyTraits,
                            Allocator>
      iterator;
  typedef HashTableConstIterator<Key,
                                 Value,
                                 Extractor,
                                 HashFunctions,
                                 Traits,
                                 KeyTraits,
                                 Allocator>
      const_iterator;
  typedef Value ValueType;
  typedef typename Traits::IteratorConstGetType GetType;
  typedef const ValueType* PointerType;

  friend class HashTable<Key,
                         Value,
                         Extractor,
                         HashFunctions,
                         Traits,
                         KeyTraits,
                         Allocator>;
  friend class HashTableIterator<Key,
                                 Value,
                                 Extractor,
                                 HashFunctions,
                                 Traits,
                                 KeyTraits,
                                 Allocator>;

  void skipEmptyBuckets() {
    while (m_position != m_endPosition &&
           HashTableType::isEmptyOrDeletedBucket(*m_position))
      ++m_position;
  }

  HashTableConstIterator(PointerType position,
                         PointerType endPosition,
                         const HashTableType* container)
      : m_position(position),
        m_endPosition(endPosition)
#if ENABLE(ASSERT)
        ,
        m_container(container),
        m_containerModifications(container->modifications())
#endif
  {
    skipEmptyBuckets();
  }

  HashTableConstIterator(PointerType position,
                         PointerType endPosition,
                         const HashTableType* container,
                         HashItemKnownGoodTag)
      : m_position(position),
        m_endPosition(endPosition)
#if ENABLE(ASSERT)
        ,
        m_container(container),
        m_containerModifications(container->modifications())
#endif
  {
    ASSERT(m_containerModifications == m_container->modifications());
  }

  void checkModifications() const {
    // HashTable and collections that build on it do not support
    // modifications while there is an iterator in use. The exception is
    // ListHashSet, which has its own iterators that tolerate modification
    // of the underlying set.
    ASSERT(m_containerModifications == m_container->modifications());
    ASSERT(!m_container->accessForbidden());
  }

 public:
  HashTableConstIterator() {}

  GetType get() const {
    checkModifications();
    return m_position;
  }
  typename Traits::IteratorConstReferenceType operator*() const {
    return Traits::getToReferenceConstConversion(get());
  }
  GetType operator->() const { return get(); }

  const_iterator& operator++() {
    ASSERT(m_position != m_endPosition);
    checkModifications();
    ++m_position;
    skipEmptyBuckets();
    return *this;
  }

  // postfix ++ intentionally omitted

  // Comparison.
  bool operator==(const const_iterator& other) const {
    return m_position == other.m_position;
  }
  bool operator!=(const const_iterator& other) const {
    return m_position != other.m_position;
  }
  bool operator==(const iterator& other) const {
    return *this == static_cast<const_iterator>(other);
  }
  bool operator!=(const iterator& other) const {
    return *this != static_cast<const_iterator>(other);
  }

 private:
  PointerType m_position;
  PointerType m_endPosition;
#if ENABLE(ASSERT)
  const HashTableType* m_container;
  int64_t m_containerModifications;
#endif
};
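
// Illustrative sketch (not part of this CL): the modification count checked
// by checkModifications() is what fires when a table is mutated while an
// iterator is live. Assuming a WTF::HashSet<int> with default traits:
//
//   HashSet<int> set;
//   set.add(1);
//   set.add(2);
//   for (auto it = set.begin(); it != set.end(); ++it) {
//     set.add(*it + 100);  // registerModification() bumps the count;
//                          // the next use of 'it' ASSERTs in debug builds.
//   }
//
// ListHashSet is the documented exception; its iterators tolerate mutation
// of the underlying set.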

template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
class HashTableIterator final {
  DISALLOW_NEW();

 private:
  typedef HashTable<Key,
                    Value,
                    Extractor,
                    HashFunctions,
                    Traits,
                    KeyTraits,
                    Allocator>
      HashTableType;
  typedef HashTableIterator<Key,
                            Value,
                            Extractor,
                            HashFunctions,
                            Traits,
                            KeyTraits,
                            Allocator>
      iterator;
  typedef HashTableConstIterator<Key,
                                 Value,
                                 Extractor,
                                 HashFunctions,
                                 Traits,
                                 KeyTraits,
                                 Allocator>
      const_iterator;
  typedef Value ValueType;
  typedef typename Traits::IteratorGetType GetType;
  typedef ValueType* PointerType;

  friend class HashTable<Key,
                         Value,
                         Extractor,
                         HashFunctions,
                         Traits,
                         KeyTraits,
                         Allocator>;

  HashTableIterator(PointerType pos,
                    PointerType end,
                    const HashTableType* container)
      : m_iterator(pos, end, container) {}
  HashTableIterator(PointerType pos,
                    PointerType end,
                    const HashTableType* container,
                    HashItemKnownGoodTag tag)
      : m_iterator(pos, end, container, tag) {}

 public:
  HashTableIterator() {}

  // default copy, assignment and destructor are OK

  GetType get() const { return const_cast<GetType>(m_iterator.get()); }
  typename Traits::IteratorReferenceType operator*() const {
    return Traits::getToReferenceConversion(get());
  }
  GetType operator->() const { return get(); }

  iterator& operator++() {
    ++m_iterator;
    return *this;
  }

  // postfix ++ intentionally omitted

  // Comparison.
  bool operator==(const iterator& other) const {
    return m_iterator == other.m_iterator;
  }
  bool operator!=(const iterator& other) const {
    return m_iterator != other.m_iterator;
  }
  bool operator==(const const_iterator& other) const {
    return m_iterator == other;
  }
  bool operator!=(const const_iterator& other) const {
    return m_iterator != other;
  }

  operator const_iterator() const { return m_iterator; }

 private:
  const_iterator m_iterator;
};

using std::swap;

// Work around MSVC's standard library, whose swap for pairs does not swap by
// component.
template <typename T>
inline void hashTableSwap(T& a, T& b) {
  swap(a, b);
}

template <typename T, typename U>
inline void hashTableSwap(KeyValuePair<T, U>& a, KeyValuePair<T, U>& b) {
  swap(a.key, b.key);
  swap(a.value, b.value);
}
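
// Illustrative note (not part of this CL): hashTableSwap gives KeyValuePair
// buckets a member-wise swap:
//
//   KeyValuePair<int, int> a(1, 2), b(3, 4);
//   hashTableSwap(a, b);  // swap(a.key, b.key); swap(a.value, b.value);
//
// Member-wise swapping is also what lets Mover (below) bracket the swap of
// key and value in a single GC-forbidden scope.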

template <typename T,
          typename Allocator,
          bool useSwap = !IsTriviallyDestructible<T>::value>
struct Mover;
template <typename T, typename Allocator>
struct Mover<T, Allocator, true> {
  STATIC_ONLY(Mover);
  static void move(T& from, T& to) {
    // The key and value cannot be swapped atomically, and it would be wrong
    // to have a GC when only one was swapped and the other still contained
    // garbage (eg. from a previous use of the same slot). Therefore we
    // forbid a GC until both the key and the value are swapped.
    Allocator::enterGCForbiddenScope();
    hashTableSwap(from, to);
    Allocator::leaveGCForbiddenScope();
  }
};

template <typename T, typename Allocator>
struct Mover<T, Allocator, false> {
  STATIC_ONLY(Mover);
  static void move(T& from, T& to) { to = from; }
};

template <typename HashFunctions>
class IdentityHashTranslator {
  STATIC_ONLY(IdentityHashTranslator);

 public:
  template <typename T>
  static unsigned hash(const T& key) {
    return HashFunctions::hash(key);
  }
  template <typename T, typename U>
  static bool equal(const T& a, const U& b) {
    return HashFunctions::equal(a, b);
  }
  template <typename T, typename U, typename V>
  static void translate(T& location, const U&, const V& value) {
    location = value;
  }
};

template <typename HashTableType, typename ValueType>
struct HashTableAddResult final {
  STACK_ALLOCATED();
  HashTableAddResult(const HashTableType* container,
                     ValueType* storedValue,
                     bool isNewEntry)
      : storedValue(storedValue),
        isNewEntry(isNewEntry)
#if ENABLE(SECURITY_ASSERT)
        ,
        m_container(container),
        m_containerModifications(container->modifications())
#endif
  {
    ASSERT_UNUSED(container, container);
  }

  ValueType* storedValue;
  bool isNewEntry;

#if ENABLE(SECURITY_ASSERT)
  ~HashTableAddResult() {
    // If rehash happened before accessing storedValue, it's
    // use-after-free. Any modification may cause a rehash, so we check for
    // modifications here.

    // Rehash after accessing storedValue is harmless but will assert if the
    // AddResult destructor takes place after a modification. You may need
    // to limit the scope of the AddResult.
    ASSERT_WITH_SECURITY_IMPLICATION(m_containerModifications ==
                                     m_container->modifications());
  }

 private:
  const HashTableType* m_container;
  const int64_t m_containerModifications;
#endif
};
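
// Illustrative sketch (not part of this CL): the security assert above is
// aimed at code that holds on to storedValue across a mutation. Assuming a
// WTF::HashSet<int>:
//
//   HashSet<int> set;
//   {
//     auto result = set.add(1);  // AddResult borrows into the backing store
//     use(*result.storedValue);  // fine: no modification yet
//   }                            // destructor check passes
//   auto result = set.add(2);
//   set.add(3);                  // may rehash; storedValue may now dangle
//   use(*result.storedValue);    // the use-after-free the assert catches
//
// (use() is a hypothetical stand-in for any read of the stored value.)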

template <typename Value, typename Extractor, typename KeyTraits>
struct HashTableHelper {
  STATIC_ONLY(HashTableHelper);
  static bool isEmptyBucket(const Value& value) {
    return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value));
  }
  static bool isDeletedBucket(const Value& value) {
    return KeyTraits::isDeletedValue(Extractor::extract(value));
  }
  static bool isEmptyOrDeletedBucket(const Value& value) {
    return isEmptyBucket(value) || isDeletedBucket(value);
  }
};

template <typename HashTranslator,
          typename KeyTraits,
          bool safeToCompareToEmptyOrDeleted>
struct HashTableKeyChecker {
  STATIC_ONLY(HashTableKeyChecker);
  // There's no simple generic way to make this check if
  // safeToCompareToEmptyOrDeleted is false, so the check always passes.
  template <typename T>
  static bool checkKey(const T&) {
    return true;
  }
};

template <typename HashTranslator, typename KeyTraits>
struct HashTableKeyChecker<HashTranslator, KeyTraits, true> {
  STATIC_ONLY(HashTableKeyChecker);
  template <typename T>
  static bool checkKey(const T& key) {
    // FIXME : Check also equality to the deleted value.
    return !HashTranslator::equal(KeyTraits::emptyValue(), key);
  }
};

// Note: empty or deleted key values are not allowed, using them may lead to
// undefined behavior. For pointer keys this means that null pointers are not
// allowed unless you supply custom key traits.
template <typename Key,
          typename Value,
          typename Extractor,
          typename HashFunctions,
          typename Traits,
          typename KeyTraits,
          typename Allocator>
class HashTable final
    : public ConditionalDestructor<HashTable<Key,
                                             Value,
                                             Extractor,
                                             HashFunctions,
                                             Traits,
                                             KeyTraits,
                                             Allocator>,
                                   Allocator::isGarbageCollected> {
  DISALLOW_NEW();

 public:
  typedef HashTableIterator<Key,
                            Value,
                            Extractor,
                            HashFunctions,
                            Traits,
                            KeyTraits,
                            Allocator>
      iterator;
  typedef HashTableConstIterator<Key,
                                 Value,
                                 Extractor,
                                 HashFunctions,
                                 Traits,
                                 KeyTraits,
                                 Allocator>
      const_iterator;
  typedef Traits ValueTraits;
  typedef Key KeyType;
  typedef typename KeyTraits::PeekInType KeyPeekInType;
  typedef typename KeyTraits::PassInType KeyPassInType;
  typedef Value ValueType;
  typedef Extractor ExtractorType;
  typedef KeyTraits KeyTraitsType;
  typedef typename Traits::PassInType ValuePassInType;
  typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType;
  typedef HashTableAddResult<HashTable, ValueType> AddResult;

#if DUMP_HASHTABLE_STATS_PER_TABLE
  struct Stats {
    DISALLOW_NEW(Stats);
    Stats()
        : numAccesses(0),
          numRehashes(0),
          numRemoves(0),
          numReinserts(0),
          maxCollisions(0),
          numCollisions(0),
          collisionGraph() {}

    int numAccesses;
    int numRehashes;
    int numRemoves;
    int numReinserts;

    int maxCollisions;
    int numCollisions;
    int collisionGraph[4096];

    void recordCollisionAtCount(int count) {
      if (count > maxCollisions)
        maxCollisions = count;
      numCollisions++;
      collisionGraph[count]++;
    }

    void dumpStats() {
      dataLogF("\nWTF::HashTable::Stats dump\n\n");
      dataLogF("%d accesses\n", numAccesses);
      dataLogF("%d total collisions, average %.2f probes per access\n",
               numCollisions,
               1.0 * (numAccesses + numCollisions) / numAccesses);
      dataLogF("longest collision chain: %d\n", maxCollisions);
      for (int i = 1; i <= maxCollisions; i++) {
        dataLogF(
            " %d lookups with exactly %d collisions (%.2f%% , %.2f%% with "
            "this many or more)\n",
            collisionGraph[i], i,
            100.0 * (collisionGraph[i] - collisionGraph[i + 1]) / numAccesses,
            100.0 * collisionGraph[i] / numAccesses);
      }
      dataLogF("%d rehashes\n", numRehashes);
      dataLogF("%d reinserts\n", numReinserts);
    }
  };
#endif

    HashTable();
    void finalize()
    {
        ASSERT(!Allocator::isGarbageCollected);
        if (LIKELY(!m_table))
            return;
        ASSERT(!m_accessForbidden);
#if ENABLE(ASSERT)
        m_accessForbidden = true;
#endif
        deleteAllBucketsAndDeallocate(m_table, m_tableSize);
#if ENABLE(ASSERT)
        m_accessForbidden = false;
#endif
        m_table = nullptr;
    }

    HashTable(const HashTable&);
    void swap(HashTable&);
    HashTable& operator=(const HashTable&);

    // When the hash table is empty, just return the same iterator for end as
    // for begin. This is more efficient because we don't have to skip all the
    // empty and deleted buckets, and iterating an empty table is a common case
    // that's worth optimizing.
    iterator begin() { return isEmpty() ? end() : makeIterator(m_table); }
    iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); }
    const_iterator begin() const { return isEmpty() ? end() : makeConstIterator(m_table); }
    const_iterator end() const { return makeKnownGoodConstIterator(m_table + m_tableSize); }

    unsigned size() const
    {
        ASSERT(!m_accessForbidden);
        return m_keyCount;
    }
    unsigned capacity() const
    {
        ASSERT(!m_accessForbidden);
        return m_tableSize;
    }
    bool isEmpty() const
    {
        ASSERT(!m_accessForbidden);
        return !m_keyCount;
    }

    void reserveCapacityForSize(unsigned size);

    AddResult add(ValuePassInType value)
    {
        return add<IdentityTranslatorType>(Extractor::extract(value), value);
    }

    // A special version of add() that finds the object by hashing and comparing
    // with some other type, to avoid the cost of type conversion if the object
    // is already in the table.
    template <typename HashTranslator, typename T, typename Extra> AddResult add(const T& key, const Extra&);
    template <typename HashTranslator, typename T, typename Extra> AddResult addPassingHashCode(const T& key, const Extra&);

    iterator find(KeyPeekInType key) { return find<IdentityTranslatorType>(key); }
    const_iterator find(KeyPeekInType key) const { return find<IdentityTranslatorType>(key); }
    bool contains(KeyPeekInType key) const { return contains<IdentityTranslatorType>(key); }

    template <typename HashTranslator, typename T> iterator find(const T&);
    template <typename HashTranslator, typename T> const_iterator find(const T&) const;
    template <typename HashTranslator, typename T> bool contains(const T&) const;

    void remove(KeyPeekInType);
    void remove(iterator);
    void remove(const_iterator);
    void clear();

    static bool isEmptyBucket(const ValueType& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); }
    static bool isDeletedBucket(const ValueType& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); }
    static bool isEmptyOrDeletedBucket(const ValueType& value) { return HashTableHelper<ValueType, Extractor, KeyTraits>::isEmptyOrDeletedBucket(value); }

    ValueType* lookup(KeyPeekInType key) { return lookup<IdentityTranslatorType, KeyPeekInType>(key); }
    template <typename HashTranslator, typename T> ValueType* lookup(T);
    template <typename HashTranslator, typename T> const ValueType* lookup(T) const;

    template <typename VisitorDispatcher> void trace(VisitorDispatcher);

#if ENABLE(ASSERT)
    bool accessForbidden() const { return m_accessForbidden; }
    int64_t modifications() const { return m_modifications; }
    void registerModification() { m_modifications++; }
    // HashTable and collections that build on it do not support modifications
    // while there is an iterator in use. The exception is ListHashSet, which
    // has its own iterators that tolerate modification of the underlying set.
    void checkModifications(int64_t mods) const { ASSERT(mods == m_modifications); }
#else
    int64_t modifications() const { return 0; }
    void registerModification() {}
    void checkModifications(int64_t mods) const {}
#endif

private:
    static ValueType* allocateTable(unsigned size);
    static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size);

    typedef std::pair<ValueType*, bool> LookupType;
    typedef std::pair<LookupType, unsigned> FullLookupType;

    LookupType lookupForWriting(const Key& key) { return lookupForWriting<IdentityTranslatorType>(key); }
    template <typename HashTranslator, typename T> FullLookupType fullLookupForWriting(const T&);
    template <typename HashTranslator, typename T> LookupType lookupForWriting(const T&);

    void remove(ValueType*);

    bool shouldExpand() const { return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; }
    bool mustRehashInPlace() const { return m_keyCount * m_minLoad < m_tableSize * 2; }
    bool shouldShrink() const
    {
        // isAllocationAllowed check should be at the last because it's
        // expensive.
        return m_keyCount * m_minLoad < m_tableSize
            && m_tableSize > KeyTraits::minimumTableSize
            && Allocator::isAllocationAllowed();
    }
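
    // Illustrative note (not part of this CL): with m_maxLoad = 2 and
    // m_minLoad = 6, the predicates above mean the table expands once
    // (keyCount + deletedCount) reaches half of tableSize, must rehash in
    // place when live keys have fallen below a third of tableSize, and
    // shrinks when live keys fall below a sixth of tableSize. For
    // tableSize == 64: expand at 32 occupied slots; shrink once the table
    // holds 10 or fewer live keys (10 * 6 = 60 < 64).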
    ValueType* expand(ValueType* entry = 0);
    void shrink() { rehash(m_tableSize / 2, 0); }

    ValueType* expandBuffer(unsigned newTableSize, ValueType* entry, bool&);
    ValueType* rehashTo(ValueType* newTable, unsigned newTableSize, ValueType* entry);
    ValueType* rehash(unsigned newTableSize, ValueType* entry);
    ValueType* reinsert(ValueType&);

    static void initializeBucket(ValueType& bucket);
    static void deleteBucket(ValueType& bucket)
    {
        bucket.~ValueType();
        Traits::constructDeletedValue(bucket, Allocator::isGarbageCollected);
    }

    FullLookupType makeLookupResult(ValueType* position, bool found, unsigned hash)
    { return FullLookupType(LookupType(position, found), hash); }

    iterator makeIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this); }
    const_iterator makeConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this); }
    iterator makeKnownGoodIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
    const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return const_iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }

    static const unsigned m_maxLoad = 2;
    static const unsigned m_minLoad = 6;

    unsigned tableSizeMask() const
    {
        size_t mask = m_tableSize - 1;
        ASSERT((mask & m_tableSize) == 0);
        return mask;
    }

    void setEnqueued() { m_queueFlag = true; }
    void clearEnqueued() { m_queueFlag = false; }
    bool enqueued() { return m_queueFlag; }

    ValueType* m_table;
    unsigned m_tableSize;
    unsigned m_keyCount;
#if ENABLE(ASSERT)
    unsigned m_deletedCount:30;
    unsigned m_queueFlag:1;
    unsigned m_accessForbidden:1;
    unsigned m_modifications;
#else
    unsigned m_deletedCount:31;
    unsigned m_queueFlag:1;
#endif

#if DUMP_HASHTABLE_STATS_PER_TABLE
public:
    mutable OwnPtr<Stats> m_stats;
#endif

    template <WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z> friend struct WeakProcessingHashTableHelper;
    template <typename T, typename U, typename V, typename W> friend class LinkedHashSet;
};
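
// Illustrative sketch (not part of this CL): the "empty or deleted key
// values" note above refers to the two sentinel slot states the
// open-addressed table reserves. Assuming integral keys default to 0 as the
// empty sentinel and -1 (cast to the key type) as the deleted sentinel,
// which is an assumption about the default WTF hash traits:
//
//   HashMap<int, String> map;
//   map.add(1, "ok");    // fine
//   map.add(0, "boom");  // 0 is the empty-bucket sentinel: undefined
//                        // behavior unless custom key traits are supplied
//
// For pointer keys the empty sentinel is null, hence the rule that null
// pointer keys need custom key traits.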

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable()
    : m_table(nullptr)
    , m_tableSize(0)
    , m_keyCount(0)
    , m_deletedCount(0)
    , m_queueFlag(false)
#if ENABLE(ASSERT)
    , m_accessForbidden(false)
    , m_modifications(0)
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
    , m_stats(adoptPtr(new Stats))
#endif
{
    static_assert(Allocator::isGarbageCollected || (!IsPointerToGarbageCollectedType<Key>::value && !IsPointerToGarbageCollectedType<Value>::value), "Cannot put raw pointers to garbage-collected classes into an off-heap collection.");
}

inline unsigned doubleHash(unsigned key)
{
    key = ~key + (key >> 23);
    key ^= (key << 12);
    key ^= (key >> 7);
    key ^= (key << 2);
    key ^= (key >> 20);
    return key;
}

inline unsigned calculateCapacity(unsigned size)
{
    for (unsigned mask = size; mask; mask >>= 1)
        size |= mask; // 00110101010 -> 00111111111
    return (size + 1) * 2; // 00111111111 -> 10000000000
}
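
// Illustrative note (not part of this CL): calculateCapacity ORs the size
// with every right-shift of itself, smearing the leading 1-bit across all
// lower positions, then rounds up to the next power of two and doubles it.
// Worked example:
//
//   calculateCapacity(300);  // 300  = 0b100101100
//                            // smear -> 0b111111111 (511)
//                            // (511 + 1) * 2 = 1024
//
// A power-of-two capacity is what lets tableSizeMask() reduce hashes with a
// bitwise AND instead of a modulo, and the doubling keeps the table at most
// half full after reserveCapacityForSize().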

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::reserveCapacityForSize(unsigned newSize)
{
    unsigned newCapacity = calculateCapacity(newSize);
    if (newCapacity < KeyTraits::minimumTableSize)
        newCapacity = KeyTraits::minimumTableSize;

    if (newCapacity > capacity()) {
        RELEASE_ASSERT(!static_cast<int>(newCapacity >> 31)); // HashTable capacity should not overflow 32bit int.
        rehash(newCapacity, 0);
    }
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template <typename HashTranslator, typename T>
inline Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key)
{
    return const_cast<Value*>(const_cast<const HashTable*>(this)->lookup<HashTranslator, T>(key));
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template <typename HashTranslator, typename T>
inline const Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key) const
{
    ASSERT(!m_accessForbidden);
    ASSERT((HashTableKeyChecker<HashTranslator, KeyTraits, HashFunctions::safeToCompareToEmptyOrDeleted>::checkKey(key)));
    const ValueType* table = m_table;
    if (!table)
        return nullptr;

    size_t k = 0;
    size_t sizeMask = tableSizeMask();
    unsigned h = HashTranslator::hash(key);
    size_t i = h & sizeMask;

    UPDATE_ACCESS_COUNTS();

    while (1) {
        const ValueType* entry = table + i;

        if (HashFunctions::safeToCompareToEmptyOrDeleted) {
            if (HashTranslator::equal(Extractor::extract(*entry), key))
                return entry;

            if (isEmptyBucket(*entry))
                return nullptr;
        } else {
            if (isEmptyBucket(*entry))
                return nullptr;

            if (!isDeletedBucket(*entry) && HashTranslator::equal(Extractor::extract(*entry), key))
                return entry;
        }
        UPDATE_PROBE_COUNTS();
        if (!k)
            k = 1 | doubleHash(h);
        i = (i + k) & sizeMask;
    }
}
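
// Illustrative sketch (not part of this CL): the probe sequence above is
// double hashing over a power-of-two table. The step k = 1 | doubleHash(h)
// is forced odd, and an odd step is coprime with a power-of-two table size,
// so i, i + k, i + 2k, ... (mod tableSize) visits every bucket before
// repeating. The loop therefore always terminates: it finds the key or an
// empty bucket, and the load-factor cap guarantees empty buckets exist.
//
//   // standalone check of the full-cycle property (hypothetical harness):
//   unsigned sizeMask = 1024 - 1;    // power-of-two table
//   size_t i = h & sizeMask;
//   size_t k = 1 | doubleHash(h);    // odd step
//   std::set<size_t> seen;
//   for (unsigned n = 0; n < 1024; ++n) {
//     seen.insert(i);
//     i = (i + k) & sizeMask;
//   }
//   // seen.size() == 1024: every bucket was visited exactly once.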

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template <typename HashTranslator, typename T>
inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::LookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookupForWriting(const T& key)
{
    ASSERT(!m_accessForbidden);
    ASSERT(m_table);
    registerModification();

    ValueType* table = m_table;
    size_t k = 0;
    size_t sizeMask = tableSizeMask();
    unsigned h = HashTranslator::hash(key);
    size_t i = h & sizeMask;

    UPDATE_ACCESS_COUNTS();

    ValueType* deletedEntry = nullptr;

    while (1) {
        ValueType* entry = table + i;

        if (isEmptyBucket(*entry))
            return LookupType(deletedEntry ? deletedEntry : entry, false);

        if (HashFunctions::safeToCompareToEmptyOrDeleted) {
            if (HashTranslator::equal(Extractor::extract(*entry), key))
                return LookupType(entry, true);

            if (isDeletedBucket(*entry))
                deletedEntry = entry;
        } else {
            if (isDeletedBucket(*entry))
                deletedEntry = entry;
            else if (HashTranslator::equal(Extractor::extract(*entry), key))
                return LookupType(entry, true);
        }
        UPDATE_PROBE_COUNTS();
        if (!k)
            k = 1 | doubleHash(h);
        i = (i + k) & sizeMask;
    }
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template <typename HashTranslator, typename T>
inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::FullLookupType HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::fullLookupForWriting(const T& key)
{
    ASSERT(!m_accessForbidden);
    ASSERT(m_table);
    registerModification();

    ValueType* table = m_table;
    size_t k = 0;
    size_t sizeMask = tableSizeMask();
    unsigned h = HashTranslator::hash(key);
    size_t i = h & sizeMask;

    UPDATE_ACCESS_COUNTS();

    ValueType* deletedEntry = nullptr;

    while (1) {
        ValueType* entry = table + i;

        if (isEmptyBucket(*entry))
            return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);

        if (HashFunctions::safeToCompareToEmptyOrDeleted) {
            if (HashTranslator::equal(Extractor::extract(*entry), key))
                return makeLookupResult(entry, true, h);

            if (isDeletedBucket(*entry))
                deletedEntry = entry;
        } else {
            if (isDeletedBucket(*entry))
                deletedEntry = entry;
            else if (HashTranslator::equal(Extractor::extract(*entry), key))
                return makeLookupResult(entry, true, h);
        }
        UPDATE_PROBE_COUNTS();
        if (!k)
            k = 1 | doubleHash(h);
        i = (i + k) & sizeMask;
    }
}

template <bool emptyValueIsZero> struct HashTableBucketInitializer;

template <> struct HashTableBucketInitializer<false> {
    STATIC_ONLY(HashTableBucketInitializer);
    template <typename Traits, typename Value> static void initialize(Value& bucket)
    {
        new (NotNull, &bucket) Value(Traits::emptyValue());
    }
};

template <> struct HashTableBucketInitializer<true> {
    STATIC_ONLY(HashTableBucketInitializer);
    template <typename Traits, typename Value> static void initialize(Value& bucket)
    {
        // This initializes the bucket without copying the empty value. That
        // makes it possible to use this with types that don't support copying.
        // The memset to 0 looks like a slow operation but is optimized by the
        // compilers.
        memset(&bucket, 0, sizeof(bucket));
    }
};

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::initializeBucket(ValueType& bucket)
{
    HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize<Traits>(bucket);
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template <typename HashTranslator, typename T, typename Extra>
typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::add(const T& key, const Extra& extra)
{
    ASSERT(!m_accessForbidden);
    ASSERT(Allocator::isAllocationAllowed());
    if (!m_table)
        expand();

    ASSERT(m_table);

    ValueType* table = m_table;
    size_t k = 0;
    size_t sizeMask = tableSizeMask();
    unsigned h = HashTranslator::hash(key);
    size_t i = h & sizeMask;

    UPDATE_ACCESS_COUNTS();

    ValueType* deletedEntry = nullptr;
    ValueType* entry;
    while (1) {
        entry = table + i;

        if (isEmptyBucket(*entry))
            break;

        if (HashFunctions::safeToCompareToEmptyOrDeleted) {
            if (HashTranslator::equal(Extractor::extract(*entry), key))
                return AddResult(this, entry, false);

            if (isDeletedBucket(*entry))
                deletedEntry = entry;
        } else {
            if (isDeletedBucket(*entry))
                deletedEntry = entry;
            else if (HashTranslator::equal(Extractor::extract(*entry), key))
                return AddResult(this, entry, false);
        }
        UPDATE_PROBE_COUNTS();
        if (!k)
            k = 1 | doubleHash(h);
        i = (i + k) & sizeMask;
    }

    registerModification();

    if (deletedEntry) {
        // Overwrite any data left over from last use, using placement new or
        // memset.
        initializeBucket(*deletedEntry);
        entry = deletedEntry;
        --m_deletedCount;
    }

    HashTranslator::translate(*entry, key, extra);
    ASSERT(!isEmptyOrDeletedBucket(*entry));

    ++m_keyCount;

    if (shouldExpand())
        entry = expand(entry);

    return AddResult(this, entry, true);
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template <typename HashTranslator, typename T, typename Extra>
typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::addPassingHashCode(const T& key, const Extra& extra)
{
    ASSERT(!m_accessForbidden);
    ASSERT(Allocator::isAllocationAllowed());
    if (!m_table)
        expand();

    FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key);

    ValueType* entry = lookupResult.first.first;
    bool found = lookupResult.first.second;
    unsigned h = lookupResult.second;

    if (found)
        return AddResult(this, entry, false);

    registerModification();

    if (isDeletedBucket(*entry)) {
        initializeBucket(*entry);
        --m_deletedCount;
    }

    HashTranslator::translate(*entry, key, extra, h);
    ASSERT(!isEmptyOrDeletedBucket(*entry));

    ++m_keyCount;
    if (shouldExpand())
        entry = expand(entry);

    return AddResult(this, entry, true);
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::reinsert(ValueType& entry)
{
    ASSERT(m_table);
    registerModification();
    ASSERT(!lookupForWriting(Extractor::extract(entry)).second);
    ASSERT(!isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first)));
#if DUMP_HASHTABLE_STATS
    atomicIncrement(&HashTableStats::numReinserts);
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
    ++m_stats->numReinserts;
#endif
    Value* newEntry = lookupForWriting(Extractor::extract(entry)).first;
    Mover<ValueType, Allocator>::move(entry, *newEntry);

    return newEntry;
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template <typename HashTranslator, typename T>
inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key)
{
    ValueType* entry = lookup<HashTranslator>(key);
    if (!entry)
        return end();

    return makeKnownGoodIterator(entry);
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template <typename HashTranslator, typename T>
inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::const_iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key) const
{
    ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
    if (!entry)
        return end();

    return makeKnownGoodConstIterator(entry);
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template <typename HashTranslator, typename T>
bool HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::contains(const T& key) const
{
    return const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(ValueType* pos)
{
    registerModification();
#if DUMP_HASHTABLE_STATS
    atomicIncrement(&HashTableStats::numRemoves);
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
    ++m_stats->numRemoves;
#endif

    ASSERT(!m_accessForbidden);
#if ENABLE(ASSERT)
    m_accessForbidden = true;
#endif
    deleteBucket(*pos);
#if ENABLE(ASSERT)
    m_accessForbidden = false;
#endif
    ++m_deletedCount;
    --m_keyCount;

    if (shouldShrink())
        shrink();
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(iterator it)
{
    if (it == end())
        return;
    remove(const_cast<ValueType*>(it.m_iterator.m_position));
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(const_iterator it)
{
    if (it == end())
        return;
    remove(const_cast<ValueType*>(it.m_position));
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::remove(KeyPeekInType key)
{
    remove(find(key));
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::allocateTable(unsigned size)
{
    size_t allocSize = size * sizeof(ValueType);
    ValueType* result;
    // Assert that we will not use memset on things with a vtable entry. The
    // compiler will also check this on some platforms. We would like to check
    // this on the whole value (key-value pair), but std::is_polymorphic will return
    // false for a pair of two types, even if one of the components is
    // polymorphic.
    static_assert(!Traits::emptyValueIsZero || !std::is_polymorphic<KeyType>::value, "empty value cannot be zero for things with a vtable");

#if ENABLE(OILPAN)
    static_assert(Allocator::isGarbageCollected
        || ((!AllowsOnlyPlacementNew<KeyType>::value || !NeedsTracing<KeyType>::value)
        && (!AllowsOnlyPlacementNew<ValueType>::value || !NeedsTracing<ValueType>::value))
        , "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects that have trace methods into an off-heap HashTable");
#endif
    if (Traits::emptyValueIsZero) {
        result = Allocator::template allocateZeroedHashTableBacking<ValueType, HashTable>(allocSize);
    } else {
        result = Allocator::template allocateHashTableBacking<ValueType, HashTable>(allocSize);
        for (unsigned i = 0; i < size; i++)
            initializeBucket(result[i]);
    }
    return result;
}

template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::deleteAllBucketsAndDeallocate(ValueType* table, unsigned size)
{
    if (!IsTriviallyDestructible<ValueType>::value) {
        for (unsigned i = 0; i < size; ++i) {
            // This code is called when the hash table is cleared or resized. We
            // have allocated a new backing store and we need to run the
            // destructors on the old backing store, as it is being freed. If we
            // are GCing we need to both call the destructor and mark the bucket
            // as deleted, otherwise the destructor gets called again when the
            // GC finds the backing store. With the default allocator it's
            // enough to call the destructor, since we will free the memory
            // explicitly and we won't see the memory with the bucket again.
            if (Allocator::isGarbageCollected) {
                if (!isEmptyOrDeletedBucket(table[i]))
                    deleteBucket(table[i]);
            } else {
                if (!isDeletedBucket(table[i]))
                    table[i].~ValueType();
            }
        }
    }
    Allocator::freeHashTableBacking(table);
}
1058
1059 template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1060 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::expand(Value* entry)
1061 {
1062 unsigned newSize;
1063 if (!m_tableSize) {
1064 newSize = KeyTraits::minimumTableSize;
1065 } else if (mustRehashInPlace()) {
1066 newSize = m_tableSize;
1067 } else {
1068 newSize = m_tableSize * 2;
1069 RELEASE_ASSERT(newSize > m_tableSize);
1070 }
1071
1072 return rehash(newSize, entry);
1073 }
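The doubling branch above pairs the multiplication with a RELEASE_ASSERT because unsigned arithmetic wraps silently: once the table reaches 2^31 buckets (for a 32-bit unsigned), m_tableSize * 2 is 0, and newSize > m_tableSize is the cheapest way to notice. A standalone sketch of the same guard, with assert standing in for RELEASE_ASSERT and a hypothetical helper name:

    #include <cassert>

    unsigned doubledOrTrap(unsigned tableSize) {
      unsigned newSize = tableSize * 2;  // wraps to 0 at 2^31 for 32-bit unsigned
      assert(newSize > tableSize);       // catches the wraparound
      return newSize;
    }

    int main() {
      assert(doubledOrTrap(1024) == 2048);
      // doubledOrTrap(0x80000000u) would trip the assert instead of silently
      // handing back a zero-sized table.
      return 0;
    }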
1074
1075 template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1076 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::expandBuffer(unsigned newTableSize, Value* entry, bool& success)
1077 {
1078 success = false;
1079 ASSERT(m_tableSize < newTableSize);
1080 if (!Allocator::expandHashTableBacking(m_table, newTableSize * sizeof(ValueType)))
1081 return nullptr;
1082
1083 success = true;
1084
1085 Value* newEntry = nullptr;
1086 unsigned oldTableSize = m_tableSize;
1087 ValueType* originalTable = m_table;
1088
1089 ValueType* temporaryTable = allocateTable(oldTableSize);
1090 for (unsigned i = 0; i < oldTableSize; i++) {
1091 if (&m_table[i] == entry)
1092 newEntry = &temporaryTable[i];
1093 if (isEmptyOrDeletedBucket(m_table[i])) {
1094 ASSERT(&m_table[i] != entry);
1095 if (Traits::emptyValueIsZero) {
1096 memset(&temporaryTable[i], 0, sizeof(ValueType));
1097 } else {
1098 initializeBucket(temporaryTable[i]);
1099 }
1100 } else {
1101 Mover<ValueType, Allocator>::move(m_table[i], temporaryTable[i]);
1102 }
1103 }
1104 m_table = temporaryTable;
1105
1106 if (Traits::emptyValueIsZero) {
1107 memset(originalTable, 0, newTableSize * sizeof(ValueType));
1108 } else {
1109 for (unsigned i = 0; i < newTableSize; i++)
1110 initializeBucket(originalTable[i]);
1111 }
1112 newEntry = rehashTo(originalTable, newTableSize, newEntry);
1113
1114 ASSERT(!m_accessForbidden);
1115 #if ENABLE(ASSERT)
1116 m_accessForbidden = true;
1117 #endif
1118 deleteAllBucketsAndDeallocate(temporaryTable, oldTableSize);
1119 #if ENABLE(ASSERT)
1120 m_accessForbidden = false;
1121 #endif
1122
1123 return newEntry;
1124 }
1125
1126 template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1127 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::rehashTo(ValueType* newTable, unsigned newTableSize, Value* entry)
1128 {
1129 unsigned oldTableSize = m_tableSize;
1130 ValueType* oldTable = m_table;
1131
1132 #if DUMP_HASHTABLE_STATS
1133 if (oldTableSize != 0)
1134 atomicIncrement(&HashTableStats::numRehashes);
1135 #endif
1136
1137 #if DUMP_HASHTABLE_STATS_PER_TABLE
1138 if (oldTableSize != 0)
1139 ++m_stats->numRehashes;
1140 #endif
1141
1142 m_table = newTable;
1143 m_tableSize = newTableSize;
1144
1145 Value* newEntry = nullptr;
1146 for (unsigned i = 0; i != oldTableSize; ++i) {
1147 if (isEmptyOrDeletedBucket(oldTable[i])) {
1148 ASSERT(&oldTable[i] != entry);
1149 continue;
1150 }
1151 Value* reinsertedEntry = reinsert(oldTable[i]);
1152 if (&oldTable[i] == entry) {
1153 ASSERT(!newEntry);
1154 newEntry = reinsertedEntry;
1155 }
1156 }
1157
1158 m_deletedCount = 0;
1159
1160 return newEntry;
1161 }
1162
1163 template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1164 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::rehash(unsigned newTableSize, Value* entry)
1165 {
1166 unsigned oldTableSize = m_tableSize;
1167 ValueType* oldTable = m_table;
1168
1169 #if DUMP_HASHTABLE_STATS
1170 if (oldTableSize != 0)
1171 atomicIncrement(&HashTableStats::numRehashes);
1172 #endif
1173
1174 #if DUMP_HASHTABLE_STATS_PER_TABLE
1175 if (oldTableSize != 0)
1176 ++m_stats->numRehashes;
1177 #endif
1178
1179 // The Allocator::isGarbageCollected check is not needed for correctness; it
1180 // is a static hint that lets the compiler drop this branch entirely, because
1181 // expandBuffer always reports failure when Allocator is a PartitionAllocator.
1182 if (Allocator::isGarbageCollected && newTableSize > oldTableSize) {
1183 bool success;
1184 Value* newEntry = expandBuffer(newTableSize, entry, success);
1185 if (success)
1186 return newEntry;
1187 }
1188
1189 ValueType* newTable = allocateTable(newTableSize);
1190 Value* newEntry = rehashTo(newTable, newTableSize, entry);
1191
1192 ASSERT(!m_accessForbidden);
1193 #if ENABLE(ASSERT)
1194 m_accessForbidden = true;
1195 #endif
1196 deleteAllBucketsAndDeallocate(oldTable, oldTableSize);
1197 #if ENABLE(ASSERT)
1198 m_accessForbidden = false;
1199 #endif
1200
1201 return newEntry;
1202 }
1203
1204 template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1205 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::clear()
1206 {
1207 registerModification();
1208 if (!m_table)
1209 return;
1210
1211 ASSERT(!m_accessForbidden);
1212 #if ENABLE(ASSERT)
1213 m_accessForbidden = true;
1214 #endif
1215 deleteAllBucketsAndDeallocate(m_table, m_tableSize); 618 deleteAllBucketsAndDeallocate(m_table, m_tableSize);
1216 #if ENABLE(ASSERT) 619 #if ENABLE(ASSERT)
1217 m_accessForbidden = false; 620 m_accessForbidden = false;
1218 #endif 621 #endif
1219 m_table = nullptr; 622 m_table = nullptr;
1220 m_tableSize = 0; 623 }
1221 m_keyCount = 0; 624
1222 } 625 HashTable(const HashTable&);
1223 626 void swap(HashTable&);
1224 template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator> 627 HashTable& operator=(const HashTable&);
1225 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::HashTable(const HashTable& other) 628
1226 : m_table(nullptr) 629 // When the hash table is empty, just return the same iterator for end as
1227 , m_tableSize(0) 630 // for begin. This is more efficient because we don't have to skip all the
1228 , m_keyCount(0) 631 // empty and deleted buckets, and iterating an empty table is a common case
1229 , m_deletedCount(0) 632 // that's worth optimizing.
1230 , m_queueFlag(false) 633 iterator begin() { return isEmpty() ? end() : makeIterator(m_table); }
634 iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); }
635 const_iterator begin() const {
636 return isEmpty() ? end() : makeConstIterator(m_table);
637 }
638 const_iterator end() const {
639 return makeKnownGoodConstIterator(m_table + m_tableSize);
640 }
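The shortcut in begin() is worth spelling out: an iterator constructed at m_table must first skip forward over empty and deleted buckets, which costs O(capacity) even when nothing is stored, so an empty table returns end() directly. A rough sketch of the scan being avoided (firstOccupied and isOccupied are names local to this sketch):

    // Without the isEmpty() shortcut, begin() on an empty table would walk
    // every bucket before concluding it equals end().
    template <typename Bucket, typename Pred>
    Bucket* firstOccupied(Bucket* pos, Bucket* end, Pred isOccupied) {
      while (pos != end && !isOccupied(*pos))
        ++pos;  // up to `capacity` steps when the table is empty
      return pos;
    }

    int main() {
      int buckets[4] = {0, 0, 0, 0};  // treat 0 as "empty"
      auto isOccupied = [](int v) { return v != 0; };
      return firstOccupied(buckets, buckets + 4, isOccupied) == buckets + 4 ? 0 : 1;
    }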
641
642 unsigned size() const {
643 ASSERT(!m_accessForbidden);
644 return m_keyCount;
645 }
646 unsigned capacity() const {
647 ASSERT(!m_accessForbidden);
648 return m_tableSize;
649 }
650 bool isEmpty() const {
651 ASSERT(!m_accessForbidden);
652 return !m_keyCount;
653 }
654
655 void reserveCapacityForSize(unsigned size);
656
657 AddResult add(ValuePassInType value) {
658 return add<IdentityTranslatorType>(Extractor::extract(value), value);
659 }
660
661 // A special version of add() that finds the object by hashing and comparing
662 // with some other type, to avoid the cost of type conversion if the object
663 // is already in the table.
664 template <typename HashTranslator, typename T, typename Extra>
665 AddResult add(const T& key, const Extra&);
666 template <typename HashTranslator, typename T, typename Extra>
667 AddResult addPassingHashCode(const T& key, const Extra&);
668
669 iterator find(KeyPeekInType key) { return find<IdentityTranslatorType>(key); }
670 const_iterator find(KeyPeekInType key) const {
671 return find<IdentityTranslatorType>(key);
672 }
673 bool contains(KeyPeekInType key) const {
674 return contains<IdentityTranslatorType>(key);
675 }
676
677 template <typename HashTranslator, typename T>
678 iterator find(const T&);
679 template <typename HashTranslator, typename T>
680 const_iterator find(const T&) const;
681 template <typename HashTranslator, typename T>
682 bool contains(const T&) const;
683
684 void remove(KeyPeekInType);
685 void remove(iterator);
686 void remove(const_iterator);
687 void clear();
688
689 static bool isEmptyBucket(const ValueType& value) {
690 return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value));
691 }
692 static bool isDeletedBucket(const ValueType& value) {
693 return KeyTraits::isDeletedValue(Extractor::extract(value));
694 }
695 static bool isEmptyOrDeletedBucket(const ValueType& value) {
696 return HashTableHelper<ValueType, Extractor,
697 KeyTraits>::isEmptyOrDeletedBucket(value);
698 }
699
700 ValueType* lookup(KeyPeekInType key) {
701 return lookup<IdentityTranslatorType, KeyPeekInType>(key);
702 }
703 template <typename HashTranslator, typename T>
704 ValueType* lookup(T);
705 template <typename HashTranslator, typename T>
706 const ValueType* lookup(T) const;
707
708 template <typename VisitorDispatcher>
709 void trace(VisitorDispatcher);
710
1231 #if ENABLE(ASSERT) 711 #if ENABLE(ASSERT)
1232 , m_accessForbidden(false) 712 bool accessForbidden() const { return m_accessForbidden; }
1233 , m_modifications(0) 713 int64_t modifications() const { return m_modifications; }
1234 #endif 714 void registerModification() { m_modifications++; }
715 // HashTable and collections that build on it do not support modifications
716 // while there is an iterator in use. The exception is ListHashSet, which
717 // has its own iterators that tolerate modification of the underlying set.
718 void checkModifications(int64_t mods) const {
719 ASSERT(mods == m_modifications);
720 }
721 #else
722 int64_t modifications() const { return 0; }
723 void registerModification() {}
724 void checkModifications(int64_t mods) const {}
725 #endif
726
727 private:
728 static ValueType* allocateTable(unsigned size);
729 static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size);
730
731 typedef std::pair<ValueType*, bool> LookupType;
732 typedef std::pair<LookupType, unsigned> FullLookupType;
733
734 LookupType lookupForWriting(const Key& key) {
735 return lookupForWriting<IdentityTranslatorType>(key);
736 }
737 template <typename HashTranslator, typename T>
738 FullLookupType fullLookupForWriting(const T&);
739 template <typename HashTranslator, typename T>
740 LookupType lookupForWriting(const T&);
741
742 void remove(ValueType*);
743
744 bool shouldExpand() const {
745 return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize;
746 }
747 bool mustRehashInPlace() const {
748 return m_keyCount * m_minLoad < m_tableSize * 2;
749 }
750 bool shouldShrink() const {
751 // The isAllocationAllowed check should come last because it's
752 // expensive.
753 return m_keyCount * m_minLoad < m_tableSize &&
754 m_tableSize > KeyTraits::minimumTableSize &&
755 Allocator::isAllocationAllowed();
756 }
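Reading the three predicates together: with m_maxLoad = 2 the table grows once live entries plus tombstones fill half the buckets, with m_minLoad = 6 it rehashes at the same size when fewer than a third of the buckets hold live keys (the rest being tombstones), and it halves once occupancy falls below a sixth. A worked restatement with names local to this sketch:

    #include <cassert>

    bool shouldExpandSketch(unsigned keys, unsigned deleted, unsigned size) {
      return (keys + deleted) * 2 >= size;  // live + tombstones at >= 1/2 capacity
    }
    bool mustRehashInPlaceSketch(unsigned keys, unsigned size) {
      return keys * 6 < size * 2;  // under 1/3 live: reclaim tombstones, don't grow
    }
    bool shouldShrinkSketch(unsigned keys, unsigned size) {
      return keys * 6 < size;  // under 1/6 live: halve the table
    }

    int main() {
      // A 64-bucket table expands once entries plus tombstones reach 32 ...
      assert(shouldExpandSketch(20, 12, 64));
      // ... rehashes at the same size when mostly tombstones (under 22 live keys) ...
      assert(mustRehashInPlaceSketch(21, 64) && !mustRehashInPlaceSketch(22, 64));
      // ... and shrinks once fewer than 11 keys remain.
      assert(shouldShrinkSketch(10, 64) && !shouldShrinkSketch(11, 64));
      return 0;
    }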
757 ValueType* expand(ValueType* entry = 0);
758 void shrink() { rehash(m_tableSize / 2, 0); }
759
760 ValueType* expandBuffer(unsigned newTableSize, ValueType* entry, bool&);
761 ValueType* rehashTo(ValueType* newTable,
762 unsigned newTableSize,
763 ValueType* entry);
764 ValueType* rehash(unsigned newTableSize, ValueType* entry);
765 ValueType* reinsert(ValueType&);
766
767 static void initializeBucket(ValueType& bucket);
768 static void deleteBucket(ValueType& bucket) {
769 bucket.~ValueType();
770 Traits::constructDeletedValue(bucket, Allocator::isGarbageCollected);
771 }
772
773 FullLookupType makeLookupResult(ValueType* position,
774 bool found,
775 unsigned hash) {
776 return FullLookupType(LookupType(position, found), hash);
777 }
778
779 iterator makeIterator(ValueType* pos) {
780 return iterator(pos, m_table + m_tableSize, this);
781 }
782 const_iterator makeConstIterator(ValueType* pos) const {
783 return const_iterator(pos, m_table + m_tableSize, this);
784 }
785 iterator makeKnownGoodIterator(ValueType* pos) {
786 return iterator(pos, m_table + m_tableSize, this, HashItemKnownGood);
787 }
788 const_iterator makeKnownGoodConstIterator(ValueType* pos) const {
789 return const_iterator(pos, m_table + m_tableSize, this, HashItemKnownGood);
790 }
791
792 static const unsigned m_maxLoad = 2;
793 static const unsigned m_minLoad = 6;
794
795 unsigned tableSizeMask() const {
796 size_t mask = m_tableSize - 1;
797 ASSERT((mask & m_tableSize) == 0);
798 return mask;
799 }
800
801 void setEnqueued() { m_queueFlag = true; }
802 void clearEnqueued() { m_queueFlag = false; }
803 bool enqueued() { return m_queueFlag; }
804
805 ValueType* m_table;
806 unsigned m_tableSize;
807 unsigned m_keyCount;
808 #if ENABLE(ASSERT)
809 unsigned m_deletedCount : 30;
810 unsigned m_queueFlag : 1;
811 unsigned m_accessForbidden : 1;
812 unsigned m_modifications;
813 #else
814 unsigned m_deletedCount : 31;
815 unsigned m_queueFlag : 1;
816 #endif
817
1235 #if DUMP_HASHTABLE_STATS_PER_TABLE 818 #if DUMP_HASHTABLE_STATS_PER_TABLE
1236 , m_stats(adoptPtr(new Stats(*other.m_stats))) 819 public:
820 mutable OwnPtr<Stats> m_stats;
821 #endif
822
823 template <WeakHandlingFlag x,
824 typename T,
825 typename U,
826 typename V,
827 typename W,
828 typename X,
829 typename Y,
830 typename Z>
831 friend struct WeakProcessingHashTableHelper;
832 template <typename T, typename U, typename V, typename W>
833 friend class LinkedHashSet;
834 };
835
836 template <typename Key,
837 typename Value,
838 typename Extractor,
839 typename HashFunctions,
840 typename Traits,
841 typename KeyTraits,
842 typename Allocator>
843 inline HashTable<Key,
844 Value,
845 Extractor,
846 HashFunctions,
847 Traits,
848 KeyTraits,
849 Allocator>::HashTable()
850 : m_table(nullptr),
851 m_tableSize(0),
852 m_keyCount(0),
853 m_deletedCount(0),
854 m_queueFlag(false)
855 #if ENABLE(ASSERT)
856 ,
857 m_accessForbidden(false),
858 m_modifications(0)
859 #endif
860 #if DUMP_HASHTABLE_STATS_PER_TABLE
861 ,
862 m_stats(adoptPtr(new Stats))
1237 #endif 863 #endif
1238 { 864 {
1239 // Copy the hash table the dumb way, by adding each element to the new 865 static_assert(Allocator::isGarbageCollected ||
1240 // table. It might be more efficient to copy the table slots, but it's not 866 (!IsPointerToGarbageCollectedType<Key>::value &&
1241 // clear that efficiency is needed. 867 !IsPointerToGarbageCollectedType<Value>::value),
1242 const_iterator end = other.end(); 868 "Cannot put raw pointers to garbage-collected classes into an "
1243 for (const_iterator it = other.begin(); it != end; ++it) 869 "off-heap collection.");
1244 add(*it); 870 }
1245 } 871
1246 872 inline unsigned doubleHash(unsigned key) {
1247 template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator> 873 key = ~key + (key >> 23);
1248 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::swap(HashTable& other) 874 key ^= (key << 12);
875 key ^= (key >> 7);
876 key ^= (key << 2);
877 key ^= (key >> 20);
878 return key;
879 }
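doubleHash supplies the probe step for the lookup loops below, which force it odd with 1 | doubleHash(h). On a power-of-two table an odd step is coprime with the size, so the sequence i, i+k, i+2k, ... modulo the size visits every bucket exactly once before repeating; no bucket can be missed. A small standalone self-check of that property (not WTF code):

    #include <cassert>
    #include <set>

    int main() {
      const unsigned size = 16, mask = size - 1;
      unsigned step = 1 | 6u;  // 7: any step forced odd the same way, 1 | k
      std::set<unsigned> visited;
      unsigned i = 5;  // arbitrary start bucket
      for (unsigned n = 0; n < size; ++n) {
        visited.insert(i);
        i = (i + step) & mask;
      }
      assert(visited.size() == size);  // full cycle: every bucket probed once
      return 0;
    }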
880
881 inline unsigned calculateCapacity(unsigned size) {
882 for (unsigned mask = size; mask; mask >>= 1)
883 size |= mask; // 00110101010 -> 00111111111
884 return (size + 1) * 2; // 00111111111 -> 10000000000
885 }
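The bit-smearing loop ORs every lower bit into size, turning it into one less than the next power of two; adding one and doubling then yields a power-of-two capacity at least twice the requested size, which keeps the initial load factor at or below one half. A local copy with worked cases (the Sketch suffix marks it as not the real function):

    #include <cassert>

    unsigned calculateCapacitySketch(unsigned size) {
      for (unsigned mask = size; mask; mask >>= 1)
        size |= mask;  // e.g. 00110101 -> 00111111
      return (size + 1) * 2;  // 00111111 -> 10000000
    }

    int main() {
      assert(calculateCapacitySketch(5) == 16);   // 101 -> 111, then (7+1)*2
      assert(calculateCapacitySketch(12) == 32);  // 1100 -> 1111, then 16*2
      assert(calculateCapacitySketch(16) == 64);  // 10000 -> 11111, then 32*2
      return 0;
    }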
886
887 template <typename Key,
888 typename Value,
889 typename Extractor,
890 typename HashFunctions,
891 typename Traits,
892 typename KeyTraits,
893 typename Allocator>
894 void HashTable<Key,
895 Value,
896 Extractor,
897 HashFunctions,
898 Traits,
899 KeyTraits,
900 Allocator>::reserveCapacityForSize(unsigned newSize) {
901 unsigned newCapacity = calculateCapacity(newSize);
902 if (newCapacity < KeyTraits::minimumTableSize)
903 newCapacity = KeyTraits::minimumTableSize;
904
905 if (newCapacity > capacity()) {
906 RELEASE_ASSERT(!static_cast<int>(
907 newCapacity >>
908 31)); // HashTable capacity should not overflow a 32-bit int.
909 rehash(newCapacity, 0);
910 }
911 }
912
913 template <typename Key,
914 typename Value,
915 typename Extractor,
916 typename HashFunctions,
917 typename Traits,
918 typename KeyTraits,
919 typename Allocator>
920 template <typename HashTranslator, typename T>
921 inline Value*
922 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
923 lookup(T key) {
924 return const_cast<Value*>(
925 const_cast<const HashTable*>(this)->lookup<HashTranslator, T>(key));
926 }
927
928 template <typename Key,
929 typename Value,
930 typename Extractor,
931 typename HashFunctions,
932 typename Traits,
933 typename KeyTraits,
934 typename Allocator>
935 template <typename HashTranslator, typename T>
936 inline const Value*
937 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
938 lookup(T key) const {
939 ASSERT(!m_accessForbidden);
940 ASSERT((HashTableKeyChecker<
941 HashTranslator, KeyTraits,
942 HashFunctions::safeToCompareToEmptyOrDeleted>::checkKey(key)));
943 const ValueType* table = m_table;
944 if (!table)
945 return nullptr;
946
947 size_t k = 0;
948 size_t sizeMask = tableSizeMask();
949 unsigned h = HashTranslator::hash(key);
950 size_t i = h & sizeMask;
951
952 UPDATE_ACCESS_COUNTS();
953
954 while (1) {
955 const ValueType* entry = table + i;
956
957 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
958 if (HashTranslator::equal(Extractor::extract(*entry), key))
959 return entry;
960
961 if (isEmptyBucket(*entry))
962 return nullptr;
963 } else {
964 if (isEmptyBucket(*entry))
965 return nullptr;
966
967 if (!isDeletedBucket(*entry) &&
968 HashTranslator::equal(Extractor::extract(*entry), key))
969 return entry;
970 }
971 UPDATE_PROBE_COUNTS();
972 if (!k)
973 k = 1 | doubleHash(h);
974 i = (i + k) & sizeMask;
975 }
976 }
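For readers new to open addressing, the loop above boils down to: probe the bucket the hash selects, stop on a match or on a truly empty bucket, and step past deleted buckets; when equality is unsafe on sentinel values, the emptiness checks must run before the comparison. A stripped-down standalone analogue over ints, using -1 as the empty sentinel and a shifted hash as the second hash (both assumptions of this sketch; the real loop uses doubleHash and traits):

    #include <functional>
    #include <vector>

    int* lookupSketch(std::vector<int>& table, int key) {
      const size_t mask = table.size() - 1;  // size must be a power of two
      size_t h = std::hash<int>()(key);
      size_t i = h & mask;
      size_t k = 0;
      while (true) {  // terminates because the real table is never full
        if (table[i] == key)
          return &table[i];
        if (table[i] == -1)  // empty bucket: the key is absent
          return nullptr;
        if (!k)
          k = 1 | (h >> 16);  // odd step, mirroring 1 | doubleHash(h) above
        i = (i + k) & mask;
      }
    }

    int main() {
      std::vector<int> table(8, -1);
      table[std::hash<int>()(7) & 7] = 7;  // plant one key
      return lookupSketch(table, 7) && !lookupSketch(table, 9) ? 0 : 1;
    }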
977
978 template <typename Key,
979 typename Value,
980 typename Extractor,
981 typename HashFunctions,
982 typename Traits,
983 typename KeyTraits,
984 typename Allocator>
985 template <typename HashTranslator, typename T>
986 inline typename HashTable<Key,
987 Value,
988 Extractor,
989 HashFunctions,
990 Traits,
991 KeyTraits,
992 Allocator>::LookupType
993 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
994 lookupForWriting(const T& key) {
995 ASSERT(!m_accessForbidden);
996 ASSERT(m_table);
997 registerModification();
998
999 ValueType* table = m_table;
1000 size_t k = 0;
1001 size_t sizeMask = tableSizeMask();
1002 unsigned h = HashTranslator::hash(key);
1003 size_t i = h & sizeMask;
1004
1005 UPDATE_ACCESS_COUNTS();
1006
1007 ValueType* deletedEntry = nullptr;
1008
1009 while (1) {
1010 ValueType* entry = table + i;
1011
1012 if (isEmptyBucket(*entry))
1013 return LookupType(deletedEntry ? deletedEntry : entry, false);
1014
1015 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
1016 if (HashTranslator::equal(Extractor::extract(*entry), key))
1017 return LookupType(entry, true);
1018
1019 if (isDeletedBucket(*entry))
1020 deletedEntry = entry;
1021 } else {
1022 if (isDeletedBucket(*entry))
1023 deletedEntry = entry;
1024 else if (HashTranslator::equal(Extractor::extract(*entry), key))
1025 return LookupType(entry, true);
1026 }
1027 UPDATE_PROBE_COUNTS();
1028 if (!k)
1029 k = 1 | doubleHash(h);
1030 i = (i + k) & sizeMask;
1031 }
1032 }
1033
1034 template <typename Key,
1035 typename Value,
1036 typename Extractor,
1037 typename HashFunctions,
1038 typename Traits,
1039 typename KeyTraits,
1040 typename Allocator>
1041 template <typename HashTranslator, typename T>
1042 inline typename HashTable<Key,
1043 Value,
1044 Extractor,
1045 HashFunctions,
1046 Traits,
1047 KeyTraits,
1048 Allocator>::FullLookupType
1049 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1050 fullLookupForWriting(const T& key) {
1051 ASSERT(!m_accessForbidden);
1052 ASSERT(m_table);
1053 registerModification();
1054
1055 ValueType* table = m_table;
1056 size_t k = 0;
1057 size_t sizeMask = tableSizeMask();
1058 unsigned h = HashTranslator::hash(key);
1059 size_t i = h & sizeMask;
1060
1061 UPDATE_ACCESS_COUNTS();
1062
1063 ValueType* deletedEntry = nullptr;
1064
1065 while (1) {
1066 ValueType* entry = table + i;
1067
1068 if (isEmptyBucket(*entry))
1069 return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
1070
1071 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
1072 if (HashTranslator::equal(Extractor::extract(*entry), key))
1073 return makeLookupResult(entry, true, h);
1074
1075 if (isDeletedBucket(*entry))
1076 deletedEntry = entry;
1077 } else {
1078 if (isDeletedBucket(*entry))
1079 deletedEntry = entry;
1080 else if (HashTranslator::equal(Extractor::extract(*entry), key))
1081 return makeLookupResult(entry, true, h);
1082 }
1083 UPDATE_PROBE_COUNTS();
1084 if (!k)
1085 k = 1 | doubleHash(h);
1086 i = (i + k) & sizeMask;
1087 }
1088 }
1089
1090 template <bool emptyValueIsZero>
1091 struct HashTableBucketInitializer;
1092
1093 template <>
1094 struct HashTableBucketInitializer<false> {
1095 STATIC_ONLY(HashTableBucketInitializer);
1096 template <typename Traits, typename Value>
1097 static void initialize(Value& bucket) {
1098 new (NotNull, &bucket) Value(Traits::emptyValue());
1099 }
1100 };
1101
1102 template <>
1103 struct HashTableBucketInitializer<true> {
1104 STATIC_ONLY(HashTableBucketInitializer);
1105 template <typename Traits, typename Value>
1106 static void initialize(Value& bucket) {
1107 // This initializes the bucket without copying the empty value. That
1108 // makes it possible to use this with types that don't support copying.
1109 // The memset to 0 looks like a slow operation but is optimized by the
1110 // compilers.
1111 memset(&bucket, 0, sizeof(bucket));
1112 }
1113 };
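The split between the two specializations comes down to what the empty value looks like in memory: when it is all-zero bits, memset is a valid initializer and involves no copy (so even non-copyable types work); otherwise the empty value must be constructed in place. A sketch with two hypothetical bucket types:

    #include <cstring>
    #include <new>

    struct ZeroEmpty { int key; int value; };  // empty state is all-zero bits

    struct NonZeroEmpty {
      int key;
      static NonZeroEmpty emptyValue() { return NonZeroEmpty{-1}; }  // empty is -1
    };

    void initialize(ZeroEmpty& bucket) {
      memset(&bucket, 0, sizeof(bucket));  // no copy constructor involved
    }

    void initialize(NonZeroEmpty& bucket) {
      new (&bucket) NonZeroEmpty(NonZeroEmpty::emptyValue());  // placement new
    }

    int main() {
      ZeroEmpty a;
      NonZeroEmpty b;
      initialize(a);
      initialize(b);
      return (a.key == 0 && b.key == -1) ? 0 : 1;
    }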
1114
1115 template <typename Key,
1116 typename Value,
1117 typename Extractor,
1118 typename HashFunctions,
1119 typename Traits,
1120 typename KeyTraits,
1121 typename Allocator>
1122 inline void
1123 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1124 initializeBucket(ValueType& bucket) {
1125 HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize<
1126 Traits>(bucket);
1127 }
1128
1129 template <typename Key,
1130 typename Value,
1131 typename Extractor,
1132 typename HashFunctions,
1133 typename Traits,
1134 typename KeyTraits,
1135 typename Allocator>
1136 template <typename HashTranslator, typename T, typename Extra>
1137 typename HashTable<Key,
1138 Value,
1139 Extractor,
1140 HashFunctions,
1141 Traits,
1142 KeyTraits,
1143 Allocator>::AddResult
1144 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1145 add(const T& key, const Extra& extra) {
1146 ASSERT(!m_accessForbidden);
1147 ASSERT(Allocator::isAllocationAllowed());
1148 if (!m_table)
1149 expand();
1150
1151 ASSERT(m_table);
1152
1153 ValueType* table = m_table;
1154 size_t k = 0;
1155 size_t sizeMask = tableSizeMask();
1156 unsigned h = HashTranslator::hash(key);
1157 size_t i = h & sizeMask;
1158
1159 UPDATE_ACCESS_COUNTS();
1160
1161 ValueType* deletedEntry = nullptr;
1162 ValueType* entry;
1163 while (1) {
1164 entry = table + i;
1165
1166 if (isEmptyBucket(*entry))
1167 break;
1168
1169 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
1170 if (HashTranslator::equal(Extractor::extract(*entry), key))
1171 return AddResult(this, entry, false);
1172
1173 if (isDeletedBucket(*entry))
1174 deletedEntry = entry;
1175 } else {
1176 if (isDeletedBucket(*entry))
1177 deletedEntry = entry;
1178 else if (HashTranslator::equal(Extractor::extract(*entry), key))
1179 return AddResult(this, entry, false);
1180 }
1181 UPDATE_PROBE_COUNTS();
1182 if (!k)
1183 k = 1 | doubleHash(h);
1184 i = (i + k) & sizeMask;
1185 }
1186
1187 registerModification();
1188
1189 if (deletedEntry) {
1190 // Overwrite any data left over from last use, using placement new or
1191 // memset.
1192 initializeBucket(*deletedEntry);
1193 entry = deletedEntry;
1194 --m_deletedCount;
1195 }
1196
1197 HashTranslator::translate(*entry, key, extra);
1198 ASSERT(!isEmptyOrDeletedBucket(*entry));
1199
1200 ++m_keyCount;
1201
1202 if (shouldExpand())
1203 entry = expand(entry);
1204
1205 return AddResult(this, entry, true);
1206 }
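The AddResult returned here carries the same information as the standard library's insert(): where the element lives and whether this call created it; the deleted-slot reuse and the possible expand() are invisible to the caller. A standard-library analogue of that contract:

    #include <cassert>
    #include <unordered_set>

    int main() {
      std::unordered_set<int> set;
      auto first = set.insert(42);   // {iterator, true}: a new entry
      auto second = set.insert(42);  // {iterator, false}: key already present
      assert(first.second && !second.second);
      assert(*first.first == *second.first);  // both name the stored element
      return 0;
    }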
1207
1208 template <typename Key,
1209 typename Value,
1210 typename Extractor,
1211 typename HashFunctions,
1212 typename Traits,
1213 typename KeyTraits,
1214 typename Allocator>
1215 template <typename HashTranslator, typename T, typename Extra>
1216 typename HashTable<Key,
1217 Value,
1218 Extractor,
1219 HashFunctions,
1220 Traits,
1221 KeyTraits,
1222 Allocator>::AddResult
1223 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1224 addPassingHashCode(const T& key, const Extra& extra) {
1225 ASSERT(!m_accessForbidden);
1226 ASSERT(Allocator::isAllocationAllowed());
1227 if (!m_table)
1228 expand();
1229
1230 FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key);
1231
1232 ValueType* entry = lookupResult.first.first;
1233 bool found = lookupResult.first.second;
1234 unsigned h = lookupResult.second;
1235
1236 if (found)
1237 return AddResult(this, entry, false);
1238
1239 registerModification();
1240
1241 if (isDeletedBucket(*entry)) {
1242 initializeBucket(*entry);
1243 --m_deletedCount;
1244 }
1245
1246 HashTranslator::translate(*entry, key, extra, h);
1247 ASSERT(!isEmptyOrDeletedBucket(*entry));
1248
1249 ++m_keyCount;
1250 if (shouldExpand())
1251 entry = expand(entry);
1252
1253 return AddResult(this, entry, true);
1254 }
1255
1256 template <typename Key,
1257 typename Value,
1258 typename Extractor,
1259 typename HashFunctions,
1260 typename Traits,
1261 typename KeyTraits,
1262 typename Allocator>
1263 Value*
1264 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1265 reinsert(ValueType& entry) {
1266 ASSERT(m_table);
1267 registerModification();
1268 ASSERT(!lookupForWriting(Extractor::extract(entry)).second);
1269 ASSERT(
1270 !isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first)));
1271 #if DUMP_HASHTABLE_STATS
1272 atomicIncrement(&HashTableStats::numReinserts);
1273 #endif
1274 #if DUMP_HASHTABLE_STATS_PER_TABLE
1275 ++m_stats->numReinserts;
1276 #endif
1277 Value* newEntry = lookupForWriting(Extractor::extract(entry)).first;
1278 Mover<ValueType, Allocator>::move(entry, *newEntry);
1279
1280 return newEntry;
1281 }
1282
1283 template <typename Key,
1284 typename Value,
1285 typename Extractor,
1286 typename HashFunctions,
1287 typename Traits,
1288 typename KeyTraits,
1289 typename Allocator>
1290 template <typename HashTranslator, typename T>
1291 inline typename HashTable<Key,
1292 Value,
1293 Extractor,
1294 HashFunctions,
1295 Traits,
1296 KeyTraits,
1297 Allocator>::iterator
1298 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1299 find(const T& key) {
1300 ValueType* entry = lookup<HashTranslator>(key);
1301 if (!entry)
1302 return end();
1303
1304 return makeKnownGoodIterator(entry);
1305 }
1306
1307 template <typename Key,
1308 typename Value,
1309 typename Extractor,
1310 typename HashFunctions,
1311 typename Traits,
1312 typename KeyTraits,
1313 typename Allocator>
1314 template <typename HashTranslator, typename T>
1315 inline typename HashTable<Key,
1316 Value,
1317 Extractor,
1318 HashFunctions,
1319 Traits,
1320 KeyTraits,
1321 Allocator>::const_iterator
1322 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1323 find(const T& key) const {
1324 ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
1325 if (!entry)
1326 return end();
1327
1328 return makeKnownGoodConstIterator(entry);
1329 }
1330
1331 template <typename Key,
1332 typename Value,
1333 typename Extractor,
1334 typename HashFunctions,
1335 typename Traits,
1336 typename KeyTraits,
1337 typename Allocator>
1338 template <typename HashTranslator, typename T>
1339 bool HashTable<Key,
1340 Value,
1341 Extractor,
1342 HashFunctions,
1343 Traits,
1344 KeyTraits,
1345 Allocator>::contains(const T& key) const {
1346 return const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
1347 }
1348
1349 template <typename Key,
1350 typename Value,
1351 typename Extractor,
1352 typename HashFunctions,
1353 typename Traits,
1354 typename KeyTraits,
1355 typename Allocator>
1356 void HashTable<Key,
1357 Value,
1358 Extractor,
1359 HashFunctions,
1360 Traits,
1361 KeyTraits,
1362 Allocator>::remove(ValueType* pos) {
1363 registerModification();
1364 #if DUMP_HASHTABLE_STATS
1365 atomicIncrement(&HashTableStats::numRemoves);
1366 #endif
1367 #if DUMP_HASHTABLE_STATS_PER_TABLE
1368 ++m_stats->numRemoves;
1369 #endif
1370
1371 ASSERT(!m_accessForbidden);
1372 #if ENABLE(ASSERT)
1373 m_accessForbidden = true;
1374 #endif
1375 deleteBucket(*pos);
1376 #if ENABLE(ASSERT)
1377 m_accessForbidden = false;
1378 #endif
1379 ++m_deletedCount;
1380 --m_keyCount;
1381
1382 if (shouldShrink())
1383 shrink();
1384 }
1385
1386 template <typename Key,
1387 typename Value,
1388 typename Extractor,
1389 typename HashFunctions,
1390 typename Traits,
1391 typename KeyTraits,
1392 typename Allocator>
1393 inline void
1394 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1395 remove(iterator it) {
1396 if (it == end())
1397 return;
1398 remove(const_cast<ValueType*>(it.m_iterator.m_position));
1399 }
1400
1401 template <typename Key,
1402 typename Value,
1403 typename Extractor,
1404 typename HashFunctions,
1405 typename Traits,
1406 typename KeyTraits,
1407 typename Allocator>
1408 inline void
1409 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1410 remove(const_iterator it) {
1411 if (it == end())
1412 return;
1413 remove(const_cast<ValueType*>(it.m_position));
1414 }
1415
1416 template <typename Key,
1417 typename Value,
1418 typename Extractor,
1419 typename HashFunctions,
1420 typename Traits,
1421 typename KeyTraits,
1422 typename Allocator>
1423 inline void
1424 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1425 remove(KeyPeekInType key) {
1426 remove(find(key));
1427 }
1428
1429 template <typename Key,
1430 typename Value,
1431 typename Extractor,
1432 typename HashFunctions,
1433 typename Traits,
1434 typename KeyTraits,
1435 typename Allocator>
1436 Value*
1437 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1438 allocateTable(unsigned size) {
1439 size_t allocSize = size * sizeof(ValueType);
1440 ValueType* result;
1441 // Assert that we will not use memset on things with a vtable entry. The
1442 // compiler will also check this on some platforms. We would like to check
1443 // this on the whole value (key-value pair), but std::is_polymorphic will return
1444 // false for a pair of two types, even if one of the components is
1445 // polymorphic.
1446 static_assert(
1447 !Traits::emptyValueIsZero || !std::is_polymorphic<KeyType>::value,
1448 "empty value cannot be zero for things with a vtable");
1449
1450 #if ENABLE(OILPAN)
1451 static_assert(Allocator::isGarbageCollected ||
1452 ((!AllowsOnlyPlacementNew<KeyType>::value ||
1453 !NeedsTracing<KeyType>::value) &&
1454 (!AllowsOnlyPlacementNew<ValueType>::value ||
1455 !NeedsTracing<ValueType>::value)),
1456 "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects that "
1457 "have trace methods into an off-heap HashTable");
1458 #endif
1459 if (Traits::emptyValueIsZero) {
1460 result = Allocator::template allocateZeroedHashTableBacking<ValueType,
1461 HashTable>(
1462 allocSize);
1463 } else {
1464 result = Allocator::template allocateHashTableBacking<ValueType, HashTable>(
1465 allocSize);
1466 for (unsigned i = 0; i < size; i++)
1467 initializeBucket(result[i]);
1468 }
1469 return result;
1470 }
1471
1472 template <typename Key,
1473 typename Value,
1474 typename Extractor,
1475 typename HashFunctions,
1476 typename Traits,
1477 typename KeyTraits,
1478 typename Allocator>
1479 void HashTable<Key,
1480 Value,
1481 Extractor,
1482 HashFunctions,
1483 Traits,
1484 KeyTraits,
1485 Allocator>::deleteAllBucketsAndDeallocate(ValueType* table,
1486 unsigned size) {
1487 if (!IsTriviallyDestructible<ValueType>::value) {
1488 for (unsigned i = 0; i < size; ++i) {
1489 // This code is called when the hash table is cleared or resized. We
1490 // have allocated a new backing store and we need to run the
1491 // destructors on the old backing store, as it is being freed. If we
1492 // are GCing we need to both call the destructor and mark the bucket
1493 // as deleted, otherwise the destructor gets called again when the
1494 // GC finds the backing store. With the default allocator it's
1495 // enough to call the destructor, since we will free the memory
1496 // explicitly and we won't see the memory with the bucket again.
1497 if (Allocator::isGarbageCollected) {
1498 if (!isEmptyOrDeletedBucket(table[i]))
1499 deleteBucket(table[i]);
1500 } else {
1501 if (!isDeletedBucket(table[i]))
1502 table[i].~ValueType();
1503 }
1504 }
1505 }
1506 Allocator::freeHashTableBacking(table);
1507 }
1508
1509 template <typename Key,
1510 typename Value,
1511 typename Extractor,
1512 typename HashFunctions,
1513 typename Traits,
1514 typename KeyTraits,
1515 typename Allocator>
1516 Value*
1517 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1518 expand(Value* entry) {
1519 unsigned newSize;
1520 if (!m_tableSize) {
1521 newSize = KeyTraits::minimumTableSize;
1522 } else if (mustRehashInPlace()) {
1523 newSize = m_tableSize;
1524 } else {
1525 newSize = m_tableSize * 2;
1526 RELEASE_ASSERT(newSize > m_tableSize);
1527 }
1528
1529 return rehash(newSize, entry);
1530 }
1531
1532 template <typename Key,
1533 typename Value,
1534 typename Extractor,
1535 typename HashFunctions,
1536 typename Traits,
1537 typename KeyTraits,
1538 typename Allocator>
1539 Value*
1540 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1541 expandBuffer(unsigned newTableSize, Value* entry, bool& success) {
1542 success = false;
1543 ASSERT(m_tableSize < newTableSize);
1544 if (!Allocator::expandHashTableBacking(m_table,
1545 newTableSize * sizeof(ValueType)))
1546 return nullptr;
1547
1548 success = true;
1549
1550 Value* newEntry = nullptr;
1551 unsigned oldTableSize = m_tableSize;
1552 ValueType* originalTable = m_table;
1553
1554 ValueType* temporaryTable = allocateTable(oldTableSize);
1555 for (unsigned i = 0; i < oldTableSize; i++) {
1556 if (&m_table[i] == entry)
1557 newEntry = &temporaryTable[i];
1558 if (isEmptyOrDeletedBucket(m_table[i])) {
1559 ASSERT(&m_table[i] != entry);
1560 if (Traits::emptyValueIsZero) {
1561 memset(&temporaryTable[i], 0, sizeof(ValueType));
1562 } else {
1563 initializeBucket(temporaryTable[i]);
1564 }
1565 } else {
1566 Mover<ValueType, Allocator>::move(m_table[i], temporaryTable[i]);
1567 }
1568 }
1569 m_table = temporaryTable;
1570
1571 if (Traits::emptyValueIsZero) {
1572 memset(originalTable, 0, newTableSize * sizeof(ValueType));
1573 } else {
1574 for (unsigned i = 0; i < newTableSize; i++)
1575 initializeBucket(originalTable[i]);
1576 }
1577 newEntry = rehashTo(originalTable, newTableSize, newEntry);
1578
1579 ASSERT(!m_accessForbidden);
1580 #if ENABLE(ASSERT)
1581 m_accessForbidden = true;
1582 #endif
1583 deleteAllBucketsAndDeallocate(temporaryTable, oldTableSize);
1584 #if ENABLE(ASSERT)
1585 m_accessForbidden = false;
1586 #endif
1587
1588 return newEntry;
1589 }
1590
1591 template <typename Key,
1592 typename Value,
1593 typename Extractor,
1594 typename HashFunctions,
1595 typename Traits,
1596 typename KeyTraits,
1597 typename Allocator>
1598 Value*
1599 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1600 rehashTo(ValueType* newTable, unsigned newTableSize, Value* entry) {
1601 unsigned oldTableSize = m_tableSize;
1602 ValueType* oldTable = m_table;
1603
1604 #if DUMP_HASHTABLE_STATS
1605 if (oldTableSize != 0)
1606 atomicIncrement(&HashTableStats::numRehashes);
1607 #endif
1608
1609 #if DUMP_HASHTABLE_STATS_PER_TABLE
1610 if (oldTableSize != 0)
1611 ++m_stats->numRehashes;
1612 #endif
1613
1614 m_table = newTable;
1615 m_tableSize = newTableSize;
1616
1617 Value* newEntry = nullptr;
1618 for (unsigned i = 0; i != oldTableSize; ++i) {
1619 if (isEmptyOrDeletedBucket(oldTable[i])) {
1620 ASSERT(&oldTable[i] != entry);
1621 continue;
1622 }
1623 Value* reinsertedEntry = reinsert(oldTable[i]);
1624 if (&oldTable[i] == entry) {
1625 ASSERT(!newEntry);
1626 newEntry = reinsertedEntry;
1627 }
1628 }
1629
1630 m_deletedCount = 0;
1631
1632 return newEntry;
1633 }
1634
1635 template <typename Key,
1636 typename Value,
1637 typename Extractor,
1638 typename HashFunctions,
1639 typename Traits,
1640 typename KeyTraits,
1641 typename Allocator>
1642 Value*
1643 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1644 rehash(unsigned newTableSize, Value* entry) {
1645 unsigned oldTableSize = m_tableSize;
1646 ValueType* oldTable = m_table;
1647
1648 #if DUMP_HASHTABLE_STATS
1649 if (oldTableSize != 0)
1650 atomicIncrement(&HashTableStats::numRehashes);
1651 #endif
1652
1653 #if DUMP_HASHTABLE_STATS_PER_TABLE
1654 if (oldTableSize != 0)
1655 ++m_stats->numRehashes;
1656 #endif
1657
1658 // The Allocator::isGarbageCollected check is not needed for correctness; it
1659 // is a static hint that lets the compiler drop this branch entirely, because
1660 // expandBuffer always reports failure when Allocator is a PartitionAllocator.
1661 if (Allocator::isGarbageCollected && newTableSize > oldTableSize) {
1662 bool success;
1663 Value* newEntry = expandBuffer(newTableSize, entry, success);
1664 if (success)
1665 return newEntry;
1666 }
1667
1668 ValueType* newTable = allocateTable(newTableSize);
1669 Value* newEntry = rehashTo(newTable, newTableSize, entry);
1670
1671 ASSERT(!m_accessForbidden);
1672 #if ENABLE(ASSERT)
1673 m_accessForbidden = true;
1674 #endif
1675 deleteAllBucketsAndDeallocate(oldTable, oldTableSize);
1676 #if ENABLE(ASSERT)
1677 m_accessForbidden = false;
1678 #endif
1679
1680 return newEntry;
1681 }
1682
1683 template <typename Key,
1684 typename Value,
1685 typename Extractor,
1686 typename HashFunctions,
1687 typename Traits,
1688 typename KeyTraits,
1689 typename Allocator>
1690 void HashTable<Key,
1691 Value,
1692 Extractor,
1693 HashFunctions,
1694 Traits,
1695 KeyTraits,
1696 Allocator>::clear() {
1697 registerModification();
1698 if (!m_table)
1699 return;
1700
1701 ASSERT(!m_accessForbidden);
1702 #if ENABLE(ASSERT)
1703 m_accessForbidden = true;
1704 #endif
1705 deleteAllBucketsAndDeallocate(m_table, m_tableSize);
1706 #if ENABLE(ASSERT)
1707 m_accessForbidden = false;
1708 #endif
1709 m_table = nullptr;
1710 m_tableSize = 0;
1711 m_keyCount = 0;
1712 }
1713
1714 template <typename Key,
1715 typename Value,
1716 typename Extractor,
1717 typename HashFunctions,
1718 typename Traits,
1719 typename KeyTraits,
1720 typename Allocator>
1721 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1722 HashTable(const HashTable& other)
1723 : m_table(nullptr),
1724 m_tableSize(0),
1725 m_keyCount(0),
1726 m_deletedCount(0),
1727 m_queueFlag(false)
1728 #if ENABLE(ASSERT)
1729 ,
1730 m_accessForbidden(false),
1731 m_modifications(0)
1732 #endif
1733 #if DUMP_HASHTABLE_STATS_PER_TABLE
1734 ,
1735 m_stats(adoptPtr(new Stats(*other.m_stats)))
1736 #endif
1249 { 1737 {
1250 ASSERT(!m_accessForbidden); 1738 // Copy the hash table the dumb way, by adding each element to the new
1251 std::swap(m_table, other.m_table); 1739 // table. It might be more efficient to copy the table slots, but it's not
1252 std::swap(m_tableSize, other.m_tableSize); 1740 // clear that efficiency is needed.
1253 std::swap(m_keyCount, other.m_keyCount); 1741 const_iterator end = other.end();
1254 // std::swap does not work for bit fields. 1742 for (const_iterator it = other.begin(); it != end; ++it)
1255 unsigned deleted = m_deletedCount; 1743 add(*it);
1256 m_deletedCount = other.m_deletedCount; 1744 }
1257 other.m_deletedCount = deleted; 1745
1258 ASSERT(!m_queueFlag); 1746 template <typename Key,
1259 ASSERT(!other.m_queueFlag); 1747 typename Value,
1748 typename Extractor,
1749 typename HashFunctions,
1750 typename Traits,
1751 typename KeyTraits,
1752 typename Allocator>
1753 void HashTable<Key,
1754 Value,
1755 Extractor,
1756 HashFunctions,
1757 Traits,
1758 KeyTraits,
1759 Allocator>::swap(HashTable& other) {
1760 ASSERT(!m_accessForbidden);
1761 std::swap(m_table, other.m_table);
1762 std::swap(m_tableSize, other.m_tableSize);
1763 std::swap(m_keyCount, other.m_keyCount);
1764 // std::swap does not work for bit fields.
1765 unsigned deleted = m_deletedCount;
1766 m_deletedCount = other.m_deletedCount;
1767 other.m_deletedCount = deleted;
1768 ASSERT(!m_queueFlag);
1769 ASSERT(!other.m_queueFlag);
1260 1770
1261 #if ENABLE(ASSERT) 1771 #if ENABLE(ASSERT)
1262 std::swap(m_modifications, other.m_modifications); 1772 std::swap(m_modifications, other.m_modifications);
1263 #endif 1773 #endif
1264 1774
1265 #if DUMP_HASHTABLE_STATS_PER_TABLE 1775 #if DUMP_HASHTABLE_STATS_PER_TABLE
1266 m_stats.swap(other.m_stats); 1776 m_stats.swap(other.m_stats);
1267 #endif 1777 #endif
1268 } 1778 }
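The manual three-step exchange above is forced by the language: std::swap takes non-const references, and a reference cannot bind to a bit-field member, so m_deletedCount must round-trip through a plain unsigned. A minimal sketch (Counts and swapDeleted are names local to the sketch):

    struct Counts {
      unsigned deleted : 31;
      unsigned queued : 1;
    };

    void swapDeleted(Counts& a, Counts& b) {
      // std::swap(a.deleted, b.deleted);  // ill-formed: cannot bind a
      //                                   // reference to a bit-field
      unsigned tmp = a.deleted;  // copy out through an ordinary unsigned
      a.deleted = b.deleted;
      b.deleted = tmp;
    }

    int main() {
      Counts a{5, 0}, b{9, 1};
      swapDeleted(a, b);
      return (a.deleted == 9 && b.deleted == 5) ? 0 : 1;
    }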
1269 1779
1270 template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator> 1780 template <typename Key,
1271 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>& HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::operator=(const HashTable& other) 1781 typename Value,
1272 { 1782 typename Extractor,
1273 HashTable tmp(other); 1783 typename HashFunctions,
1274 swap(tmp); 1784 typename Traits,
1785 typename KeyTraits,
1786 typename Allocator>
1787 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>&
1788 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1789 operator=(const HashTable& other) {
1790 HashTable tmp(other);
1791 swap(tmp);
1792 return *this;
1793 }
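This operator= is the classic copy-and-swap idiom: build a copy, exchange internals with it, and let the temporary's destructor release the old state; self-assignment falls out correctly at the cost of one extra copy. A generic self-contained sketch of the same shape (Buffer is a stand-in type, not WTF code):

    #include <algorithm>
    #include <cstddef>
    #include <utility>

    class Buffer {
     public:
      explicit Buffer(std::size_t n) : m_size(n), m_data(new int[n]()) {}
      Buffer(const Buffer& other)
          : m_size(other.m_size), m_data(new int[other.m_size]) {
        std::copy(other.m_data, other.m_data + m_size, m_data);
      }
      ~Buffer() { delete[] m_data; }

      Buffer& operator=(const Buffer& other) {
        Buffer tmp(other);  // may throw; *this is untouched if it does
        swap(tmp);          // steal tmp's storage
        return *this;       // tmp's destructor frees the old storage
      }

      void swap(Buffer& other) {
        std::swap(m_size, other.m_size);
        std::swap(m_data, other.m_data);
      }

     private:
      std::size_t m_size;
      int* m_data;
    };

    int main() {
      Buffer a(4), b(8);
      a = b;  // copy-and-swap
      a = a;  // self-assignment is safe, merely wasteful
      return 0;
    }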
1794
1795 template <WeakHandlingFlag weakHandlingFlag,
1796 typename Key,
1797 typename Value,
1798 typename Extractor,
1799 typename HashFunctions,
1800 typename Traits,
1801 typename KeyTraits,
1802 typename Allocator>
1803 struct WeakProcessingHashTableHelper;
1804
1805 template <typename Key,
1806 typename Value,
1807 typename Extractor,
1808 typename HashFunctions,
1809 typename Traits,
1810 typename KeyTraits,
1811 typename Allocator>
1812 struct WeakProcessingHashTableHelper<NoWeakHandlingInCollections,
1813 Key,
1814 Value,
1815 Extractor,
1816 HashFunctions,
1817 Traits,
1818 KeyTraits,
1819 Allocator> {
1820 STATIC_ONLY(WeakProcessingHashTableHelper);
1821 static void process(typename Allocator::Visitor* visitor, void* closure) {}
1822 static void ephemeronIteration(typename Allocator::Visitor* visitor,
1823 void* closure) {}
1824 static void ephemeronIterationDone(typename Allocator::Visitor* visitor,
1825 void* closure) {}
1826 };
1827
1828 template <typename Key,
1829 typename Value,
1830 typename Extractor,
1831 typename HashFunctions,
1832 typename Traits,
1833 typename KeyTraits,
1834 typename Allocator>
1835 struct WeakProcessingHashTableHelper<WeakHandlingInCollections,
1836 Key,
1837 Value,
1838 Extractor,
1839 HashFunctions,
1840 Traits,
1841 KeyTraits,
1842 Allocator> {
1843 STATIC_ONLY(WeakProcessingHashTableHelper);
1844 // Used for purely weak and for weak-and-strong tables (ephemerons).
1845 static void process(typename Allocator::Visitor* visitor, void* closure) {
1846 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits,
1847 Allocator>
1848 HashTableType;
1849 HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1850 if (!table->m_table)
1851 return;
1852 // Now perform weak processing (this is a no-op if the backing was
1853 // accessible through an iterator and was already marked strongly).
1854 typedef typename HashTableType::ValueType ValueType;
1855 for (ValueType* element = table->m_table + table->m_tableSize - 1;
1856 element >= table->m_table; element--) {
1857 if (!HashTableType::isEmptyOrDeletedBucket(*element)) {
1858 // At this stage calling trace can make no difference
1859 // (everything is already traced), but we use the return value
1860 // to remove things from the collection.
1861
1862 // FIXME: This should be rewritten so that it can check whether the
1863 // element is dead without calling trace, since calling trace during
1864 // the weak processing stage is semantically incorrect.
1865 if (TraceInCollectionTrait<WeakHandlingInCollections,
1866 WeakPointersActWeak, ValueType,
1867 Traits>::trace(visitor, *element)) {
1868 table->registerModification();
1869 HashTableType::deleteBucket(*element); // Also calls the destructor.
1870 table->m_deletedCount++;
1871 table->m_keyCount--;
1872 // We don't rehash the backing until the next add or delete,
1873 // because that would cause allocation during GC.
1874 }
1875 }
1876 }
1877 }
1878
1879 // Called repeatedly for tables that have both weak and strong pointers.
1880 static void ephemeronIteration(typename Allocator::Visitor* visitor,
1881 void* closure) {
1882 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits,
1883 Allocator>
1884 HashTableType;
1885 HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1886 ASSERT(table->m_table);
1887 // Check the hash table for elements that we now know will not be
1888 // removed by weak processing. Those elements need to have their strong
1889 // pointers traced.
1890 typedef typename HashTableType::ValueType ValueType;
1891 for (ValueType* element = table->m_table + table->m_tableSize - 1;
1892 element >= table->m_table; element--) {
1893 if (!HashTableType::isEmptyOrDeletedBucket(*element))
1894 TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak,
1895 ValueType, Traits>::trace(visitor, *element);
1896 }
1897 }
1898
1899 // Called when the ephemeron iteration is done and before running the per
1900 // thread weak processing. It is guaranteed to be called before any thread
1901 // is resumed.
1902 static void ephemeronIterationDone(typename Allocator::Visitor* visitor,
1903 void* closure) {
1904 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits,
1905 Allocator>
1906 HashTableType;
1907 HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1908 ASSERT(Allocator::weakTableRegistered(visitor, table));
1909 table->clearEnqueued();
1910 }
1911 };
1912
1913 template <typename Key,
1914 typename Value,
1915 typename Extractor,
1916 typename HashFunctions,
1917 typename Traits,
1918 typename KeyTraits,
1919 typename Allocator>
1920 template <typename VisitorDispatcher>
1921 void HashTable<Key,
1922 Value,
1923 Extractor,
1924 HashFunctions,
1925 Traits,
1926 KeyTraits,
1927 Allocator>::trace(VisitorDispatcher visitor) {
1928 // If someone else already marked the backing and queued up the trace and/or
1929 // weak callback then we are done. This optimization does not happen for
1930 // ListHashSet since its iterator does not point at the backing.
1931 if (!m_table || Allocator::isHeapObjectAlive(m_table))
1932 return;
1933 // Normally, we mark the backing store without performing trace. This means
1934 // it is marked live, but the pointers inside it are not marked. Instead we
1935 // will mark the pointers below. However, for backing stores that contain
1936 // weak pointers the handling is rather different. We don't mark the
1937 // backing store here, so the marking GC will leave the backing unmarked. If
1938 // the backing is found in any other way than through its HashTable (i.e. from
1939 // an iterator) then the mark bit will be set and the pointers will be
1940 // marked strongly, avoiding problems with iterating over things that
1941 // disappear due to weak processing while we are iterating over them. We
1942 // register the backing store pointer for delayed marking which will take
1943 // place after we know if the backing is reachable from elsewhere. We also
1944 // register a weakProcessing callback which will perform weak processing if
1945 // needed.
1946 if (Traits::weakHandlingFlag == NoWeakHandlingInCollections) {
1947 Allocator::markNoTracing(visitor, m_table);
1948 } else {
1949 Allocator::registerDelayedMarkNoTracing(visitor, m_table);
1950 // Since we're delaying marking this HashTable, it is possible that
1951 // registerWeakMembers is called multiple times (in rare
1952 // cases). However, that shouldn't cause any issues.
1953 Allocator::registerWeakMembers(
1954 visitor, this, m_table,
1955 WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value,
1956 Extractor, HashFunctions, Traits,
1957 KeyTraits, Allocator>::process);
1958 }
1959 if (NeedsTracingTrait<Traits>::value) {
1960 if (Traits::weakHandlingFlag == WeakHandlingInCollections) {
1961 // If we have both strong and weak pointers in the collection then
1962 // we queue up the collection for fixed point iteration a la
1963 // Ephemerons:
1964 // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also
1965 // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak
1966 ASSERT(!enqueued() || Allocator::weakTableRegistered(visitor, this));
1967 if (!enqueued()) {
1968 Allocator::registerWeakTable(
1969 visitor, this,
1970 WeakProcessingHashTableHelper<
1971 Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions,
1972 Traits, KeyTraits, Allocator>::ephemeronIteration,
1973 WeakProcessingHashTableHelper<
1974 Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions,
1975 Traits, KeyTraits, Allocator>::ephemeronIterationDone);
1976 setEnqueued();
1977 }
1978 // We don't need to trace the elements here, since registering as a
1979 // weak table above will cause them to be traced (perhaps several
1980 // times). It's better to wait until everything else is traced
1981 // before tracing the elements for the first time; this may reduce
1982 // (by one) the number of iterations needed to get to a fixed point.
1983 return;
1984 }
1985 for (ValueType* element = m_table + m_tableSize - 1; element >= m_table;
1986 element--) {
1987 if (!isEmptyOrDeletedBucket(*element))
1988 Allocator::template trace<VisitorDispatcher, ValueType, Traits>(
1989 visitor, *element);
1990 }
1991 }
1992 }
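The ephemeron registration above feeds a fixed-point loop in the collector: each pass over a weak-and-strong table may make more keys live, which can in turn make entries of other registered tables live, so the visitor keeps re-scanning until a full pass marks nothing new. A conceptual sketch of that driver loop only (plain C++ with callback stand-ins; this is not the Oilpan API):

    #include <functional>
    #include <vector>

    // Each callback scans one ephemeron table, marking values whose keys
    // have become live, and reports whether it marked anything new.
    void runToFixedPoint(std::vector<std::function<bool()>>& tableScans) {
      bool markedSomething = true;
      while (markedSomething) {
        markedSomething = false;
        for (auto& scan : tableScans)
          markedSomething |= scan();
      }
    }

    int main() {
      int marks = 0;
      std::vector<std::function<bool()>> scans = {
          [&] { return ++marks < 3; }  // pretends to mark on the first two passes
      };
      runToFixedPoint(scans);
      return marks == 3 ? 0 : 1;
    }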
1993
1994 // iterator adapters
1995
1996 template <typename HashTableType, typename Traits>
1997 struct HashTableConstIteratorAdapter {
1998 STACK_ALLOCATED();
1999 HashTableConstIteratorAdapter() {}
2000 HashTableConstIteratorAdapter(
2001 const typename HashTableType::const_iterator& impl)
2002 : m_impl(impl) {}
2003 typedef typename Traits::IteratorConstGetType GetType;
2004 typedef
2005 typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType;
2006
2007 GetType get() const {
2008 return const_cast<GetType>(SourceGetType(m_impl.get()));
2009 }
2010 typename Traits::IteratorConstReferenceType operator*() const {
2011 return Traits::getToReferenceConstConversion(get());
2012 }
2013 GetType operator->() const { return get(); }
2014
2015 HashTableConstIteratorAdapter& operator++() {
2016 ++m_impl;
1275 return *this; 2017 return *this;
1276 } 2018 }
1277 2019 // postfix ++ intentionally omitted
1278 template <WeakHandlingFlag weakHandlingFlag, typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator> 2020
1279 struct WeakProcessingHashTableHelper; 2021 typename HashTableType::const_iterator m_impl;
1280
1281 template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1282 struct WeakProcessingHashTableHelper<NoWeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
1283 STATIC_ONLY(WeakProcessingHashTableHelper);
1284 static void process(typename Allocator::Visitor* visitor, void* closure) {}
1285 static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure) {}
1286 static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure) {}
1287 }; 2022 };
1288 2023
1289 template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator> 2024 template <typename HashTableType, typename Traits>
1290 struct WeakProcessingHashTableHelper<WeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> { 2025 struct HashTableIteratorAdapter {
1291 STATIC_ONLY(WeakProcessingHashTableHelper); 2026 STACK_ALLOCATED();
1292 // Used for purely weak and for weak-and-strong tables (ephemerons). 2027 typedef typename Traits::IteratorGetType GetType;
1293 static void process(typename Allocator::Visitor* visitor, void* closure) 2028 typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType;
1294 { 2029
1295 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType; 2030 HashTableIteratorAdapter() {}
1296 HashTableType* table = reinterpret_cast<HashTableType*>(closure); 2031 HashTableIteratorAdapter(const typename HashTableType::iterator& impl)
1297 if (!table->m_table) 2032 : m_impl(impl) {}
1298 return; 2033
1299 // Now perform weak processing (this is a no-op if the backing was 2034 GetType get() const {
1300 // accessible through an iterator and was already marked strongly). 2035 return const_cast<GetType>(SourceGetType(m_impl.get()));
1301 typedef typename HashTableType::ValueType ValueType; 2036 }
1302 for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) { 2037 typename Traits::IteratorReferenceType operator*() const {
1303 if (!HashTableType::isEmptyOrDeletedBucket(*element)) { 2038 return Traits::getToReferenceConversion(get());
1304 // At this stage calling trace can make no difference 2039 }
1305 // (everything is already traced), but we use the return value 2040 GetType operator->() const { return get(); }
1306 // to remove things from the collection. 2041
1307 2042 HashTableIteratorAdapter& operator++() {
1308 // FIXME: This should be rewritten so that it can check whether the 2043 ++m_impl;
1309 // element is dead without calling trace, since calling trace during 2044 return *this;
1310 // the weak processing stage is semantically incorrect. 2045 }
1311 if (TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element)) { 2046 // postfix ++ intentionally omitted
1312 table->registerModification(); 2047
1313 HashTableType::deleteBucket(*element); // Also calls the destructor. 2048 operator HashTableConstIteratorAdapter<HashTableType, Traits>() {
1314 table->m_deletedCount++; 2049 typename HashTableType::const_iterator i = m_impl;
1315 table->m_keyCount--; 2050 return i;
1316 // We don't rehash the backing until the next add or delete, 2051 }
1317 // because that would cause allocation during GC. 2052
1318 } 2053 typename HashTableType::iterator m_impl;
1319 }
1320 }
1321 }
1322
1323 // Called repeatedly for tables that have both weak and strong pointers.
1324 static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure)
1325 {
1326 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
1327 HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1328 ASSERT(table->m_table);
1329 // Check the hash table for elements that we now know will not be
1330 // removed by weak processing. Those elements need to have their strong
1331 // pointers traced.
1332 typedef typename HashTableType::ValueType ValueType;
1333 for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
1334 if (!HashTableType::isEmptyOrDeletedBucket(*element))
1335 TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element);
1336 }
1337 }
1338
1339 // Called when the ephemeron iteration is done and before running the per
1340 // thread weak processing. It is guaranteed to be called before any thread
1341 // is resumed.
1342 static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure)
1343 {
1344 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
1345 HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1346 ASSERT(Allocator::weakTableRegistered(visitor, table));
1347 table->clearEnqueued();
1348 }
1349 }; 2054 };
1350 2055
1351 template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
1352 template <typename VisitorDispatcher>
1353 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::trace(VisitorDispatcher visitor)
1354 {
1355 // If someone else already marked the backing and queued up the trace and/or
1356 // weak callback then we are done. This optimization does not happen for
1357 // ListHashSet since its iterator does not point at the backing.
1358 if (!m_table || Allocator::isHeapObjectAlive(m_table))
1359 return;
1360 // Normally, we mark the backing store without performing trace. This means
1361 // it is marked live, but the pointers inside it are not marked. Instead we
1362 // will mark the pointers below. However, for backing stores that contain
1363 // weak pointers the handling is rather different. We don't mark the
1364 // backing store here, so the marking GC will leave the backing unmarked. If
1365 // the backing is found in any other way than through its HashTable (i.e. from
1366 // an iterator) then the mark bit will be set and the pointers will be
1367 // marked strongly, avoiding problems with iterating over things that
1368 // disappear due to weak processing while we are iterating over them. We
1369 // register the backing store pointer for delayed marking which will take
1370 // place after we know if the backing is reachable from elsewhere. We also
1371 // register a weakProcessing callback which will perform weak processing if
1372 // needed.
1373 if (Traits::weakHandlingFlag == NoWeakHandlingInCollections) {
1374 Allocator::markNoTracing(visitor, m_table);
1375 } else {
1376 Allocator::registerDelayedMarkNoTracing(visitor, m_table);
1377 // Since we're delaying marking this HashTable, registerWeakMembers may
1378 // be called multiple times (in rare cases). However, this does not
1379 // cause any issues.
1380 Allocator::registerWeakMembers(visitor, this, m_table, WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::process);
1381 }
1382 if (NeedsTracingTrait<Traits>::value) {
1383 if (Traits::weakHandlingFlag == WeakHandlingInCollections) {
1384 // If we have both strong and weak pointers in the collection then
1385 // we queue up the collection for fixed point iteration a la
1386 // Ephemerons:
1387 // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also
1388 // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak
1389 ASSERT(!enqueued() || Allocator::weakTableRegistered(visitor, this));
1390 if (!enqueued()) {
1391 Allocator::registerWeakTable(visitor, this,
1392 WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIteration,
1393 WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIterationDone);
1394 setEnqueued();
1395 }
1396 // We don't need to trace the elements here, since registering as a
1397 // weak table above will cause them to be traced (perhaps several
1398 // times). It's better to wait until everything else is traced
1399 // before tracing the elements for the first time; this may reduce
1400 // (by one) the number of iterations needed to get to a fixed point.
1401 return;
1402 }
1403 for (ValueType* element = m_table + m_tableSize - 1; element >= m_table; element--) {
1404 if (!isEmptyOrDeletedBucket(*element))
1405 Allocator::template trace<VisitorDispatcher, ValueType, Traits>(visitor, *element);
1406 }
1407 }
1408 }
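The fixed-point iteration referenced in the ephemeron comments above is easier to see in miniature. Below is a toy sketch using plain standard containers, with nothing from WTF or the Oilpan visitor API; in the real code this loop is driven by the GC through the registerWeakTable callbacks.

#include <map>
#include <set>

// Illustrative-only ephemeron marking: a value is traced only once its key
// is known to be live, and tracing a value can make further keys live, so
// we iterate until nothing changes (the fixed point).
typedef int Object;

void markEphemerons(const std::multimap<Object, Object>& weakTable,
                    std::set<Object>& marked) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (const auto& entry : weakTable) {
      if (marked.count(entry.first) && !marked.count(entry.second)) {
        marked.insert(entry.second);  // key is live, so trace its value
        changed = true;               // may unlock other entries; go again
      }
    }
  }
}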
1409
1410 // iterator adapters
1411
1412 template <typename HashTableType, typename Traits> struct HashTableConstIteratorAdapter {
1413 STACK_ALLOCATED();
1414 HashTableConstIteratorAdapter() {}
1415 HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {}
1416 typedef typename Traits::IteratorConstGetType GetType;
1417 typedef typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType;
1418
1419 GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
1420 typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
1421 GetType operator->() const { return get(); }
1422
1423 HashTableConstIteratorAdapter& operator++() { ++m_impl; return *this; }
1424 // postfix ++ intentionally omitted
1425
1426 typename HashTableType::const_iterator m_impl;
1427 };
1428
1429 template <typename HashTableType, typename Traits> struct HashTableIteratorAdapter {
1430 STACK_ALLOCATED();
1431 typedef typename Traits::IteratorGetType GetType;
1432 typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType;
1433
1434 HashTableIteratorAdapter() {}
1435 HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {}
1436
1437 GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
1438 typename Traits::IteratorReferenceType operator*() const { return Traits::getToReferenceConversion(get()); }
1439 GetType operator->() const { return get(); }
1440
1441 HashTableIteratorAdapter& operator++() { ++m_impl; return *this; }
1442 // postfix ++ intentionally omitted
1443
1444 operator HashTableConstIteratorAdapter<HashTableType, Traits>()
1445 {
1446 typename HashTableType::const_iterator i = m_impl;
1447 return i;
1448 }
1449
1450 typename HashTableType::iterator m_impl;
1451 };
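The conversion operator at the end of HashTableIteratorAdapter is the usual "mutable iterator widens to const iterator" pattern. Here is a minimal self-contained sketch of the same idea with hypothetical simplified types (Adapter and ConstAdapter are not the WTF names):

#include <vector>

// Hypothetical simplified adapters demonstrating the conversion pattern.
template <typename It>
struct ConstAdapter {
  It m_impl;
};

template <typename It>
struct Adapter {
  It m_impl;
  // Mirror of the operator HashTableConstIteratorAdapter above: a mutable
  // adapter converts implicitly to its const counterpart.
  operator ConstAdapter<It>() const { return ConstAdapter<It>{m_impl}; }
};

int main() {
  std::vector<int> v;
  v.push_back(1);
  Adapter<std::vector<int>::iterator> it = {v.begin()};
  ConstAdapter<std::vector<int>::iterator> cit = it;  // implicit widening
  return cit.m_impl == it.m_impl ? 0 : 1;             // 0: same position
}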
1452
1453 template <typename T, typename U> 2056 template <typename T, typename U>
1454 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b) 2057 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a,
1455 { 2058 const HashTableConstIteratorAdapter<T, U>& b) {
1456 return a.m_impl == b.m_impl; 2059 return a.m_impl == b.m_impl;
1457 } 2060 }
1458 2061
1459 template <typename T, typename U> 2062 template <typename T, typename U>
1460 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b) 2063 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a,
1461 { 2064 const HashTableConstIteratorAdapter<T, U>& b) {
1462 return a.m_impl != b.m_impl; 2065 return a.m_impl != b.m_impl;
1463 } 2066 }
1464 2067
1465 template <typename T, typename U> 2068 template <typename T, typename U>
1466 inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b) 2069 inline bool operator==(const HashTableIteratorAdapter<T, U>& a,
1467 { 2070 const HashTableIteratorAdapter<T, U>& b) {
1468 return a.m_impl == b.m_impl; 2071 return a.m_impl == b.m_impl;
1469 } 2072 }
1470 2073
1471 template <typename T, typename U> 2074 template <typename T, typename U>
1472 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b) 2075 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a,
1473 { 2076 const HashTableIteratorAdapter<T, U>& b) {
1474 return a.m_impl != b.m_impl; 2077 return a.m_impl != b.m_impl;
1475 } 2078 }
1476 2079
1477 // All 4 combinations of ==, != and const, non-const. 2080 // All 4 combinations of ==, != and const, non-const.
1478 template <typename T, typename U> 2081 template <typename T, typename U>
1479 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b) 2082 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a,
1480 { 2083 const HashTableIteratorAdapter<T, U>& b) {
1481 return a.m_impl == b.m_impl; 2084 return a.m_impl == b.m_impl;
1482 } 2085 }
1483 2086
1484 template <typename T, typename U> 2087 template <typename T, typename U>
1485 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b) 2088 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a,
1486 { 2089 const HashTableIteratorAdapter<T, U>& b) {
1487 return a.m_impl != b.m_impl; 2090 return a.m_impl != b.m_impl;
1488 } 2091 }
1489 2092
1490 template <typename T, typename U> 2093 template <typename T, typename U>
1491 inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b) 2094 inline bool operator==(const HashTableIteratorAdapter<T, U>& a,
1492 { 2095 const HashTableConstIteratorAdapter<T, U>& b) {
1493 return a.m_impl == b.m_impl; 2096 return a.m_impl == b.m_impl;
1494 } 2097 }
1495 2098
1496 template <typename T, typename U> 2099 template <typename T, typename U>
1497 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b) 2100 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a,
1498 { 2101 const HashTableConstIteratorAdapter<T, U>& b) {
1499 return a.m_impl != b.m_impl; 2102 return a.m_impl != b.m_impl;
1500 } 2103 }
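A note on why all four const/non-const combinations are spelled out: without the heterogeneous overloads, a mixed comparison would not compile in either order. A hypothetical sketch with simplified stand-in types (not the WTF adapters):

#include <vector>

template <typename It> struct CIt { It m_impl; };  // stand-in const adapter
template <typename It> struct MIt { It m_impl; };  // stand-in mutable adapter

// The heterogeneous pair: both argument orders must be provided explicitly,
// exactly as the overload set above does for the real adapters.
template <typename It>
bool operator==(const CIt<It>& a, const MIt<It>& b) { return a.m_impl == b.m_impl; }
template <typename It>
bool operator==(const MIt<It>& a, const CIt<It>& b) { return a.m_impl == b.m_impl; }

int main() {
  std::vector<int> v(1);
  MIt<std::vector<int>::iterator> it = {v.begin()};
  CIt<std::vector<int>::iterator> cit = {v.begin()};
  // Both orderings compile only because both overloads exist.
  return (it == cit && cit == it) ? 0 : 1;
}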
1501 2104
1502 template <typename Collection1, typename Collection2> 2105 template <typename Collection1, typename Collection2>
1503 inline void removeAll(Collection1& collection, const Collection2& toBeRemoved) 2106 inline void removeAll(Collection1& collection, const Collection2& toBeRemoved) {
1504 { 2107 if (collection.isEmpty() || toBeRemoved.isEmpty())
1505 if (collection.isEmpty() || toBeRemoved.isEmpty()) 2108 return;
1506 return; 2109 typedef typename Collection2::const_iterator CollectionIterator;
1507 typedef typename Collection2::const_iterator CollectionIterator; 2110 CollectionIterator end(toBeRemoved.end());
1508 CollectionIterator end(toBeRemoved.end()); 2111 for (CollectionIterator it(toBeRemoved.begin()); it != end; ++it)
1509 for (CollectionIterator it(toBeRemoved.begin()); it != end; ++it) 2112 collection.remove(*it);
1510 collection.remove(*it); 2113 }
1511 } 2114
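A usage sketch for removeAll: the template only requires isEmpty(), remove(), and const iteration from its arguments. The IntSet wrapper below is a hypothetical stand-in that supplies exactly that surface so the example is self-contained.

#include <set>
#include "wtf/HashTable.h"  // this header, for WTF::removeAll

// Hypothetical adapter giving std::set the isEmpty()/remove()/const_iterator
// interface that removeAll expects from WTF collections.
struct IntSet {
  std::set<int> s;
  typedef std::set<int>::const_iterator const_iterator;
  bool isEmpty() const { return s.empty(); }
  void remove(int v) { s.erase(v); }
  const_iterator begin() const { return s.begin(); }
  const_iterator end() const { return s.end(); }
};

int main() {
  IntSet live;
  live.s.insert(1); live.s.insert(2); live.s.insert(3); live.s.insert(4);
  IntSet condemned;
  condemned.s.insert(2); condemned.s.insert(4);
  WTF::removeAll(live, condemned);     // one remove() per condemned element
  return live.s.size() == 2 ? 0 : 1;   // live is now {1, 3}
}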
1512 2115 } // namespace WTF
1513 } // namespace WTF
1514 2116
1515 #include "wtf/HashIterators.h" 2117 #include "wtf/HashIterators.h"
1516 2118
1517 #endif // WTF_HashTable_h 2119 #endif // WTF_HashTable_h