Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(452)

Side by Side Diff: third_party/WebKit/Source/wtf/HashTable.h

Issue 1436153002: Apply clang-format with Chromium-style without column limit. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights reserv ed. 2 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights reserv ed.
3 * Copyright (C) 2008 David Levin <levin@chromium.org> 3 * Copyright (C) 2008 David Levin <levin@chromium.org>
4 * 4 *
5 * This library is free software; you can redistribute it and/or 5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public 6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either 7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version. 8 * version 2 of the License, or (at your option) any later version.
9 * 9 *
10 * This library is distributed in the hope that it will be useful, 10 * This library is distributed in the hope that it will be useful,
(...skipping 17 matching lines...) Expand all
28 28
29 #define DUMP_HASHTABLE_STATS 0 29 #define DUMP_HASHTABLE_STATS 0
30 #define DUMP_HASHTABLE_STATS_PER_TABLE 0 30 #define DUMP_HASHTABLE_STATS_PER_TABLE 0
31 31
32 #if DUMP_HASHTABLE_STATS_PER_TABLE 32 #if DUMP_HASHTABLE_STATS_PER_TABLE
33 #include "wtf/DataLog.h" 33 #include "wtf/DataLog.h"
34 #endif 34 #endif
35 35
36 #if DUMP_HASHTABLE_STATS 36 #if DUMP_HASHTABLE_STATS
37 #if DUMP_HASHTABLE_STATS_PER_TABLE 37 #if DUMP_HASHTABLE_STATS_PER_TABLE
38 #define UPDATE_PROBE_COUNTS() \ 38 #define UPDATE_PROBE_COUNTS() \
39 ++probeCount; \ 39 ++probeCount; \
40 HashTableStats::recordCollisionAtCount(probeCount); \ 40 HashTableStats::recordCollisionAtCount(probeCount); \
41 ++perTableProbeCount; \ 41 ++perTableProbeCount; \
42 m_stats->recordCollisionAtCount(perTableProbeCount) 42 m_stats->recordCollisionAtCount(perTableProbeCount)
43 #define UPDATE_ACCESS_COUNTS() \ 43 #define UPDATE_ACCESS_COUNTS() \
44 atomicIncrement(&HashTableStats::numAccesses); \ 44 atomicIncrement(&HashTableStats::numAccesses); \
45 int probeCount = 0; \ 45 int probeCount = 0; \
46 ++m_stats->numAccesses; \ 46 ++m_stats->numAccesses; \
47 int perTableProbeCount = 0 47 int perTableProbeCount = 0
48 #else 48 #else
49 #define UPDATE_PROBE_COUNTS() \ 49 #define UPDATE_PROBE_COUNTS() \
50 ++probeCount; \ 50 ++probeCount; \
51 HashTableStats::recordCollisionAtCount(probeCount) 51 HashTableStats::recordCollisionAtCount(probeCount)
52 #define UPDATE_ACCESS_COUNTS() \ 52 #define UPDATE_ACCESS_COUNTS() \
53 atomicIncrement(&HashTableStats::numAccesses); \ 53 atomicIncrement(&HashTableStats::numAccesses); \
54 int probeCount = 0 54 int probeCount = 0
55 #endif 55 #endif
56 #else 56 #else
57 #if DUMP_HASHTABLE_STATS_PER_TABLE 57 #if DUMP_HASHTABLE_STATS_PER_TABLE
58 #define UPDATE_PROBE_COUNTS() \ 58 #define UPDATE_PROBE_COUNTS() \
59 ++perTableProbeCount; \ 59 ++perTableProbeCount; \
60 m_stats->recordCollisionAtCount(perTableProbeCount) 60 m_stats->recordCollisionAtCount(perTableProbeCount)
61 #define UPDATE_ACCESS_COUNTS() \ 61 #define UPDATE_ACCESS_COUNTS() \
62 ++m_stats->numAccesses; \ 62 ++m_stats->numAccesses; \
63 int perTableProbeCount = 0 63 int perTableProbeCount = 0
64 #else 64 #else
65 #define UPDATE_PROBE_COUNTS() do { } while (0) 65 #define UPDATE_PROBE_COUNTS() \
66 #define UPDATE_ACCESS_COUNTS() do { } while (0) 66 do { \
67 } while (0)
68 #define UPDATE_ACCESS_COUNTS() \
69 do { \
70 } while (0)
67 #endif 71 #endif
68 #endif 72 #endif
69 73
70 namespace WTF { 74 namespace WTF {
71 75
72 #if DUMP_HASHTABLE_STATS 76 #if DUMP_HASHTABLE_STATS
73 77
74 struct HashTableStats { 78 struct HashTableStats {
75 // The following variables are all atomically incremented when modified. 79 // The following variables are all atomically incremented when modified.
76 static int numAccesses; 80 static int numAccesses;
77 static int numRehashes; 81 static int numRehashes;
78 static int numRemoves; 82 static int numRemoves;
79 static int numReinserts; 83 static int numReinserts;
80 84
81 // The following variables are only modified in the recordCollisionAtCount 85 // The following variables are only modified in the recordCollisionAtCount
82 // method within a mutex. 86 // method within a mutex.
83 static int maxCollisions; 87 static int maxCollisions;
84 static int numCollisions; 88 static int numCollisions;
85 static int collisionGraph[4096]; 89 static int collisionGraph[4096];
86 90
87 static void recordCollisionAtCount(int count); 91 static void recordCollisionAtCount(int count);
88 static void dumpStats(); 92 static void dumpStats();
89 }; 93 };
90 94
91 #endif 95 #endif
92 96
93 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator> 97 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
94 class HashTable; 98 class HashTable;
95 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator> 99 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
96 class HashTableIterator; 100 class HashTableIterator;
97 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator> 101 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
98 class HashTableConstIterator; 102 class HashTableConstIterator;
99 template <typename Value, typename HashFunctions, typename HashTraits, typename Allocator> 103 template <typename Value, typename HashFunctions, typename HashTraits, typename Allocator>
100 class LinkedHashSet; 104 class LinkedHashSet;
101 template <WeakHandlingFlag x, typename T, typename U, typename V, typename W, ty pename X, typename Y, typename Z> 105 template <WeakHandlingFlag x, typename T, typename U, typename V, typename W, ty pename X, typename Y, typename Z>
102 struct WeakProcessingHashTableHelper; 106 struct WeakProcessingHashTableHelper;
103 107
104 typedef enum { HashItemKnownGood } HashItemKnownGoodTag; 108 typedef enum { HashItemKnownGood } HashItemKnownGoodTag;
105 109
106 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator> 110 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
107 class HashTableConstIterator { 111 class HashTableConstIterator {
108 private: 112 private:
109 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, A llocator> HashTableType; 113 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, All ocator> HashTableType;
110 typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyT raits, Allocator> iterator; 114 typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTra its, Allocator> iterator;
111 typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator; 115 typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, K eyTraits, Allocator> const_iterator;
112 typedef Value ValueType; 116 typedef Value ValueType;
113 typedef typename Traits::IteratorConstGetType GetType; 117 typedef typename Traits::IteratorConstGetType GetType;
114 typedef const ValueType* PointerType; 118 typedef const ValueType* PointerType;
115 119
116 friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTrai ts, Allocator>; 120 friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits , Allocator>;
117 friend class HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>; 121 friend class HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, K eyTraits, Allocator>;
118 122
119 void skipEmptyBuckets() 123 void skipEmptyBuckets() {
120 { 124 while (m_position != m_endPosition && HashTableType::isEmptyOrDeletedBucket( *m_position))
121 while (m_position != m_endPosition && HashTableType::isEmptyOrDeletedBuc ket(*m_position)) 125 ++m_position;
122 ++m_position; 126 }
123 } 127
124 128 HashTableConstIterator(PointerType position, PointerType endPosition, const Ha shTableType* container)
125 HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container) 129 : m_position(position), m_endPosition(endPosition)
126 : m_position(position)
127 , m_endPosition(endPosition)
128 #if ENABLE(ASSERT) 130 #if ENABLE(ASSERT)
129 , m_container(container) 131 ,
130 , m_containerModifications(container->modifications()) 132 m_container(container),
131 #endif 133 m_containerModifications(container->modifications())
132 { 134 #endif
133 skipEmptyBuckets(); 135 {
134 } 136 skipEmptyBuckets();
135 137 }
136 HashTableConstIterator(PointerType position, PointerType endPosition, const HashTableType* container, HashItemKnownGoodTag) 138
137 : m_position(position) 139 HashTableConstIterator(PointerType position, PointerType endPosition, const Ha shTableType* container, HashItemKnownGoodTag)
138 , m_endPosition(endPosition) 140 : m_position(position), m_endPosition(endPosition)
139 #if ENABLE(ASSERT) 141 #if ENABLE(ASSERT)
140 , m_container(container) 142 ,
141 , m_containerModifications(container->modifications()) 143 m_container(container),
142 #endif 144 m_containerModifications(container->modifications())
143 { 145 #endif
144 ASSERT(m_containerModifications == m_container->modifications()); 146 {
145 } 147 ASSERT(m_containerModifications == m_container->modifications());
146 148 }
147 void checkModifications() const 149
148 { 150 void checkModifications() const {
149 // HashTable and collections that build on it do not support 151 // HashTable and collections that build on it do not support
150 // modifications while there is an iterator in use. The exception is 152 // modifications while there is an iterator in use. The exception is
151 // ListHashSet, which has its own iterators that tolerate modification 153 // ListHashSet, which has its own iterators that tolerate modification
152 // of the underlying set. 154 // of the underlying set.
153 ASSERT(m_containerModifications == m_container->modifications()); 155 ASSERT(m_containerModifications == m_container->modifications());
154 ASSERT(!m_container->accessForbidden()); 156 ASSERT(!m_container->accessForbidden());
155 } 157 }
156 158
157 public: 159 public:
158 HashTableConstIterator() {} 160 HashTableConstIterator() {}
159 161
160 GetType get() const 162 GetType get() const {
161 { 163 checkModifications();
162 checkModifications(); 164 return m_position;
163 return m_position; 165 }
164 } 166 typename Traits::IteratorConstReferenceType operator*() const { return Traits: :getToReferenceConstConversion(get()); }
165 typename Traits::IteratorConstReferenceType operator*() const { return Trait s::getToReferenceConstConversion(get()); } 167 GetType operator->() const { return get(); }
166 GetType operator->() const { return get(); } 168
167 169 const_iterator& operator++() {
168 const_iterator& operator++() 170 ASSERT(m_position != m_endPosition);
169 { 171 checkModifications();
170 ASSERT(m_position != m_endPosition); 172 ++m_position;
171 checkModifications(); 173 skipEmptyBuckets();
172 ++m_position; 174 return *this;
173 skipEmptyBuckets(); 175 }
174 return *this; 176
175 } 177 // postfix ++ intentionally omitted
176 178
177 // postfix ++ intentionally omitted 179 // Comparison.
178 180 bool operator==(const const_iterator& other) const {
179 // Comparison. 181 return m_position == other.m_position;
180 bool operator==(const const_iterator& other) const 182 }
181 { 183 bool operator!=(const const_iterator& other) const {
182 return m_position == other.m_position; 184 return m_position != other.m_position;
183 } 185 }
184 bool operator!=(const const_iterator& other) const 186 bool operator==(const iterator& other) const {
185 { 187 return *this == static_cast<const_iterator>(other);
186 return m_position != other.m_position; 188 }
187 } 189 bool operator!=(const iterator& other) const {
188 bool operator==(const iterator& other) const 190 return *this != static_cast<const_iterator>(other);
189 { 191 }
190 return *this == static_cast<const_iterator>(other); 192
191 } 193 private:
192 bool operator!=(const iterator& other) const 194 PointerType m_position;
193 { 195 PointerType m_endPosition;
194 return *this != static_cast<const_iterator>(other);
195 }
196
197 private:
198 PointerType m_position;
199 PointerType m_endPosition;
200 #if ENABLE(ASSERT) 196 #if ENABLE(ASSERT)
201 const HashTableType* m_container; 197 const HashTableType* m_container;
202 int64_t m_containerModifications; 198 int64_t m_containerModifications;
203 #endif 199 #endif
204 }; 200 };
205 201
206 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator> 202 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
207 class HashTableIterator { 203 class HashTableIterator {
208 private: 204 private:
209 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, A llocator> HashTableType; 205 typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, All ocator> HashTableType;
210 typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyT raits, Allocator> iterator; 206 typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTra its, Allocator> iterator;
211 typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator; 207 typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, K eyTraits, Allocator> const_iterator;
212 typedef Value ValueType; 208 typedef Value ValueType;
213 typedef typename Traits::IteratorGetType GetType; 209 typedef typename Traits::IteratorGetType GetType;
214 typedef ValueType* PointerType; 210 typedef ValueType* PointerType;
215 211
216 friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTrai ts, Allocator>; 212 friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits , Allocator>;
217 213
218 HashTableIterator(PointerType pos, PointerType end, const HashTableType* con tainer) : m_iterator(pos, end, container) {} 214 HashTableIterator(PointerType pos, PointerType end, const HashTableType* conta iner)
219 HashTableIterator(PointerType pos, PointerType end, const HashTableType* con tainer, HashItemKnownGoodTag tag) : m_iterator(pos, end, container, tag) {} 215 : m_iterator(pos, end, container) {}
220 216 HashTableIterator(PointerType pos, PointerType end, const HashTableType* conta iner, HashItemKnownGoodTag tag)
221 public: 217 : m_iterator(pos, end, container, tag) {}
222 HashTableIterator() {} 218
223 219 public:
224 // default copy, assignment and destructor are OK 220 HashTableIterator() {}
225 221
226 GetType get() const { return const_cast<GetType>(m_iterator.get()); } 222 // default copy, assignment and destructor are OK
227 typename Traits::IteratorReferenceType operator*() const { return Traits::ge tToReferenceConversion(get()); } 223
228 GetType operator->() const { return get(); } 224 GetType get() const { return const_cast<GetType>(m_iterator.get()); }
229 225 typename Traits::IteratorReferenceType operator*() const { return Traits::getT oReferenceConversion(get()); }
230 iterator& operator++() { ++m_iterator; return *this; } 226 GetType operator->() const { return get(); }
231 227
232 // postfix ++ intentionally omitted 228 iterator& operator++() {
233 229 ++m_iterator;
234 // Comparison. 230 return *this;
235 bool operator==(const iterator& other) const { return m_iterator == other.m_ iterator; } 231 }
236 bool operator!=(const iterator& other) const { return m_iterator != other.m_ iterator; } 232
237 bool operator==(const const_iterator& other) const { return m_iterator == ot her; } 233 // postfix ++ intentionally omitted
238 bool operator!=(const const_iterator& other) const { return m_iterator != ot her; } 234
239 235 // Comparison.
240 operator const_iterator() const { return m_iterator; } 236 bool operator==(const iterator& other) const { return m_iterator == other.m_it erator; }
241 237 bool operator!=(const iterator& other) const { return m_iterator != other.m_it erator; }
242 private: 238 bool operator==(const const_iterator& other) const { return m_iterator == othe r; }
243 const_iterator m_iterator; 239 bool operator!=(const const_iterator& other) const { return m_iterator != othe r; }
240
241 operator const_iterator() const { return m_iterator; }
242
243 private:
244 const_iterator m_iterator;
244 }; 245 };
245 246
246 using std::swap; 247 using std::swap;
247 248
248 // Work around MSVC's standard library, whose swap for pairs does not swap by co mponent. 249 // Work around MSVC's standard library, whose swap for pairs does not swap by co mponent.
249 template <typename T> inline void hashTableSwap(T& a, T& b) 250 template <typename T>
250 { 251 inline void hashTableSwap(T& a, T& b) {
251 swap(a, b); 252 swap(a, b);
252 } 253 }
253 254
254 template <typename T, typename U> inline void hashTableSwap(KeyValuePair<T, U>& a, KeyValuePair<T, U>& b) 255 template <typename T, typename U>
255 { 256 inline void hashTableSwap(KeyValuePair<T, U>& a, KeyValuePair<T, U>& b) {
256 swap(a.key, b.key); 257 swap(a.key, b.key);
257 swap(a.value, b.value); 258 swap(a.value, b.value);
258 } 259 }
259 260
260 template <typename T, typename Allocator, bool useSwap = !IsTriviallyDestructibl e<T>::value> 261 template <typename T, typename Allocator, bool useSwap = !IsTriviallyDestructibl e<T>::value>
261 struct Mover; 262 struct Mover;
262 template <typename T, typename Allocator> struct Mover<T, Allocator, true> { 263 template <typename T, typename Allocator>
263 static void move(T& from, T& to) 264 struct Mover<T, Allocator, true> {
264 { 265 static void move(T& from, T& to) {
265 // The key and value cannot be swapped atomically, and it would be wrong 266 // The key and value cannot be swapped atomically, and it would be wrong
266 // to have a GC when only one was swapped and the other still contained 267 // to have a GC when only one was swapped and the other still contained
267 // garbage (eg. from a previous use of the same slot). Therefore we 268 // garbage (eg. from a previous use of the same slot). Therefore we
268 // forbid a GC until both the key and the value are swapped. 269 // forbid a GC until both the key and the value are swapped.
269 Allocator::enterGCForbiddenScope(); 270 Allocator::enterGCForbiddenScope();
270 hashTableSwap(from, to); 271 hashTableSwap(from, to);
271 Allocator::leaveGCForbiddenScope(); 272 Allocator::leaveGCForbiddenScope();
272 } 273 }
273 }; 274 };
274 275
275 template <typename T, typename Allocator> struct Mover<T, Allocator, false> { 276 template <typename T, typename Allocator>
276 static void move(T& from, T& to) { to = from; } 277 struct Mover<T, Allocator, false> {
277 }; 278 static void move(T& from, T& to) { to = from; }
278 279 };
279 template <typename HashFunctions> class IdentityHashTranslator { 280
280 public: 281 template <typename HashFunctions>
281 template <typename T> static unsigned hash(const T& key) { return HashFuncti ons::hash(key); } 282 class IdentityHashTranslator {
282 template <typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); } 283 public:
283 template <typename T, typename U, typename V> static void translate(T& locat ion, const U&, const V& value) { location = value; } 284 template <typename T>
284 }; 285 static unsigned hash(const T& key) { return HashFunctions::hash(key); }
285 286 template <typename T, typename U>
286 template <typename HashTableType, typename ValueType> struct HashTableAddResult { 287 static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); }
287 HashTableAddResult(const HashTableType* container, ValueType* storedValue, b ool isNewEntry) 288 template <typename T, typename U, typename V>
288 : storedValue(storedValue) 289 static void translate(T& location, const U&, const V& value) { location = valu e; }
289 , isNewEntry(isNewEntry) 290 };
291
292 template <typename HashTableType, typename ValueType>
293 struct HashTableAddResult {
294 HashTableAddResult(const HashTableType* container, ValueType* storedValue, boo l isNewEntry)
295 : storedValue(storedValue), isNewEntry(isNewEntry)
290 #if ENABLE(SECURITY_ASSERT) 296 #if ENABLE(SECURITY_ASSERT)
291 , m_container(container) 297 ,
292 , m_containerModifications(container->modifications()) 298 m_container(container),
293 #endif 299 m_containerModifications(container->modifications())
294 { 300 #endif
295 ASSERT_UNUSED(container, container); 301 {
296 } 302 ASSERT_UNUSED(container, container);
297 303 }
298 ValueType* storedValue; 304
299 bool isNewEntry; 305 ValueType* storedValue;
306 bool isNewEntry;
300 307
301 #if ENABLE(SECURITY_ASSERT) 308 #if ENABLE(SECURITY_ASSERT)
302 ~HashTableAddResult() 309 ~HashTableAddResult() {
303 { 310 // If rehash happened before accessing storedValue, it's
304 // If rehash happened before accessing storedValue, it's 311 // use-after-free. Any modification may cause a rehash, so we check for
305 // use-after-free. Any modification may cause a rehash, so we check for 312 // modifications here.
306 // modifications here. 313
307 314 // Rehash after accessing storedValue is harmless but will assert if the
308 // Rehash after accessing storedValue is harmless but will assert if the 315 // AddResult destructor takes place after a modification. You may need
309 // AddResult destructor takes place after a modification. You may need 316 // to limit the scope of the AddResult.
310 // to limit the scope of the AddResult. 317 ASSERT_WITH_SECURITY_IMPLICATION(m_containerModifications == m_container->mo difications());
311 ASSERT_WITH_SECURITY_IMPLICATION(m_containerModifications == m_container ->modifications()); 318 }
312 } 319
313 320 private:
314 private: 321 const HashTableType* m_container;
315 const HashTableType* m_container; 322 const int64_t m_containerModifications;
316 const int64_t m_containerModifications;
317 #endif 323 #endif
318 }; 324 };
319 325
320 template <typename Value, typename Extractor, typename KeyTraits> 326 template <typename Value, typename Extractor, typename KeyTraits>
321 struct HashTableHelper { 327 struct HashTableHelper {
322 static bool isEmptyBucket(const Value& value) { return isHashTraitsEmptyValu e<KeyTraits>(Extractor::extract(value)); } 328 static bool isEmptyBucket(const Value& value) { return isHashTraitsEmptyValue< KeyTraits>(Extractor::extract(value)); }
323 static bool isDeletedBucket(const Value& value) { return KeyTraits::isDelete dValue(Extractor::extract(value)); } 329 static bool isDeletedBucket(const Value& value) { return KeyTraits::isDeletedV alue(Extractor::extract(value)); }
324 static bool isEmptyOrDeletedBucket(const Value& value) { return isEmptyBucke t(value) || isDeletedBucket(value); } 330 static bool isEmptyOrDeletedBucket(const Value& value) { return isEmptyBucket( value) || isDeletedBucket(value); }
325 }; 331 };
326 332
327 template <typename HashTranslator, typename KeyTraits, bool safeToCompareToEmpty OrDeleted> 333 template <typename HashTranslator, typename KeyTraits, bool safeToCompareToEmpty OrDeleted>
328 struct HashTableKeyChecker { 334 struct HashTableKeyChecker {
329 // There's no simple generic way to make this check if 335 // There's no simple generic way to make this check if
330 // safeToCompareToEmptyOrDeleted is false, so the check always passes. 336 // safeToCompareToEmptyOrDeleted is false, so the check always passes.
331 template <typename T> 337 template <typename T>
332 static bool checkKey(const T&) { return true; } 338 static bool checkKey(const T&) { return true; }
333 }; 339 };
334 340
335 template <typename HashTranslator, typename KeyTraits> 341 template <typename HashTranslator, typename KeyTraits>
336 struct HashTableKeyChecker<HashTranslator, KeyTraits, true> { 342 struct HashTableKeyChecker<HashTranslator, KeyTraits, true> {
337 template <typename T> 343 template <typename T>
338 static bool checkKey(const T& key) 344 static bool checkKey(const T& key) {
339 { 345 // FIXME : Check also equality to the deleted value.
340 // FIXME : Check also equality to the deleted value. 346 return !HashTranslator::equal(KeyTraits::emptyValue(), key);
341 return !HashTranslator::equal(KeyTraits::emptyValue(), key); 347 }
342 }
343 }; 348 };
344 349
345 // Note: empty or deleted key values are not allowed, using them may lead to 350 // Note: empty or deleted key values are not allowed, using them may lead to
346 // undefined behavior. For pointer keys this means that null pointers are not 351 // undefined behavior. For pointer keys this means that null pointers are not
347 // allowed unless you supply custom key traits. 352 // allowed unless you supply custom key traits.
348 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator> 353 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
349 class HashTable : public ConditionalDestructor<HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>, Allocator::isGarbageCollected> { 354 class HashTable : public ConditionalDestructor<HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>, Allocator::isGarbageCollected> {
350 public: 355 public:
351 typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyT raits, Allocator> iterator; 356 typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTra its, Allocator> iterator;
352 typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> const_iterator; 357 typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, K eyTraits, Allocator> const_iterator;
353 typedef Traits ValueTraits; 358 typedef Traits ValueTraits;
354 typedef Key KeyType; 359 typedef Key KeyType;
355 typedef typename KeyTraits::PeekInType KeyPeekInType; 360 typedef typename KeyTraits::PeekInType KeyPeekInType;
356 typedef typename KeyTraits::PassInType KeyPassInType; 361 typedef typename KeyTraits::PassInType KeyPassInType;
357 typedef Value ValueType; 362 typedef Value ValueType;
358 typedef Extractor ExtractorType; 363 typedef Extractor ExtractorType;
359 typedef KeyTraits KeyTraitsType; 364 typedef KeyTraits KeyTraitsType;
360 typedef typename Traits::PassInType ValuePassInType; 365 typedef typename Traits::PassInType ValuePassInType;
361 typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType; 366 typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType;
362 typedef HashTableAddResult<HashTable, ValueType> AddResult; 367 typedef HashTableAddResult<HashTable, ValueType> AddResult;
363 368
364 #if DUMP_HASHTABLE_STATS_PER_TABLE 369 #if DUMP_HASHTABLE_STATS_PER_TABLE
365 struct Stats { 370 struct Stats {
366 Stats() 371 Stats()
367 : numAccesses(0) 372 : numAccesses(0), numRehashes(0), numRemoves(0), numReinserts(0), maxCol lisions(0), numCollisions(0), collisionGraph() {
368 , numRehashes(0) 373 }
369 , numRemoves(0) 374
370 , numReinserts(0) 375 int numAccesses;
371 , maxCollisions(0) 376 int numRehashes;
372 , numCollisions(0) 377 int numRemoves;
373 , collisionGraph() 378 int numReinserts;
374 { 379
375 } 380 int maxCollisions;
376 381 int numCollisions;
377 int numAccesses; 382 int collisionGraph[4096];
378 int numRehashes; 383
379 int numRemoves; 384 void recordCollisionAtCount(int count) {
380 int numReinserts; 385 if (count > maxCollisions)
381 386 maxCollisions = count;
382 int maxCollisions; 387 numCollisions++;
383 int numCollisions; 388 collisionGraph[count]++;
384 int collisionGraph[4096]; 389 }
385 390
386 void recordCollisionAtCount(int count) 391 void dumpStats() {
387 { 392 dataLogF("\nWTF::HashTable::Stats dump\n\n");
388 if (count > maxCollisions) 393 dataLogF("%d accesses\n", numAccesses);
389 maxCollisions = count; 394 dataLogF("%d total collisions, average %.2f probes per access\n", numColli sions, 1.0 * (numAccesses + numCollisions) / numAccesses);
390 numCollisions++; 395 dataLogF("longest collision chain: %d\n", maxCollisions);
391 collisionGraph[count]++; 396 for (int i = 1; i <= maxCollisions; i++) {
392 } 397 dataLogF(" %d lookups with exactly %d collisions (%.2f%% , %.2f%% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collis ionGraph[i + 1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses);
393 398 }
394 void dumpStats() 399 dataLogF("%d rehashes\n", numRehashes);
395 { 400 dataLogF("%d reinserts\n", numReinserts);
396 dataLogF("\nWTF::HashTable::Stats dump\n\n"); 401 }
397 dataLogF("%d accesses\n", numAccesses); 402 };
398 dataLogF("%d total collisions, average %.2f probes per access\n", nu mCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses); 403 #endif
399 dataLogF("longest collision chain: %d\n", maxCollisions); 404
400 for (int i = 1; i <= maxCollisions; i++) { 405 HashTable();
401 dataLogF(" %d lookups with exactly %d collisions (%.2f%% , %.2f %% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collisionGraph[i+1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses); 406 void finalize() {
402 } 407 ASSERT(!Allocator::isGarbageCollected);
403 dataLogF("%d rehashes\n", numRehashes); 408 if (LIKELY(!m_table))
404 dataLogF("%d reinserts\n", numReinserts); 409 return;
405 } 410 ASSERT(!m_accessForbidden);
406 }; 411 #if ENABLE(ASSERT)
407 #endif 412 m_accessForbidden = true;
408 413 #endif
409 HashTable(); 414 deleteAllBucketsAndDeallocate(m_table, m_tableSize);
410 void finalize() 415 #if ENABLE(ASSERT)
411 { 416 m_accessForbidden = false;
412 ASSERT(!Allocator::isGarbageCollected); 417 #endif
413 if (LIKELY(!m_table)) 418 m_table = nullptr;
414 return; 419 }
415 ASSERT(!m_accessForbidden); 420
416 #if ENABLE(ASSERT) 421 HashTable(const HashTable&);
417 m_accessForbidden = true; 422 void swap(HashTable&);
418 #endif 423 HashTable& operator=(const HashTable&);
419 deleteAllBucketsAndDeallocate(m_table, m_tableSize); 424
420 #if ENABLE(ASSERT) 425 // When the hash table is empty, just return the same iterator for end as
421 m_accessForbidden = false; 426 // for begin. This is more efficient because we don't have to skip all the
422 #endif 427 // empty and deleted buckets, and iterating an empty table is a common case
423 m_table = nullptr; 428 // that's worth optimizing.
424 } 429 iterator begin() { return isEmpty() ? end() : makeIterator(m_table); }
425 430 iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); }
426 HashTable(const HashTable&); 431 const_iterator begin() const { return isEmpty() ? end() : makeConstIterator(m_ table); }
427 void swap(HashTable&); 432 const_iterator end() const { return makeKnownGoodConstIterator(m_table + m_tab leSize); }
428 HashTable& operator=(const HashTable&); 433
429 434 unsigned size() const {
430 // When the hash table is empty, just return the same iterator for end as 435 ASSERT(!m_accessForbidden);
431 // for begin. This is more efficient because we don't have to skip all the 436 return m_keyCount;
432 // empty and deleted buckets, and iterating an empty table is a common case 437 }
433 // that's worth optimizing. 438 unsigned capacity() const {
434 iterator begin() { return isEmpty() ? end() : makeIterator(m_table); } 439 ASSERT(!m_accessForbidden);
435 iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); } 440 return m_tableSize;
436 const_iterator begin() const { return isEmpty() ? end() : makeConstIterator( m_table); } 441 }
437 const_iterator end() const { return makeKnownGoodConstIterator(m_table + m_t ableSize); } 442 bool isEmpty() const {
438 443 ASSERT(!m_accessForbidden);
439 unsigned size() const 444 return !m_keyCount;
440 { 445 }
441 ASSERT(!m_accessForbidden); 446
442 return m_keyCount; 447 void reserveCapacityForSize(unsigned size);
443 } 448
444 unsigned capacity() const 449 AddResult add(ValuePassInType value) {
445 { 450 return add<IdentityTranslatorType>(Extractor::extract(value), value);
446 ASSERT(!m_accessForbidden); 451 }
447 return m_tableSize; 452
448 } 453 // A special version of add() that finds the object by hashing and comparing
449 bool isEmpty() const 454 // with some other type, to avoid the cost of type conversion if the object
450 { 455 // is already in the table.
451 ASSERT(!m_accessForbidden); 456 template <typename HashTranslator, typename T, typename Extra>
452 return !m_keyCount; 457 AddResult add(const T& key, const Extra&);
453 } 458 template <typename HashTranslator, typename T, typename Extra>
454 459 AddResult addPassingHashCode(const T& key, const Extra&);
455 void reserveCapacityForSize(unsigned size); 460
456 461 iterator find(KeyPeekInType key) { return find<IdentityTranslatorType>(key); }
457 AddResult add(ValuePassInType value) 462 const_iterator find(KeyPeekInType key) const { return find<IdentityTranslatorT ype>(key); }
458 { 463 bool contains(KeyPeekInType key) const { return contains<IdentityTranslatorTyp e>(key); }
459 return add<IdentityTranslatorType>(Extractor::extract(value), value); 464
460 } 465 template <typename HashTranslator, typename T>
461 466 iterator find(const T&);
462 // A special version of add() that finds the object by hashing and comparing 467 template <typename HashTranslator, typename T>
463 // with some other type, to avoid the cost of type conversion if the object 468 const_iterator find(const T&) const;
464 // is already in the table. 469 template <typename HashTranslator, typename T>
465 template <typename HashTranslator, typename T, typename Extra> AddResult add (const T& key, const Extra&); 470 bool contains(const T&) const;
466 template <typename HashTranslator, typename T, typename Extra> AddResult add PassingHashCode(const T& key, const Extra&); 471
467 472 void remove(KeyPeekInType);
468 iterator find(KeyPeekInType key) { return find<IdentityTranslatorType>(key); } 473 void remove(iterator);
469 const_iterator find(KeyPeekInType key) const { return find<IdentityTranslato rType>(key); } 474 void remove(const_iterator);
470 bool contains(KeyPeekInType key) const { return contains<IdentityTranslatorT ype>(key); } 475 void clear();
471 476
472 template <typename HashTranslator, typename T> iterator find(const T&); 477 static bool isEmptyBucket(const ValueType& value) { return isHashTraitsEmptyVa lue<KeyTraits>(Extractor::extract(value)); }
473 template <typename HashTranslator, typename T> const_iterator find(const T&) const; 478 static bool isDeletedBucket(const ValueType& value) { return KeyTraits::isDele tedValue(Extractor::extract(value)); }
474 template <typename HashTranslator, typename T> bool contains(const T&) const ; 479 static bool isEmptyOrDeletedBucket(const ValueType& value) { return HashTableH elper<ValueType, Extractor, KeyTraits>::isEmptyOrDeletedBucket(value); }
475 480
476 void remove(KeyPeekInType); 481 ValueType* lookup(KeyPeekInType key) { return lookup<IdentityTranslatorType, K eyPeekInType>(key); }
477 void remove(iterator); 482 template <typename HashTranslator, typename T>
478 void remove(const_iterator); 483 ValueType* lookup(T);
479 void clear(); 484 template <typename HashTranslator, typename T>
480 485 const ValueType* lookup(T) const;
481 static bool isEmptyBucket(const ValueType& value) { return isHashTraitsEmpty Value<KeyTraits>(Extractor::extract(value)); } 486
482 static bool isDeletedBucket(const ValueType& value) { return KeyTraits::isDe letedValue(Extractor::extract(value)); } 487 template <typename VisitorDispatcher>
483 static bool isEmptyOrDeletedBucket(const ValueType& value) { return HashTabl eHelper<ValueType, Extractor, KeyTraits>:: isEmptyOrDeletedBucket(value); } 488 void trace(VisitorDispatcher);
484 489
485 ValueType* lookup(KeyPeekInType key) { return lookup<IdentityTranslatorType, KeyPeekInType>(key); } 490 #if ENABLE(ASSERT)
486 template <typename HashTranslator, typename T> ValueType* lookup(T); 491 bool accessForbidden() const { return m_accessForbidden; }
487 template <typename HashTranslator, typename T> const ValueType* lookup(T) co nst; 492 int64_t modifications() const { return m_modifications; }
488 493 void registerModification() { m_modifications++; }
489 template <typename VisitorDispatcher> void trace(VisitorDispatcher); 494 // HashTable and collections that build on it do not support modifications
490 495 // while there is an iterator in use. The exception is ListHashSet, which
491 #if ENABLE(ASSERT) 496 // has its own iterators that tolerate modification of the underlying set.
492 bool accessForbidden() const { return m_accessForbidden; } 497 void checkModifications(int64_t mods) const { ASSERT(mods == m_modifications); }
493 int64_t modifications() const { return m_modifications; }
494 void registerModification() { m_modifications++; }
495 // HashTable and collections that build on it do not support modifications
496 // while there is an iterator in use. The exception is ListHashSet, which
497 // has its own iterators that tolerate modification of the underlying set.
498 void checkModifications(int64_t mods) const { ASSERT(mods == m_modifications ); }
499 #else 498 #else
500 int64_t modifications() const { return 0; } 499 int64_t modifications() const { return 0; }
501 void registerModification() {} 500 void registerModification() {}
502 void checkModifications(int64_t mods) const {} 501 void checkModifications(int64_t mods) const {}
503 #endif 502 #endif
504 503
505 private: 504 private:
506 static ValueType* allocateTable(unsigned size); 505 static ValueType* allocateTable(unsigned size);
507 static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size); 506 static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size);
508 507
509 typedef std::pair<ValueType*, bool> LookupType; 508 typedef std::pair<ValueType*, bool> LookupType;
510 typedef std::pair<LookupType, unsigned> FullLookupType; 509 typedef std::pair<LookupType, unsigned> FullLookupType;
511 510
512 LookupType lookupForWriting(const Key& key) { return lookupForWriting<Identi tyTranslatorType>(key); } 511 LookupType lookupForWriting(const Key& key) { return lookupForWriting<Identity TranslatorType>(key); }
513 template <typename HashTranslator, typename T> FullLookupType fullLookupForW riting(const T&); 512 template <typename HashTranslator, typename T>
514 template <typename HashTranslator, typename T> LookupType lookupForWriting(c onst T&); 513 FullLookupType fullLookupForWriting(const T&);
515 514 template <typename HashTranslator, typename T>
516 void remove(ValueType*); 515 LookupType lookupForWriting(const T&);
517 516
518 bool shouldExpand() const { return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; } 517 void remove(ValueType*);
519 bool mustRehashInPlace() const { return m_keyCount * m_minLoad < m_tableSize * 2; } 518
520 bool shouldShrink() const 519 bool shouldExpand() const { return (m_keyCount + m_deletedCount) * m_maxLoad > = m_tableSize; }
521 { 520 bool mustRehashInPlace() const { return m_keyCount * m_minLoad < m_tableSize * 2; }
522 // isAllocationAllowed check should be at the last because it's 521 bool shouldShrink() const {
523 // expensive. 522 // isAllocationAllowed check should be at the last because it's
524 return m_keyCount * m_minLoad < m_tableSize 523 // expensive.
525 && m_tableSize > KeyTraits::minimumTableSize 524 return m_keyCount * m_minLoad < m_tableSize && m_tableSize > KeyTraits::mini mumTableSize && Allocator::isAllocationAllowed();
526 && Allocator::isAllocationAllowed(); 525 }
527 } 526 ValueType* expand(ValueType* entry = 0);
528 ValueType* expand(ValueType* entry = 0); 527 void shrink() { rehash(m_tableSize / 2, 0); }
529 void shrink() { rehash(m_tableSize / 2, 0); } 528
530 529 ValueType* expandBuffer(unsigned newTableSize, ValueType* entry, bool&);
531 ValueType* expandBuffer(unsigned newTableSize, ValueType* entry, bool&); 530 ValueType* rehashTo(ValueType* newTable, unsigned newTableSize, ValueType* ent ry);
532 ValueType* rehashTo(ValueType* newTable, unsigned newTableSize, ValueType* e ntry); 531 ValueType* rehash(unsigned newTableSize, ValueType* entry);
533 ValueType* rehash(unsigned newTableSize, ValueType* entry); 532 ValueType* reinsert(ValueType&);
534 ValueType* reinsert(ValueType&); 533
535 534 static void initializeBucket(ValueType& bucket);
536 static void initializeBucket(ValueType& bucket); 535 static void deleteBucket(ValueType& bucket) {
537 static void deleteBucket(ValueType& bucket) 536 bucket.~ValueType();
538 { 537 Traits::constructDeletedValue(bucket, Allocator::isGarbageCollected);
539 bucket.~ValueType(); 538 }
540 Traits::constructDeletedValue(bucket, Allocator::isGarbageCollected); 539
541 } 540 FullLookupType makeLookupResult(ValueType* position, bool found, unsigned hash ) { return FullLookupType(LookupType(position, found), hash); }
542 541
543 FullLookupType makeLookupResult(ValueType* position, bool found, unsigned ha sh) 542 iterator makeIterator(ValueType* pos) { return iterator(pos, m_table + m_table Size, this); }
544 { return FullLookupType(LookupType(position, found), hash); } 543 const_iterator makeConstIterator(ValueType* pos) const { return const_iterator (pos, m_table + m_tableSize, this); }
545 544 iterator makeKnownGoodIterator(ValueType* pos) { return iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
546 iterator makeIterator(ValueType* pos) { return iterator(pos, m_table + m_tab leSize, this); } 545 const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return const _iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); }
547 const_iterator makeConstIterator(ValueType* pos) const { return const_iterat or(pos, m_table + m_tableSize, this); } 546
548 iterator makeKnownGoodIterator(ValueType* pos) { return iterator(pos, m_tabl e + m_tableSize, this, HashItemKnownGood); } 547 static const unsigned m_maxLoad = 2;
549 const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return con st_iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); } 548 static const unsigned m_minLoad = 6;
550 549
551 static const unsigned m_maxLoad = 2; 550 unsigned tableSizeMask() const {
552 static const unsigned m_minLoad = 6; 551 size_t mask = m_tableSize - 1;
553 552 ASSERT((mask & m_tableSize) == 0);
554 unsigned tableSizeMask() const 553 return mask;
555 { 554 }
556 size_t mask = m_tableSize - 1; 555
557 ASSERT((mask & m_tableSize) == 0); 556 void setEnqueued() { m_queueFlag = true; }
558 return mask; 557 void clearEnqueued() { m_queueFlag = false; }
559 } 558 bool enqueued() { return m_queueFlag; }
560 559
561 void setEnqueued() { m_queueFlag = true; } 560 ValueType* m_table;
562 void clearEnqueued() { m_queueFlag = false; } 561 unsigned m_tableSize;
563 bool enqueued() { return m_queueFlag; } 562 unsigned m_keyCount;
564 563 #if ENABLE(ASSERT)
565 ValueType* m_table; 564 unsigned m_deletedCount : 30;
566 unsigned m_tableSize; 565 unsigned m_queueFlag : 1;
567 unsigned m_keyCount; 566 unsigned m_accessForbidden : 1;
568 #if ENABLE(ASSERT) 567 unsigned m_modifications;
569 unsigned m_deletedCount:30;
570 unsigned m_queueFlag:1;
571 unsigned m_accessForbidden:1;
572 unsigned m_modifications;
573 #else 568 #else
574 unsigned m_deletedCount:31; 569 unsigned m_deletedCount : 31;
575 unsigned m_queueFlag:1; 570 unsigned m_queueFlag : 1;
576 #endif 571 #endif
577 572
578 #if DUMP_HASHTABLE_STATS_PER_TABLE 573 #if DUMP_HASHTABLE_STATS_PER_TABLE
579 public: 574 public:
580 mutable OwnPtr<Stats> m_stats; 575 mutable OwnPtr<Stats> m_stats;
581 #endif 576 #endif
582 577
583 template <WeakHandlingFlag x, typename T, typename U, typename V, typename W , typename X, typename Y, typename Z> friend struct WeakProcessingHashTableHelpe r; 578 template <WeakHandlingFlag x, typename T, typename U, typename V, typename W, typename X, typename Y, typename Z>
584 template <typename T, typename U, typename V, typename W> friend class Linke dHashSet; 579 friend struct WeakProcessingHashTableHelper;
580 template <typename T, typename U, typename V, typename W>
581 friend class LinkedHashSet;
585 }; 582 };
586 583
587 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator> 584 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
588 inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::HashTable() 585 inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::HashTable()
589 : m_table(nullptr) 586 : m_table(nullptr), m_tableSize(0), m_keyCount(0), m_deletedCount(0), m_queu eFlag(false)
590 , m_tableSize(0) 587 #if ENABLE(ASSERT)
591 , m_keyCount(0) 588 ,
592 , m_deletedCount(0) 589 m_accessForbidden(false),
593 , m_queueFlag(false) 590 m_modifications(0)
594 #if ENABLE(ASSERT)
595 , m_accessForbidden(false)
596 , m_modifications(0)
597 #endif 591 #endif
598 #if DUMP_HASHTABLE_STATS_PER_TABLE 592 #if DUMP_HASHTABLE_STATS_PER_TABLE
599 , m_stats(adoptPtr(new Stats)) 593 ,
594 m_stats(adoptPtr(new Stats))
600 #endif 595 #endif
601 { 596 {
602 static_assert(Allocator::isGarbageCollected || (!IsPointerToGarbageCollected Type<Key>::value && !IsPointerToGarbageCollectedType<Value>::value), "Cannot put raw pointers to garbage-collected classes into an off-heap collection."); 597 static_assert(Allocator::isGarbageCollected || (!IsPointerToGarbageCollectedTy pe<Key>::value && !IsPointerToGarbageCollectedType<Value>::value), "Cannot put r aw pointers to garbage-collected classes into an off-heap collection.");
603 } 598 }
604 599
605 inline unsigned doubleHash(unsigned key) 600 inline unsigned doubleHash(unsigned key) {
601 key = ~key + (key >> 23);
602 key ^= (key << 12);
603 key ^= (key >> 7);
604 key ^= (key << 2);
605 key ^= (key >> 20);
606 return key;
607 }
608
609 inline unsigned calculateCapacity(unsigned size) {
610 for (unsigned mask = size; mask; mask >>= 1)
611 size |= mask; // 00110101010 -> 00111111111
612 return (size + 1) * 2; // 00111111111 -> 10000000000
613 }
614
615 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
616 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocato r>::reserveCapacityForSize(unsigned newSize) {
617 unsigned newCapacity = calculateCapacity(newSize);
618 if (newCapacity < KeyTraits::minimumTableSize)
619 newCapacity = KeyTraits::minimumTableSize;
620
621 if (newCapacity > capacity()) {
622 RELEASE_ASSERT(!static_cast<int>(newCapacity >> 31)); // HashTable capacity should not overflow 32bit int.
623 rehash(newCapacity, 0);
624 }
625 }
626
627 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
628 template <typename HashTranslator, typename T>
629 inline Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key) {
630 return const_cast<Value*>(const_cast<const HashTable*>(this)->lookup<HashTrans lator, T>(key));
631 }
632
633 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
634 template <typename HashTranslator, typename T>
635 inline const Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyT raits, Allocator>::lookup(T key) const {
636 ASSERT(!m_accessForbidden);
637 ASSERT((HashTableKeyChecker<HashTranslator, KeyTraits, HashFunctions::safeToCo mpareToEmptyOrDeleted>::checkKey(key)));
638 const ValueType* table = m_table;
639 if (!table)
640 return nullptr;
641
642 size_t k = 0;
643 size_t sizeMask = tableSizeMask();
644 unsigned h = HashTranslator::hash(key);
645 size_t i = h & sizeMask;
646
647 UPDATE_ACCESS_COUNTS();
648
649 while (1) {
650 const ValueType* entry = table + i;
651
652 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
653 if (HashTranslator::equal(Extractor::extract(*entry), key))
654 return entry;
655
656 if (isEmptyBucket(*entry))
657 return nullptr;
658 } else {
659 if (isEmptyBucket(*entry))
660 return nullptr;
661
662 if (!isDeletedBucket(*entry) && HashTranslator::equal(Extractor::extract(* entry), key))
663 return entry;
664 }
665 UPDATE_PROBE_COUNTS();
666 if (!k)
667 k = 1 | doubleHash(h);
668 i = (i + k) & sizeMask;
669 }
670 }
671
672 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
673 template <typename HashTranslator, typename T>
674 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTrait s, Allocator>::LookupType HashTable<Key, Value, Extractor, HashFunctions, Traits , KeyTraits, Allocator>::lookupForWriting(const T& key) {
675 ASSERT(!m_accessForbidden);
676 ASSERT(m_table);
677 registerModification();
678
679 ValueType* table = m_table;
680 size_t k = 0;
681 size_t sizeMask = tableSizeMask();
682 unsigned h = HashTranslator::hash(key);
683 size_t i = h & sizeMask;
684
685 UPDATE_ACCESS_COUNTS();
686
687 ValueType* deletedEntry = nullptr;
688
689 while (1) {
690 ValueType* entry = table + i;
691
692 if (isEmptyBucket(*entry))
693 return LookupType(deletedEntry ? deletedEntry : entry, false);
694
695 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
696 if (HashTranslator::equal(Extractor::extract(*entry), key))
697 return LookupType(entry, true);
698
699 if (isDeletedBucket(*entry))
700 deletedEntry = entry;
701 } else {
702 if (isDeletedBucket(*entry))
703 deletedEntry = entry;
704 else if (HashTranslator::equal(Extractor::extract(*entry), key))
705 return LookupType(entry, true);
706 }
707 UPDATE_PROBE_COUNTS();
708 if (!k)
709 k = 1 | doubleHash(h);
710 i = (i + k) & sizeMask;
711 }
712 }
713
714 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
715 template <typename HashTranslator, typename T>
716 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTrait s, Allocator>::FullLookupType HashTable<Key, Value, Extractor, HashFunctions, Tr aits, KeyTraits, Allocator>::fullLookupForWriting(const T& key) {
717 ASSERT(!m_accessForbidden);
718 ASSERT(m_table);
719 registerModification();
720
721 ValueType* table = m_table;
722 size_t k = 0;
723 size_t sizeMask = tableSizeMask();
724 unsigned h = HashTranslator::hash(key);
725 size_t i = h & sizeMask;
726
727 UPDATE_ACCESS_COUNTS();
728
729 ValueType* deletedEntry = nullptr;
730
731 while (1) {
732 ValueType* entry = table + i;
733
734 if (isEmptyBucket(*entry))
735 return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
736
737 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
738 if (HashTranslator::equal(Extractor::extract(*entry), key))
739 return makeLookupResult(entry, true, h);
740
741 if (isDeletedBucket(*entry))
742 deletedEntry = entry;
743 } else {
744 if (isDeletedBucket(*entry))
745 deletedEntry = entry;
746 else if (HashTranslator::equal(Extractor::extract(*entry), key))
747 return makeLookupResult(entry, true, h);
748 }
749 UPDATE_PROBE_COUNTS();
750 if (!k)
751 k = 1 | doubleHash(h);
752 i = (i + k) & sizeMask;
753 }
754 }
755
756 template <bool emptyValueIsZero>
757 struct HashTableBucketInitializer;
758
759 template <>
760 struct HashTableBucketInitializer<false> {
761 template <typename Traits, typename Value>
762 static void initialize(Value& bucket) {
763 new (NotNull, &bucket) Value(Traits::emptyValue());
764 }
765 };
766
767 template <>
768 struct HashTableBucketInitializer<true> {
769 template <typename Traits, typename Value>
770 static void initialize(Value& bucket) {
771 // This initializes the bucket without copying the empty value. That
772 // makes it possible to use this with types that don't support copying.
773 // The memset to 0 looks like a slow operation but is optimized by the
774 // compilers.
775 memset(&bucket, 0, sizeof(bucket));
776 }
777 };
778
779 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
780 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, A llocator>::initializeBucket(ValueType& bucket) {
781 HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize<Trai ts>(bucket);
782 }
783
784 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
785 template <typename HashTranslator, typename T, typename Extra>
786 typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allo cator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTra its, Allocator>::add(const T& key, const Extra& extra) {
787 ASSERT(!m_accessForbidden);
788 ASSERT(Allocator::isAllocationAllowed());
789 if (!m_table)
790 expand();
791
792 ASSERT(m_table);
793
794 ValueType* table = m_table;
795 size_t k = 0;
796 size_t sizeMask = tableSizeMask();
797 unsigned h = HashTranslator::hash(key);
798 size_t i = h & sizeMask;
799
800 UPDATE_ACCESS_COUNTS();
801
802 ValueType* deletedEntry = nullptr;
803 ValueType* entry;
804 while (1) {
805 entry = table + i;
806
807 if (isEmptyBucket(*entry))
808 break;
809
810 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
811 if (HashTranslator::equal(Extractor::extract(*entry), key))
812 return AddResult(this, entry, false);
813
814 if (isDeletedBucket(*entry))
815 deletedEntry = entry;
816 } else {
817 if (isDeletedBucket(*entry))
818 deletedEntry = entry;
819 else if (HashTranslator::equal(Extractor::extract(*entry), key))
820 return AddResult(this, entry, false);
821 }
822 UPDATE_PROBE_COUNTS();
823 if (!k)
824 k = 1 | doubleHash(h);
825 i = (i + k) & sizeMask;
826 }
827
828 registerModification();
829
830 if (deletedEntry) {
831 // Overwrite any data left over from last use, using placement new or
832 // memset.
833 initializeBucket(*deletedEntry);
834 entry = deletedEntry;
835 --m_deletedCount;
836 }
837
838 HashTranslator::translate(*entry, key, extra);
839 ASSERT(!isEmptyOrDeletedBucket(*entry));
840
841 ++m_keyCount;
842
843 if (shouldExpand())
844 entry = expand(entry);
845
846 return AddResult(this, entry, true);
847 }
848
849 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
850 template <typename HashTranslator, typename T, typename Extra>
851 typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allo cator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTra its, Allocator>::addPassingHashCode(const T& key, const Extra& extra) {
852 ASSERT(!m_accessForbidden);
853 ASSERT(Allocator::isAllocationAllowed());
854 if (!m_table)
855 expand();
856
857 FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key);
858
859 ValueType* entry = lookupResult.first.first;
860 bool found = lookupResult.first.second;
861 unsigned h = lookupResult.second;
862
863 if (found)
864 return AddResult(this, entry, false);
865
866 registerModification();
867
868 if (isDeletedBucket(*entry)) {
869 initializeBucket(*entry);
870 --m_deletedCount;
871 }
872
873 HashTranslator::translate(*entry, key, extra, h);
874 ASSERT(!isEmptyOrDeletedBucket(*entry));
875
876 ++m_keyCount;
877 if (shouldExpand())
878 entry = expand(entry);
879
880 return AddResult(this, entry, true);
881 }
882
883 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
884 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::reinsert(ValueType& entry) {
885 ASSERT(m_table);
886 registerModification();
887 ASSERT(!lookupForWriting(Extractor::extract(entry)).second);
888 ASSERT(!isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first))) ;
889 #if DUMP_HASHTABLE_STATS
890 atomicIncrement(&HashTableStats::numReinserts);
891 #endif
892 #if DUMP_HASHTABLE_STATS_PER_TABLE
893 ++m_stats->numReinserts;
894 #endif
895 Value* newEntry = lookupForWriting(Extractor::extract(entry)).first;
896 Mover<ValueType, Allocator>::move(entry, *newEntry);
897
898 return newEntry;
899 }
900
901 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
902 template <typename HashTranslator, typename T>
903 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTrait s, Allocator>::iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key) {
904 ValueType* entry = lookup<HashTranslator>(key);
905 if (!entry)
906 return end();
907
908 return makeKnownGoodIterator(entry);
909 }
910
911 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
912 template <typename HashTranslator, typename T>
913 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTrait s, Allocator>::const_iterator HashTable<Key, Value, Extractor, HashFunctions, Tr aits, KeyTraits, Allocator>::find(const T& key) const {
914 ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
915 if (!entry)
916 return end();
917
918 return makeKnownGoodConstIterator(entry);
919 }
920
921 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
922 template <typename HashTranslator, typename T>
923 bool HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocato r>::contains(const T& key) const {
924 return const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
925 }
926
927 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
928 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocato r>::remove(ValueType* pos) {
929 registerModification();
930 #if DUMP_HASHTABLE_STATS
931 atomicIncrement(&HashTableStats::numRemoves);
932 #endif
933 #if DUMP_HASHTABLE_STATS_PER_TABLE
934 ++m_stats->numRemoves;
935 #endif
936
937 ASSERT(!m_accessForbidden);
938 #if ENABLE(ASSERT)
939 m_accessForbidden = true;
940 #endif
941 deleteBucket(*pos);
942 #if ENABLE(ASSERT)
943 m_accessForbidden = false;
944 #endif
945 ++m_deletedCount;
946 --m_keyCount;
947
948 if (shouldShrink())
949 shrink();
950 }
951
952 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
953 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, A llocator>::remove(iterator it) {
954 if (it == end())
955 return;
956 remove(const_cast<ValueType*>(it.m_iterator.m_position));
957 }
958
959 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
960 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, A llocator>::remove(const_iterator it) {
961 if (it == end())
962 return;
963 remove(const_cast<ValueType*>(it.m_position));
964 }
965
966 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
967 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, A llocator>::remove(KeyPeekInType key) {
968 remove(find(key));
969 }
970
971 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
972 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::allocateTable(unsigned size) {
973 size_t allocSize = size * sizeof(ValueType);
974 ValueType* result;
975 // Assert that we will not use memset on things with a vtable entry. The
976 // compiler will also check this on some platforms. We would like to check
977 // this on the whole value (key-value pair), but IsPolymorphic will return
978 // false for a pair of two types, even if one of the components is
979 // polymorphic.
980 static_assert(!Traits::emptyValueIsZero || !IsPolymorphic<KeyType>::value, "em pty value cannot be zero for things with a vtable");
981
982 #if ENABLE(OILPAN)
983 static_assert(Allocator::isGarbageCollected || ((!AllowsOnlyPlacementNew<KeyTy pe>::value || !NeedsTracing<KeyType>::value) && (!AllowsOnlyPlacementNew<ValueTy pe>::value || !NeedsTracing<ValueType>::value)), "Cannot put DISALLOW_NEW_EXCEPT _PLACEMENT_NEW objects that have trace methods into an off-heap HashTable");
984 #endif
985 if (Traits::emptyValueIsZero) {
986 result = Allocator::template allocateZeroedHashTableBacking<ValueType, HashT able>(allocSize);
987 } else {
988 result = Allocator::template allocateHashTableBacking<ValueType, HashTable>( allocSize);
989 for (unsigned i = 0; i < size; i++)
990 initializeBucket(result[i]);
991 }
992 return result;
993 }
994
995 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
996 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocato r>::deleteAllBucketsAndDeallocate(ValueType* table, unsigned size) {
997 if (!IsTriviallyDestructible<ValueType>::value) {
998 for (unsigned i = 0; i < size; ++i) {
999 // This code is called when the hash table is cleared or resized. We
1000 // have allocated a new backing store and we need to run the
1001 // destructors on the old backing store, as it is being freed. If we
1002 // are GCing we need to both call the destructor and mark the bucket
1003 // as deleted, otherwise the destructor gets called again when the
1004 // GC finds the backing store. With the default allocator it's
1005 // enough to call the destructor, since we will free the memory
1006 // explicitly and we won't see the memory with the bucket again.
1007 if (Allocator::isGarbageCollected) {
1008 if (!isEmptyOrDeletedBucket(table[i]))
1009 deleteBucket(table[i]);
1010 } else {
1011 if (!isDeletedBucket(table[i]))
1012 table[i].~ValueType();
1013 }
1014 }
1015 }
1016 Allocator::freeHashTableBacking(table);
1017 }
1018
1019 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1020 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::expand(Value* entry) {
1021 unsigned newSize;
1022 if (!m_tableSize) {
1023 newSize = KeyTraits::minimumTableSize;
1024 } else if (mustRehashInPlace()) {
1025 newSize = m_tableSize;
1026 } else {
1027 newSize = m_tableSize * 2;
1028 RELEASE_ASSERT(newSize > m_tableSize);
1029 }
1030
1031 return rehash(newSize, entry);
1032 }
1033
1034 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1035 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::expandBuffer(unsigned newTableSize, Value* entry, bool& success) {
1036 success = false;
1037 ASSERT(m_tableSize < newTableSize);
1038 if (!Allocator::expandHashTableBacking(m_table, newTableSize * sizeof(ValueTyp e)))
1039 return nullptr;
1040
1041 success = true;
1042
1043 Value* newEntry = nullptr;
1044 unsigned oldTableSize = m_tableSize;
1045 ValueType* originalTable = m_table;
1046
1047 ValueType* temporaryTable = allocateTable(oldTableSize);
1048 for (unsigned i = 0; i < oldTableSize; i++) {
1049 if (&m_table[i] == entry)
1050 newEntry = &temporaryTable[i];
1051 if (isEmptyOrDeletedBucket(m_table[i])) {
1052 ASSERT(&m_table[i] != entry);
1053 if (Traits::emptyValueIsZero) {
1054 memset(&temporaryTable[i], 0, sizeof(ValueType));
1055 } else {
1056 initializeBucket(temporaryTable[i]);
1057 }
1058 } else {
1059 Mover<ValueType, Allocator>::move(m_table[i], temporaryTable[i]);
1060 }
1061 }
1062 m_table = temporaryTable;
1063
1064 if (Traits::emptyValueIsZero) {
1065 memset(originalTable, 0, newTableSize * sizeof(ValueType));
1066 } else {
1067 for (unsigned i = 0; i < newTableSize; i++)
1068 initializeBucket(originalTable[i]);
1069 }
1070 newEntry = rehashTo(originalTable, newTableSize, newEntry);
1071
1072 ASSERT(!m_accessForbidden);
1073 #if ENABLE(ASSERT)
1074 m_accessForbidden = true;
1075 #endif
1076 deleteAllBucketsAndDeallocate(temporaryTable, oldTableSize);
1077 #if ENABLE(ASSERT)
1078 m_accessForbidden = false;
1079 #endif
1080
1081 return newEntry;
1082 }
1083
1084 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1085 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::rehashTo(ValueType* newTable, unsigned newTableSize, Value* entry) {
1086 unsigned oldTableSize = m_tableSize;
1087 ValueType* oldTable = m_table;
1088
1089 #if DUMP_HASHTABLE_STATS
1090 if (oldTableSize != 0)
1091 atomicIncrement(&HashTableStats::numRehashes);
1092 #endif
1093
1094 #if DUMP_HASHTABLE_STATS_PER_TABLE
1095 if (oldTableSize != 0)
1096 ++m_stats->numRehashes;
1097 #endif
1098
1099 m_table = newTable;
1100 m_tableSize = newTableSize;
1101
1102 Value* newEntry = nullptr;
1103 for (unsigned i = 0; i != oldTableSize; ++i) {
1104 if (isEmptyOrDeletedBucket(oldTable[i])) {
1105 ASSERT(&oldTable[i] != entry);
1106 continue;
1107 }
1108 Value* reinsertedEntry = reinsert(oldTable[i]);
1109 if (&oldTable[i] == entry) {
1110 ASSERT(!newEntry);
1111 newEntry = reinsertedEntry;
1112 }
1113 }
1114
1115 m_deletedCount = 0;
1116
1117 return newEntry;
1118 }
1119
1120 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1121 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::rehash(unsigned newTableSize, Value* entry) {
1122 unsigned oldTableSize = m_tableSize;
1123 ValueType* oldTable = m_table;
1124
1125 #if DUMP_HASHTABLE_STATS
1126 if (oldTableSize != 0)
1127 atomicIncrement(&HashTableStats::numRehashes);
1128 #endif
1129
1130 #if DUMP_HASHTABLE_STATS_PER_TABLE
1131 if (oldTableSize != 0)
1132 ++m_stats->numRehashes;
1133 #endif
1134
1135 // The Allocator::isGarbageCollected check is not needed. The check is just
1136 // a static hint for a compiler to indicate that Base::expandBuffer returns
1137 // false if Allocator is a PartitionAllocator.
1138 if (Allocator::isGarbageCollected && newTableSize > oldTableSize) {
1139 bool success;
1140 Value* newEntry = expandBuffer(newTableSize, entry, success);
1141 if (success)
1142 return newEntry;
1143 }
1144
1145 ValueType* newTable = allocateTable(newTableSize);
1146 Value* newEntry = rehashTo(newTable, newTableSize, entry);
1147
1148 ASSERT(!m_accessForbidden);
1149 #if ENABLE(ASSERT)
1150 m_accessForbidden = true;
1151 #endif
1152 deleteAllBucketsAndDeallocate(oldTable, oldTableSize);
1153 #if ENABLE(ASSERT)
1154 m_accessForbidden = false;
1155 #endif
1156
1157 return newEntry;
1158 }
1159
1160 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1161 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocato r>::clear() {
1162 registerModification();
1163 if (!m_table)
1164 return;
1165
1166 ASSERT(!m_accessForbidden);
1167 #if ENABLE(ASSERT)
1168 m_accessForbidden = true;
1169 #endif
1170 deleteAllBucketsAndDeallocate(m_table, m_tableSize);
1171 #if ENABLE(ASSERT)
1172 m_accessForbidden = false;
1173 #endif
1174 m_table = nullptr;
1175 m_tableSize = 0;
1176 m_keyCount = 0;
1177 }
1178
1179 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1180 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::H ashTable(const HashTable& other)
1181 : m_table(nullptr), m_tableSize(0), m_keyCount(0), m_deletedCount(0), m_queu eFlag(false)
1182 #if ENABLE(ASSERT)
1183 ,
1184 m_accessForbidden(false),
1185 m_modifications(0)
1186 #endif
1187 #if DUMP_HASHTABLE_STATS_PER_TABLE
1188 ,
1189 m_stats(adoptPtr(new Stats(*other.m_stats)))
1190 #endif
606 { 1191 {
607 key = ~key + (key >> 23); 1192 // Copy the hash table the dumb way, by adding each element to the new
608 key ^= (key << 12); 1193 // table. It might be more efficient to copy the table slots, but it's not
609 key ^= (key >> 7); 1194 // clear that efficiency is needed.
610 key ^= (key << 2); 1195 const_iterator end = other.end();
611 key ^= (key >> 20); 1196 for (const_iterator it = other.begin(); it != end; ++it)
612 return key; 1197 add(*it);
613 } 1198 }
614 1199
615 inline unsigned calculateCapacity(unsigned size) 1200 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
616 { 1201 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocato r>::swap(HashTable& other) {
617 for (unsigned mask = size; mask; mask >>= 1) 1202 ASSERT(!m_accessForbidden);
618 size |= mask; // 00110101010 -> 00111111111 1203 std::swap(m_table, other.m_table);
619 return (size + 1) * 2; // 00111111111 -> 10000000000 1204 std::swap(m_tableSize, other.m_tableSize);
620 } 1205 std::swap(m_keyCount, other.m_keyCount);
621 1206 // std::swap does not work for bit fields.
622 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator> 1207 unsigned deleted = m_deletedCount;
623 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocato r>::reserveCapacityForSize(unsigned newSize) 1208 m_deletedCount = other.m_deletedCount;
624 { 1209 other.m_deletedCount = deleted;
625 unsigned newCapacity = calculateCapacity(newSize); 1210 ASSERT(!m_queueFlag);
626 if (newCapacity < KeyTraits::minimumTableSize) 1211 ASSERT(!other.m_queueFlag);
627 newCapacity = KeyTraits::minimumTableSize; 1212
628 1213 #if ENABLE(ASSERT)
629 if (newCapacity > capacity()) { 1214 std::swap(m_modifications, other.m_modifications);
630 RELEASE_ASSERT(!static_cast<int>(newCapacity >> 31)); // HashTable capac ity should not overflow 32bit int. 1215 #endif
631 rehash(newCapacity, 0); 1216
632 }
633 }
634
635 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
636 template <typename HashTranslator, typename T>
637 inline Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::lookup(T key)
638 {
639 return const_cast<Value*>(const_cast<const HashTable*>(this)->lookup<HashTra nslator, T>(key));
640 }
641
642 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
643 template <typename HashTranslator, typename T>
644 inline const Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyT raits, Allocator>::lookup(T key) const
645 {
646 ASSERT(!m_accessForbidden);
647 ASSERT((HashTableKeyChecker<HashTranslator, KeyTraits, HashFunctions::safeTo CompareToEmptyOrDeleted>::checkKey(key)));
648 const ValueType* table = m_table;
649 if (!table)
650 return nullptr;
651
652 size_t k = 0;
653 size_t sizeMask = tableSizeMask();
654 unsigned h = HashTranslator::hash(key);
655 size_t i = h & sizeMask;
656
657 UPDATE_ACCESS_COUNTS();
658
659 while (1) {
660 const ValueType* entry = table + i;
661
662 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
663 if (HashTranslator::equal(Extractor::extract(*entry), key))
664 return entry;
665
666 if (isEmptyBucket(*entry))
667 return nullptr;
668 } else {
669 if (isEmptyBucket(*entry))
670 return nullptr;
671
672 if (!isDeletedBucket(*entry) && HashTranslator::equal(Extractor::ext ract(*entry), key))
673 return entry;
674 }
675 UPDATE_PROBE_COUNTS();
676 if (!k)
677 k = 1 | doubleHash(h);
678 i = (i + k) & sizeMask;
679 }
680 }
681
682 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
683 template <typename HashTranslator, typename T>
684 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTrait s, Allocator>::LookupType HashTable<Key, Value, Extractor, HashFunctions, Traits , KeyTraits, Allocator>::lookupForWriting(const T& key)
685 {
686 ASSERT(!m_accessForbidden);
687 ASSERT(m_table);
688 registerModification();
689
690 ValueType* table = m_table;
691 size_t k = 0;
692 size_t sizeMask = tableSizeMask();
693 unsigned h = HashTranslator::hash(key);
694 size_t i = h & sizeMask;
695
696 UPDATE_ACCESS_COUNTS();
697
698 ValueType* deletedEntry = nullptr;
699
700 while (1) {
701 ValueType* entry = table + i;
702
703 if (isEmptyBucket(*entry))
704 return LookupType(deletedEntry ? deletedEntry : entry, false);
705
706 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
707 if (HashTranslator::equal(Extractor::extract(*entry), key))
708 return LookupType(entry, true);
709
710 if (isDeletedBucket(*entry))
711 deletedEntry = entry;
712 } else {
713 if (isDeletedBucket(*entry))
714 deletedEntry = entry;
715 else if (HashTranslator::equal(Extractor::extract(*entry), key))
716 return LookupType(entry, true);
717 }
718 UPDATE_PROBE_COUNTS();
719 if (!k)
720 k = 1 | doubleHash(h);
721 i = (i + k) & sizeMask;
722 }
723 }
724
725 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
726 template <typename HashTranslator, typename T>
727 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTrait s, Allocator>::FullLookupType HashTable<Key, Value, Extractor, HashFunctions, Tr aits, KeyTraits, Allocator>::fullLookupForWriting(const T& key)
728 {
729 ASSERT(!m_accessForbidden);
730 ASSERT(m_table);
731 registerModification();
732
733 ValueType* table = m_table;
734 size_t k = 0;
735 size_t sizeMask = tableSizeMask();
736 unsigned h = HashTranslator::hash(key);
737 size_t i = h & sizeMask;
738
739 UPDATE_ACCESS_COUNTS();
740
741 ValueType* deletedEntry = nullptr;
742
743 while (1) {
744 ValueType* entry = table + i;
745
746 if (isEmptyBucket(*entry))
747 return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
748
749 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
750 if (HashTranslator::equal(Extractor::extract(*entry), key))
751 return makeLookupResult(entry, true, h);
752
753 if (isDeletedBucket(*entry))
754 deletedEntry = entry;
755 } else {
756 if (isDeletedBucket(*entry))
757 deletedEntry = entry;
758 else if (HashTranslator::equal(Extractor::extract(*entry), key))
759 return makeLookupResult(entry, true, h);
760 }
761 UPDATE_PROBE_COUNTS();
762 if (!k)
763 k = 1 | doubleHash(h);
764 i = (i + k) & sizeMask;
765 }
766 }
767
768 template <bool emptyValueIsZero> struct HashTableBucketInitializer;
769
770 template <> struct HashTableBucketInitializer<false> {
771 template <typename Traits, typename Value> static void initialize(Value& buc ket)
772 {
773 new (NotNull, &bucket) Value(Traits::emptyValue());
774 }
775 };
776
777 template <> struct HashTableBucketInitializer<true> {
778 template <typename Traits, typename Value> static void initialize(Value& buc ket)
779 {
780 // This initializes the bucket without copying the empty value. That
781 // makes it possible to use this with types that don't support copying.
782 // The memset to 0 looks like a slow operation but is optimized by the
783 // compilers.
784 memset(&bucket, 0, sizeof(bucket));
785 }
786 };
787
788 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
789 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, A llocator>::initializeBucket(ValueType& bucket)
790 {
791 HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize<Tr aits>(bucket);
792 }
793
794 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
795 template <typename HashTranslator, typename T, typename Extra>
796 typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allo cator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTra its, Allocator>::add(const T& key, const Extra& extra)
797 {
798 ASSERT(!m_accessForbidden);
799 ASSERT(Allocator::isAllocationAllowed());
800 if (!m_table)
801 expand();
802
803 ASSERT(m_table);
804
805 ValueType* table = m_table;
806 size_t k = 0;
807 size_t sizeMask = tableSizeMask();
808 unsigned h = HashTranslator::hash(key);
809 size_t i = h & sizeMask;
810
811 UPDATE_ACCESS_COUNTS();
812
813 ValueType* deletedEntry = nullptr;
814 ValueType* entry;
815 while (1) {
816 entry = table + i;
817
818 if (isEmptyBucket(*entry))
819 break;
820
821 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
822 if (HashTranslator::equal(Extractor::extract(*entry), key))
823 return AddResult(this, entry, false);
824
825 if (isDeletedBucket(*entry))
826 deletedEntry = entry;
827 } else {
828 if (isDeletedBucket(*entry))
829 deletedEntry = entry;
830 else if (HashTranslator::equal(Extractor::extract(*entry), key))
831 return AddResult(this, entry, false);
832 }
833 UPDATE_PROBE_COUNTS();
834 if (!k)
835 k = 1 | doubleHash(h);
836 i = (i + k) & sizeMask;
837 }
838
839 registerModification();
840
841 if (deletedEntry) {
842 // Overwrite any data left over from last use, using placement new or
843 // memset.
844 initializeBucket(*deletedEntry);
845 entry = deletedEntry;
846 --m_deletedCount;
847 }
848
849 HashTranslator::translate(*entry, key, extra);
850 ASSERT(!isEmptyOrDeletedBucket(*entry));
851
852 ++m_keyCount;
853
854 if (shouldExpand())
855 entry = expand(entry);
856
857 return AddResult(this, entry, true);
858 }
859
860 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
861 template <typename HashTranslator, typename T, typename Extra>
862 typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allo cator>::AddResult HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTra its, Allocator>::addPassingHashCode(const T& key, const Extra& extra)
863 {
864 ASSERT(!m_accessForbidden);
865 ASSERT(Allocator::isAllocationAllowed());
866 if (!m_table)
867 expand();
868
869 FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key);
870
871 ValueType* entry = lookupResult.first.first;
872 bool found = lookupResult.first.second;
873 unsigned h = lookupResult.second;
874
875 if (found)
876 return AddResult(this, entry, false);
877
878 registerModification();
879
880 if (isDeletedBucket(*entry)) {
881 initializeBucket(*entry);
882 --m_deletedCount;
883 }
884
885 HashTranslator::translate(*entry, key, extra, h);
886 ASSERT(!isEmptyOrDeletedBucket(*entry));
887
888 ++m_keyCount;
889 if (shouldExpand())
890 entry = expand(entry);
891
892 return AddResult(this, entry, true);
893 }
894
895 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
896 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::reinsert(ValueType& entry)
897 {
898 ASSERT(m_table);
899 registerModification();
900 ASSERT(!lookupForWriting(Extractor::extract(entry)).second);
901 ASSERT(!isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first) ));
902 #if DUMP_HASHTABLE_STATS
903 atomicIncrement(&HashTableStats::numReinserts);
904 #endif
905 #if DUMP_HASHTABLE_STATS_PER_TABLE 1217 #if DUMP_HASHTABLE_STATS_PER_TABLE
906 ++m_stats->numReinserts; 1218 m_stats.swap(other.m_stats);
907 #endif 1219 #endif
908 Value* newEntry = lookupForWriting(Extractor::extract(entry)).first; 1220 }
909 Mover<ValueType, Allocator>::move(entry, *newEntry); 1221
910 1222 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
911 return newEntry; 1223 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>& H ashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::op erator=(const HashTable& other) {
912 } 1224 HashTable tmp(other);
913 1225 swap(tmp);
914 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator> 1226 return *this;
915 template <typename HashTranslator, typename T>
916 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTrait s, Allocator>::iterator HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::find(const T& key)
917 {
918 ValueType* entry = lookup<HashTranslator>(key);
919 if (!entry)
920 return end();
921
922 return makeKnownGoodIterator(entry);
923 }
924
925 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
926 template <typename HashTranslator, typename T>
927 inline typename HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTrait s, Allocator>::const_iterator HashTable<Key, Value, Extractor, HashFunctions, Tr aits, KeyTraits, Allocator>::find(const T& key) const
928 {
929 ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key) ;
930 if (!entry)
931 return end();
932
933 return makeKnownGoodConstIterator(entry);
934 }
935
936 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
937 template <typename HashTranslator, typename T>
938 bool HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocato r>::contains(const T& key) const
939 {
940 return const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
941 }
942
943 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
944 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocato r>::remove(ValueType* pos)
945 {
946 registerModification();
947 #if DUMP_HASHTABLE_STATS
948 atomicIncrement(&HashTableStats::numRemoves);
949 #endif
950 #if DUMP_HASHTABLE_STATS_PER_TABLE
951 ++m_stats->numRemoves;
952 #endif
953
954 ASSERT(!m_accessForbidden);
955 #if ENABLE(ASSERT)
956 m_accessForbidden = true;
957 #endif
958 deleteBucket(*pos);
959 #if ENABLE(ASSERT)
960 m_accessForbidden = false;
961 #endif
962 ++m_deletedCount;
963 --m_keyCount;
964
965 if (shouldShrink())
966 shrink();
967 }
968
969 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
970 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, A llocator>::remove(iterator it)
971 {
972 if (it == end())
973 return;
974 remove(const_cast<ValueType*>(it.m_iterator.m_position));
975 }
976
977 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
978 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, A llocator>::remove(const_iterator it)
979 {
980 if (it == end())
981 return;
982 remove(const_cast<ValueType*>(it.m_position));
983 }
984
985 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
986 inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, A llocator>::remove(KeyPeekInType key)
987 {
988 remove(find(key));
989 }
990
991 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
992 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::allocateTable(unsigned size)
993 {
994 size_t allocSize = size * sizeof(ValueType);
995 ValueType* result;
996 // Assert that we will not use memset on things with a vtable entry. The
997 // compiler will also check this on some platforms. We would like to check
998 // this on the whole value (key-value pair), but IsPolymorphic will return
999 // false for a pair of two types, even if one of the components is
1000 // polymorphic.
1001 static_assert(!Traits::emptyValueIsZero || !IsPolymorphic<KeyType>::value, " empty value cannot be zero for things with a vtable");
1002
1003 #if ENABLE(OILPAN)
1004 static_assert(Allocator::isGarbageCollected
1005 || ((!AllowsOnlyPlacementNew<KeyType>::value || !NeedsTracing<KeyType>:: value)
1006 && (!AllowsOnlyPlacementNew<ValueType>::value || !NeedsTracing<ValueType >::value))
1007 , "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects that have trace methods into an off-heap HashTable");
1008 #endif
1009 if (Traits::emptyValueIsZero) {
1010 result = Allocator::template allocateZeroedHashTableBacking<ValueType, H ashTable>(allocSize);
1011 } else {
1012 result = Allocator::template allocateHashTableBacking<ValueType, HashTab le>(allocSize);
1013 for (unsigned i = 0; i < size; i++)
1014 initializeBucket(result[i]);
1015 }
1016 return result;
1017 }
1018
1019 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1020 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocato r>::deleteAllBucketsAndDeallocate(ValueType* table, unsigned size)
1021 {
1022 if (!IsTriviallyDestructible<ValueType>::value) {
1023 for (unsigned i = 0; i < size; ++i) {
1024 // This code is called when the hash table is cleared or resized. We
1025 // have allocated a new backing store and we need to run the
1026 // destructors on the old backing store, as it is being freed. If we
1027 // are GCing we need to both call the destructor and mark the bucket
1028 // as deleted, otherwise the destructor gets called again when the
1029 // GC finds the backing store. With the default allocator it's
1030 // enough to call the destructor, since we will free the memory
1031 // explicitly and we won't see the memory with the bucket again.
1032 if (Allocator::isGarbageCollected) {
1033 if (!isEmptyOrDeletedBucket(table[i]))
1034 deleteBucket(table[i]);
1035 } else {
1036 if (!isDeletedBucket(table[i]))
1037 table[i].~ValueType();
1038 }
1039 }
1040 }
1041 Allocator::freeHashTableBacking(table);
1042 }
1043
1044 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1045 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::expand(Value* entry)
1046 {
1047 unsigned newSize;
1048 if (!m_tableSize) {
1049 newSize = KeyTraits::minimumTableSize;
1050 } else if (mustRehashInPlace()) {
1051 newSize = m_tableSize;
1052 } else {
1053 newSize = m_tableSize * 2;
1054 RELEASE_ASSERT(newSize > m_tableSize);
1055 }
1056
1057 return rehash(newSize, entry);
1058 }
1059
1060 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1061 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::expandBuffer(unsigned newTableSize, Value* entry, bool& success)
1062 {
1063 success = false;
1064 ASSERT(m_tableSize < newTableSize);
1065 if (!Allocator::expandHashTableBacking(m_table, newTableSize * sizeof(ValueT ype)))
1066 return nullptr;
1067
1068 success = true;
1069
1070 Value* newEntry = nullptr;
1071 unsigned oldTableSize = m_tableSize;
1072 ValueType* originalTable = m_table;
1073
1074 ValueType* temporaryTable = allocateTable(oldTableSize);
1075 for (unsigned i = 0; i < oldTableSize; i++) {
1076 if (&m_table[i] == entry)
1077 newEntry = &temporaryTable[i];
1078 if (isEmptyOrDeletedBucket(m_table[i])) {
1079 ASSERT(&m_table[i] != entry);
1080 if (Traits::emptyValueIsZero) {
1081 memset(&temporaryTable[i], 0, sizeof(ValueType));
1082 } else {
1083 initializeBucket(temporaryTable[i]);
1084 }
1085 } else {
1086 Mover<ValueType, Allocator>::move(m_table[i], temporaryTable[i]);
1087 }
1088 }
1089 m_table = temporaryTable;
1090
1091 if (Traits::emptyValueIsZero) {
1092 memset(originalTable, 0, newTableSize * sizeof(ValueType));
1093 } else {
1094 for (unsigned i = 0; i < newTableSize; i++)
1095 initializeBucket(originalTable[i]);
1096 }
1097 newEntry = rehashTo(originalTable, newTableSize, newEntry);
1098
1099 ASSERT(!m_accessForbidden);
1100 #if ENABLE(ASSERT)
1101 m_accessForbidden = true;
1102 #endif
1103 deleteAllBucketsAndDeallocate(temporaryTable, oldTableSize);
1104 #if ENABLE(ASSERT)
1105 m_accessForbidden = false;
1106 #endif
1107
1108 return newEntry;
1109 }
1110
1111 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1112 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::rehashTo(ValueType* newTable, unsigned newTableSize, Value* entry)
1113 {
1114 unsigned oldTableSize = m_tableSize;
1115 ValueType* oldTable = m_table;
1116
1117 #if DUMP_HASHTABLE_STATS
1118 if (oldTableSize != 0)
1119 atomicIncrement(&HashTableStats::numRehashes);
1120 #endif
1121
1122 #if DUMP_HASHTABLE_STATS_PER_TABLE
1123 if (oldTableSize != 0)
1124 ++m_stats->numRehashes;
1125 #endif
1126
1127 m_table = newTable;
1128 m_tableSize = newTableSize;
1129
1130 Value* newEntry = nullptr;
1131 for (unsigned i = 0; i != oldTableSize; ++i) {
1132 if (isEmptyOrDeletedBucket(oldTable[i])) {
1133 ASSERT(&oldTable[i] != entry);
1134 continue;
1135 }
1136 Value* reinsertedEntry = reinsert(oldTable[i]);
1137 if (&oldTable[i] == entry) {
1138 ASSERT(!newEntry);
1139 newEntry = reinsertedEntry;
1140 }
1141 }
1142
1143 m_deletedCount = 0;
1144
1145 return newEntry;
1146 }
1147
1148 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1149 Value* HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Alloca tor>::rehash(unsigned newTableSize, Value* entry)
1150 {
1151 unsigned oldTableSize = m_tableSize;
1152 ValueType* oldTable = m_table;
1153
1154 #if DUMP_HASHTABLE_STATS
1155 if (oldTableSize != 0)
1156 atomicIncrement(&HashTableStats::numRehashes);
1157 #endif
1158
1159 #if DUMP_HASHTABLE_STATS_PER_TABLE
1160 if (oldTableSize != 0)
1161 ++m_stats->numRehashes;
1162 #endif
1163
1164 // The Allocator::isGarbageCollected check is not needed. The check is just
1165 // a static hint for a compiler to indicate that Base::expandBuffer returns
1166 // false if Allocator is a PartitionAllocator.
1167 if (Allocator::isGarbageCollected && newTableSize > oldTableSize) {
1168 bool success;
1169 Value* newEntry = expandBuffer(newTableSize, entry, success);
1170 if (success)
1171 return newEntry;
1172 }
1173
1174 ValueType* newTable = allocateTable(newTableSize);
1175 Value* newEntry = rehashTo(newTable, newTableSize, entry);
1176
1177 ASSERT(!m_accessForbidden);
1178 #if ENABLE(ASSERT)
1179 m_accessForbidden = true;
1180 #endif
1181 deleteAllBucketsAndDeallocate(oldTable, oldTableSize);
1182 #if ENABLE(ASSERT)
1183 m_accessForbidden = false;
1184 #endif
1185
1186 return newEntry;
1187 }
1188
1189 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1190 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocato r>::clear()
1191 {
1192 registerModification();
1193 if (!m_table)
1194 return;
1195
1196 ASSERT(!m_accessForbidden);
1197 #if ENABLE(ASSERT)
1198 m_accessForbidden = true;
1199 #endif
1200 deleteAllBucketsAndDeallocate(m_table, m_tableSize);
1201 #if ENABLE(ASSERT)
1202 m_accessForbidden = false;
1203 #endif
1204 m_table = nullptr;
1205 m_tableSize = 0;
1206 m_keyCount = 0;
1207 }
1208
1209 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1210 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::H ashTable(const HashTable& other)
1211 : m_table(nullptr)
1212 , m_tableSize(0)
1213 , m_keyCount(0)
1214 , m_deletedCount(0)
1215 , m_queueFlag(false)
1216 #if ENABLE(ASSERT)
1217 , m_accessForbidden(false)
1218 , m_modifications(0)
1219 #endif
1220 #if DUMP_HASHTABLE_STATS_PER_TABLE
1221 , m_stats(adoptPtr(new Stats(*other.m_stats)))
1222 #endif
1223 {
1224 // Copy the hash table the dumb way, by adding each element to the new
1225 // table. It might be more efficient to copy the table slots, but it's not
1226 // clear that efficiency is needed.
1227 const_iterator end = other.end();
1228 for (const_iterator it = other.begin(); it != end; ++it)
1229 add(*it);
1230 }
1231
1232 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1233 void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocato r>::swap(HashTable& other)
1234 {
1235 ASSERT(!m_accessForbidden);
1236 std::swap(m_table, other.m_table);
1237 std::swap(m_tableSize, other.m_tableSize);
1238 std::swap(m_keyCount, other.m_keyCount);
1239 // std::swap does not work for bit fields.
1240 unsigned deleted = m_deletedCount;
1241 m_deletedCount = other.m_deletedCount;
1242 other.m_deletedCount = deleted;
1243 ASSERT(!m_queueFlag);
1244 ASSERT(!other.m_queueFlag);
1245
1246 #if ENABLE(ASSERT)
1247 std::swap(m_modifications, other.m_modifications);
1248 #endif
1249
1250 #if DUMP_HASHTABLE_STATS_PER_TABLE
1251 m_stats.swap(other.m_stats);
1252 #endif
1253 }
1254
1255 template <typename Key, typename Value, typename Extractor, typename HashFunctio ns, typename Traits, typename KeyTraits, typename Allocator>
1256 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>& H ashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::op erator=(const HashTable& other)
1257 {
1258 HashTable tmp(other);
1259 swap(tmp);
1260 return *this;
1261 } 1227 }
1262 1228
// Helper that implements the garbage-collection callbacks for hash tables.
// The primary template is only declared; it is specialized below on whether
// the table's traits request weak handling of its entries.
template <WeakHandlingFlag weakHandlingFlag, typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
struct WeakProcessingHashTableHelper;

// Specialization for tables with no weak entries: every GC hook is an
// intentional no-op, so such tables pay nothing at weak-processing time.
template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
struct WeakProcessingHashTableHelper<NoWeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
    static void process(typename Allocator::Visitor* visitor, void* closure) {}
    static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure) {}
    static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure) {}
};
1272 1238
// Specialization for tables whose entries require weak handling. |closure|
// is always the HashTable itself, registered by HashTable::trace below.
template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
struct WeakProcessingHashTableHelper<WeakHandlingInCollections, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> {
    // Used for purely weak and for weak-and-strong tables (ephemerons).
    // Walks every occupied bucket and deletes the ones whose weak references
    // did not survive the mark phase.
    static void process(typename Allocator::Visitor* visitor, void* closure)
    {
        typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
        HashTableType* table = reinterpret_cast<HashTableType*>(closure);
        if (!table->m_table)
            return;
        // Now perform weak processing (this is a no-op if the backing was
        // accessible through an iterator and was already marked strongly).
        typedef typename HashTableType::ValueType ValueType;
        for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
            if (!HashTableType::isEmptyOrDeletedBucket(*element)) {
                // At this stage calling trace can make no difference
                // (everything is already traced), but we use the return value
                // to remove things from the collection.

                // FIXME: This should be rewritten so that this can check if the
                // element is dead without calling trace, which is semantically
                // not correct to be called in weak processing stage.
                if (TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element)) {
                    table->registerModification();
                    HashTableType::deleteBucket(*element); // Also calls the destructor.
                    table->m_deletedCount++;
                    table->m_keyCount--;
                    // We don't rehash the backing until the next add or delete,
                    // because that would cause allocation during GC.
                }
            }
        }
    }

    // Called repeatedly for tables that have both weak and strong pointers.
    static void ephemeronIteration(typename Allocator::Visitor* visitor, void* closure)
    {
        typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
        HashTableType* table = reinterpret_cast<HashTableType*>(closure);
        ASSERT(table->m_table);
        // Check the hash table for elements that we now know will not be
        // removed by weak processing. Those elements need to have their strong
        // pointers traced.
        typedef typename HashTableType::ValueType ValueType;
        for (ValueType* element = table->m_table + table->m_tableSize - 1; element >= table->m_table; element--) {
            if (!HashTableType::isEmptyOrDeletedBucket(*element))
                TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, ValueType, Traits>::trace(visitor, *element);
        }
    }

    // Called when the ephemeron iteration is done and before running the per
    // thread weak processing. It is guaranteed to be called before any thread
    // is resumed.
    static void ephemeronIterationDone(typename Allocator::Visitor* visitor, void* closure)
    {
        typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator> HashTableType;
        HashTableType* table = reinterpret_cast<HashTableType*>(closure);
        // The table must have been registered (by trace) before it can be
        // dequeued here.
        ASSERT(Allocator::weakTableRegistered(visitor, table));
        table->clearEnqueued();
    }
};
1333 1296
// Traces the table's backing store for garbage collection. Strong-only
// tables mark the backing without tracing elements here; weak tables defer
// both marking and weak processing via the callbacks registered below.
template <typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits, typename Allocator>
template <typename VisitorDispatcher>
void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::trace(VisitorDispatcher visitor)
{
    // If someone else already marked the backing and queued up the trace and/or
    // weak callback then we are done. This optimization does not happen for
    // ListHashSet since its iterator does not point at the backing.
    if (!m_table || Allocator::isHeapObjectAlive(m_table))
        return;
    // Normally, we mark the backing store without performing trace. This means
    // it is marked live, but the pointers inside it are not marked. Instead we
    // will mark the pointers below. However, for backing stores that contain
    // weak pointers the handling is rather different. We don't mark the
    // backing store here, so the marking GC will leave the backing unmarked. If
    // the backing is found in any other way than through its HashTable (ie from
    // an iterator) then the mark bit will be set and the pointers will be
    // marked strongly, avoiding problems with iterating over things that
    // disappear due to weak processing while we are iterating over them. We
    // register the backing store pointer for delayed marking which will take
    // place after we know if the backing is reachable from elsewhere. We also
    // register a weakProcessing callback which will perform weak processing if
    // needed.
    if (Traits::weakHandlingFlag == NoWeakHandlingInCollections) {
        Allocator::markNoTracing(visitor, m_table);
    } else {
        Allocator::registerDelayedMarkNoTracing(visitor, m_table);
        // Since we're delaying marking this HashTable, it is possible that the
        // registerWeakMembers is called multiple times (in rare
        // cases). However, it shouldn't cause any issue.
        Allocator::registerWeakMembers(visitor, this, m_table, WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::process);
    }
    if (NeedsTracingTrait<Traits>::value) {
        if (Traits::weakHandlingFlag == WeakHandlingInCollections) {
            // If we have both strong and weak pointers in the collection then
            // we queue up the collection for fixed point iteration a la
            // Ephemerons:
            // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also
            // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak
            ASSERT(!enqueued() || Allocator::weakTableRegistered(visitor, this));
            if (!enqueued()) {
                Allocator::registerWeakTable(visitor, this,
                    WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIteration,
                    WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::ephemeronIterationDone);
                setEnqueued();
            }
            // We don't need to trace the elements here, since registering as a
            // weak table above will cause them to be traced (perhaps several
            // times). It's better to wait until everything else is traced
            // before tracing the elements for the first time; this may reduce
            // (by one) the number of iterations needed to get to a fixed point.
            return;
        }
        // Strong-only elements: trace each occupied bucket directly.
        for (ValueType* element = m_table + m_tableSize - 1; element >= m_table; element--) {
            if (!isEmptyOrDeletedBucket(*element))
                Allocator::template trace<VisitorDispatcher, ValueType, Traits>(visitor, *element);
        }
    }
}
1392 1354
// iterator adapters

// Adapts a HashTable const_iterator so dereferencing goes through the value
// traits' get/reference conversions.
template <typename HashTableType, typename Traits>
struct HashTableConstIteratorAdapter {
  typedef typename Traits::IteratorConstGetType GetType;
  typedef typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType;

  HashTableConstIteratorAdapter() {}
  HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {}

  GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
  typename Traits::IteratorConstReferenceType operator*() const { return Traits::getToReferenceConstConversion(get()); }
  GetType operator->() const { return get(); }

  HashTableConstIteratorAdapter& operator++() {
    ++m_impl;
    return *this;
  }
  // postfix ++ intentionally omitted

  typename HashTableType::const_iterator m_impl;
};
1410 1372 }
1411 template <typename HashTableType, typename Traits> struct HashTableIteratorAdapt er { 1373 // postfix ++ intentionally omitted
1412 typedef typename Traits::IteratorGetType GetType; 1374
1413 typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType; 1375 typename HashTableType::const_iterator m_impl;
1414 1376 };
1415 HashTableIteratorAdapter() {} 1377
1416 HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_i mpl(impl) {} 1378 template <typename HashTableType, typename Traits>
1417 1379 struct HashTableIteratorAdapter {
1418 GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get()) ); } 1380 typedef typename Traits::IteratorGetType GetType;
1419 typename Traits::IteratorReferenceType operator*() const { return Traits::ge tToReferenceConversion(get()); } 1381 typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType;
1420 GetType operator->() const { return get(); } 1382
1421 1383 HashTableIteratorAdapter() {}
1422 HashTableIteratorAdapter& operator++() { ++m_impl; return *this; } 1384 HashTableIteratorAdapter(const typename HashTableType::iterator& impl)
1423 // postfix ++ intentionally omitted 1385 : m_impl(impl) {}
1424 1386
1425 operator HashTableConstIteratorAdapter<HashTableType, Traits>() 1387 GetType get() const { return const_cast<GetType>(SourceGetType(m_impl.get())); }
1426 { 1388 typename Traits::IteratorReferenceType operator*() const { return Traits::getT oReferenceConversion(get()); }
1427 typename HashTableType::const_iterator i = m_impl; 1389 GetType operator->() const { return get(); }
1428 return i; 1390
1429 } 1391 HashTableIteratorAdapter& operator++() {
1430 1392 ++m_impl;
1431 typename HashTableType::iterator m_impl; 1393 return *this;
1432 }; 1394 }
1433 1395 // postfix ++ intentionally omitted
1434 template <typename T, typename U> 1396
1435 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashT ableConstIteratorAdapter<T, U>& b) 1397 operator HashTableConstIteratorAdapter<HashTableType, Traits>() {
1436 { 1398 typename HashTableType::const_iterator i = m_impl;
1437 return a.m_impl == b.m_impl; 1399 return i;
1438 } 1400 }
1439 1401
1440 template <typename T, typename U> 1402 typename HashTableType::iterator m_impl;
1441 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashT ableConstIteratorAdapter<T, U>& b) 1403 };
1442 { 1404
1443 return a.m_impl != b.m_impl; 1405 template <typename T, typename U>
1444 } 1406 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashT ableConstIteratorAdapter<T, U>& b) {
1445 1407 return a.m_impl == b.m_impl;
1446 template <typename T, typename U> 1408 }
1447 inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableI teratorAdapter<T, U>& b) 1409
1448 { 1410 template <typename T, typename U>
1449 return a.m_impl == b.m_impl; 1411 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashT ableConstIteratorAdapter<T, U>& b) {
1450 } 1412 return a.m_impl != b.m_impl;
1451 1413 }
1452 template <typename T, typename U> 1414
1453 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableI teratorAdapter<T, U>& b) 1415 template <typename T, typename U>
1454 { 1416 inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableI teratorAdapter<T, U>& b) {
1455 return a.m_impl != b.m_impl; 1417 return a.m_impl == b.m_impl;
1418 }
1419
1420 template <typename T, typename U>
1421 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableI teratorAdapter<T, U>& b) {
1422 return a.m_impl != b.m_impl;
1456 } 1423 }
1457 1424
1458 // All 4 combinations of ==, != and Const,non const. 1425 // All 4 combinations of ==, != and Const,non const.
1459 template <typename T, typename U> 1426 template <typename T, typename U>
1460 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashT ableIteratorAdapter<T, U>& b) 1427 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashT ableIteratorAdapter<T, U>& b) {
1461 { 1428 return a.m_impl == b.m_impl;
1462 return a.m_impl == b.m_impl; 1429 }
1463 } 1430
1464 1431 template <typename T, typename U>
1465 template <typename T, typename U> 1432 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashT ableIteratorAdapter<T, U>& b) {
1466 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashT ableIteratorAdapter<T, U>& b) 1433 return a.m_impl != b.m_impl;
1467 { 1434 }
1468 return a.m_impl != b.m_impl; 1435
1469 } 1436 template <typename T, typename U>
1470 1437 inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableC onstIteratorAdapter<T, U>& b) {
1471 template <typename T, typename U> 1438 return a.m_impl == b.m_impl;
1472 inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableC onstIteratorAdapter<T, U>& b) 1439 }
1473 { 1440
1474 return a.m_impl == b.m_impl; 1441 template <typename T, typename U>
1475 } 1442 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableC onstIteratorAdapter<T, U>& b) {
1476 1443 return a.m_impl != b.m_impl;
1477 template <typename T, typename U>
1478 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableC onstIteratorAdapter<T, U>& b)
1479 {
1480 return a.m_impl != b.m_impl;
1481 } 1444 }
1482 1445
// Removes from |collection| every element contained in |toBeRemoved|.
// A no-op when either collection is empty.
template <typename Collection1, typename Collection2>
inline void removeAll(Collection1& collection, const Collection2& toBeRemoved) {
  if (collection.isEmpty() || toBeRemoved.isEmpty())
    return;
  typedef typename Collection2::const_iterator CollectionIterator;
  CollectionIterator last(toBeRemoved.end());
  for (CollectionIterator current(toBeRemoved.begin()); current != last; ++current)
    collection.remove(*current);
}
1493 1456 } // namespace WTF
1494 } // namespace WTF
1495 1457
1496 #include "wtf/HashIterators.h" 1458 #include "wtf/HashIterators.h"
1497 1459
1498 #endif // WTF_HashTable_h 1460 #endif // WTF_HashTable_h
OLDNEW
« no previous file with comments | « third_party/WebKit/Source/wtf/HashSetTest.cpp ('k') | third_party/WebKit/Source/wtf/HashTable.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698