Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: third_party/WebKit/Source/wtf/HashTable.h

Issue 2769603002: Move files in wtf/ to platform/wtf/ (Part 8). (Closed)
Patch Set: Rebase. Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « third_party/WebKit/Source/wtf/HashSet.h ('k') | third_party/WebKit/Source/wtf/HashTable.cpp » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 // Copyright 2017 The Chromium Authors. All rights reserved.
2 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights 2 // Use of this source code is governed by a BSD-style license that can be
3 * reserved. 3 // found in the LICENSE file.
4 * Copyright (C) 2008 David Levin <levin@chromium.org>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU Library General Public License
17 * along with this library; see the file COPYING.LIB. If not, write to
18 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
19 * Boston, MA 02110-1301, USA.
20 *
21 */
22 4
23 #ifndef WTF_HashTable_h 5 #include "platform/wtf/HashTable.h"
24 #define WTF_HashTable_h
25 6
26 #include "wtf/Alignment.h" 7 // The contents of this header was moved to platform/wtf as part of
27 #include "wtf/Allocator.h" 8 // WTF migration project. See the following post for details:
 28 #include "wtf/Assertions.h" 9 // https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
29 #include "wtf/ConditionalDestructor.h"
30 #include "wtf/HashTraits.h"
31 #include "wtf/PtrUtil.h"
32 #include "wtf/allocator/PartitionAllocator.h"
33 #include <memory>
34
// Compile-time switches for hash-table instrumentation. Both default to off;
// set to 1 locally to collect probe/access statistics.
#define DUMP_HASHTABLE_STATS 0
#define DUMP_HASHTABLE_STATS_PER_TABLE 0

#if DUMP_HASHTABLE_STATS
#include "wtf/Atomics.h"
#include "wtf/Threading.h"
#endif

#if DUMP_HASHTABLE_STATS_PER_TABLE
#include "wtf/DataLog.h"
#include <type_traits>
#endif

// UPDATE_PROBE_COUNTS() / UPDATE_ACCESS_COUNTS() expand to the bookkeeping
// required by whichever stats modes are enabled, and to empty statements
// when both are off.
#if DUMP_HASHTABLE_STATS
#if DUMP_HASHTABLE_STATS_PER_TABLE

#define UPDATE_PROBE_COUNTS()                                    \
  ++probeCount;                                                  \
  HashTableStats::instance().recordCollisionAtCount(probeCount); \
  ++perTableProbeCount;                                          \
  m_stats->recordCollisionAtCount(perTableProbeCount)
#define UPDATE_ACCESS_COUNTS()                                \
  atomicIncrement(&HashTableStats::instance().numAccesses);   \
  int probeCount = 0;                                         \
  ++m_stats->numAccesses;                                     \
  int perTableProbeCount = 0
#else
#define UPDATE_PROBE_COUNTS() \
  ++probeCount;               \
  HashTableStats::instance().recordCollisionAtCount(probeCount)
#define UPDATE_ACCESS_COUNTS()                              \
  atomicIncrement(&HashTableStats::instance().numAccesses); \
  int probeCount = 0
#endif
#else
#if DUMP_HASHTABLE_STATS_PER_TABLE
#define UPDATE_PROBE_COUNTS() \
  ++perTableProbeCount;       \
  m_stats->recordCollisionAtCount(perTableProbeCount)
#define UPDATE_ACCESS_COUNTS() \
  ++m_stats->numAccesses;      \
  int perTableProbeCount = 0
#else
#define UPDATE_PROBE_COUNTS() \
  do {                        \
  } while (0)
#define UPDATE_ACCESS_COUNTS() \
  do {                         \
  } while (0)
#endif
#endif
86
87 namespace WTF {
88
89 // This is for tracing inside collections that have special support for weak
90 // pointers. The trait has a trace method which returns true if there are weak
91 // pointers to things that have not (yet) been marked live. Returning true
92 // indicates that the entry in the collection may yet be removed by weak
93 // handling. Default implementation for non-weak types is to use the regular
94 // non-weak TraceTrait. Default implementation for types with weakness is to
95 // call traceInCollection on the type's trait.
96 template <WeakHandlingFlag weakHandlingFlag,
97 ShouldWeakPointersBeMarkedStrongly strongify,
98 typename T,
99 typename Traits>
100 struct TraceInCollectionTrait;
101
102 #if DUMP_HASHTABLE_STATS
103 struct WTF_EXPORT HashTableStats {
104 HashTableStats()
105 : numAccesses(0),
106 numRehashes(0),
107 numRemoves(0),
108 numReinserts(0),
109 maxCollisions(0),
110 numCollisions(0),
111 collisionGraph() {}
112
113 // The following variables are all atomically incremented when modified.
114 int numAccesses;
115 int numRehashes;
116 int numRemoves;
117 int numReinserts;
118
119 // The following variables are only modified in the recordCollisionAtCount
120 // method within a mutex.
121 int maxCollisions;
122 int numCollisions;
123 int collisionGraph[4096];
124
125 void copy(const HashTableStats* other);
126 void recordCollisionAtCount(int count);
127 void dumpStats();
128
129 static HashTableStats& instance();
130
131 template <typename VisitorDispatcher>
132 void trace(VisitorDispatcher) {}
133 };
134
135 #if DUMP_HASHTABLE_STATS_PER_TABLE
136 template <typename Allocator, bool isGCType = Allocator::isGarbageCollected>
137 class HashTableStatsPtr;
138
139 template <typename Allocator>
140 class HashTableStatsPtr<Allocator, false> final {
141 STATIC_ONLY(HashTableStatsPtr);
142
143 public:
144 static std::unique_ptr<HashTableStats> create() {
145 return WTF::wrapUnique(new HashTableStats);
146 }
147
148 static std::unique_ptr<HashTableStats> copy(
149 const std::unique_ptr<HashTableStats>& other) {
150 if (!other)
151 return nullptr;
152 return WTF::wrapUnique(new HashTableStats(*other));
153 }
154
155 static void swap(std::unique_ptr<HashTableStats>& stats,
156 std::unique_ptr<HashTableStats>& other) {
157 stats.swap(other);
158 }
159 };
160
161 template <typename Allocator>
162 class HashTableStatsPtr<Allocator, true> final {
163 STATIC_ONLY(HashTableStatsPtr);
164
165 public:
166 static HashTableStats* create() {
167 // Resort to manually allocating this POD on the vector
168 // backing heap, as blink::GarbageCollected<> isn't in scope
169 // in WTF.
170 void* storage = reinterpret_cast<void*>(
171 Allocator::template allocateVectorBacking<unsigned char>(
172 sizeof(HashTableStats)));
173 return new (storage) HashTableStats;
174 }
175
176 static HashTableStats* copy(const HashTableStats* other) {
177 if (!other)
178 return nullptr;
179 HashTableStats* obj = create();
180 obj->copy(other);
181 return obj;
182 }
183
184 static void swap(HashTableStats*& stats, HashTableStats*& other) {
185 std::swap(stats, other);
186 }
187 };
188 #endif
189 #endif
190
191 template <typename Key,
192 typename Value,
193 typename Extractor,
194 typename HashFunctions,
195 typename Traits,
196 typename KeyTraits,
197 typename Allocator>
198 class HashTable;
199 template <typename Key,
200 typename Value,
201 typename Extractor,
202 typename HashFunctions,
203 typename Traits,
204 typename KeyTraits,
205 typename Allocator>
206 class HashTableIterator;
207 template <typename Key,
208 typename Value,
209 typename Extractor,
210 typename HashFunctions,
211 typename Traits,
212 typename KeyTraits,
213 typename Allocator>
214 class HashTableConstIterator;
215 template <typename Value,
216 typename HashFunctions,
217 typename HashTraits,
218 typename Allocator>
219 class LinkedHashSet;
220 template <WeakHandlingFlag x,
221 typename T,
222 typename U,
223 typename V,
224 typename W,
225 typename X,
226 typename Y,
227 typename Z>
228 struct WeakProcessingHashTableHelper;
229
230 typedef enum { HashItemKnownGood } HashItemKnownGoodTag;
231
232 template <typename Key,
233 typename Value,
234 typename Extractor,
235 typename HashFunctions,
236 typename Traits,
237 typename KeyTraits,
238 typename Allocator>
239 class HashTableConstIterator final {
240 DISALLOW_NEW();
241
242 private:
243 typedef HashTable<Key,
244 Value,
245 Extractor,
246 HashFunctions,
247 Traits,
248 KeyTraits,
249 Allocator>
250 HashTableType;
251 typedef HashTableIterator<Key,
252 Value,
253 Extractor,
254 HashFunctions,
255 Traits,
256 KeyTraits,
257 Allocator>
258 iterator;
259 typedef HashTableConstIterator<Key,
260 Value,
261 Extractor,
262 HashFunctions,
263 Traits,
264 KeyTraits,
265 Allocator>
266 const_iterator;
267 typedef Value ValueType;
268 using value_type = ValueType;
269 typedef typename Traits::IteratorConstGetType GetType;
270 typedef const ValueType* PointerType;
271
272 friend class HashTable<Key,
273 Value,
274 Extractor,
275 HashFunctions,
276 Traits,
277 KeyTraits,
278 Allocator>;
279 friend class HashTableIterator<Key,
280 Value,
281 Extractor,
282 HashFunctions,
283 Traits,
284 KeyTraits,
285 Allocator>;
286
287 void skipEmptyBuckets() {
288 while (m_position != m_endPosition &&
289 HashTableType::isEmptyOrDeletedBucket(*m_position))
290 ++m_position;
291 }
292
293 HashTableConstIterator(PointerType position,
294 PointerType endPosition,
295 const HashTableType* container)
296 : m_position(position),
297 m_endPosition(endPosition)
298 #if DCHECK_IS_ON()
299 ,
300 m_container(container),
301 m_containerModifications(container->modifications())
302 #endif
303 {
304 skipEmptyBuckets();
305 }
306
307 HashTableConstIterator(PointerType position,
308 PointerType endPosition,
309 const HashTableType* container,
310 HashItemKnownGoodTag)
311 : m_position(position),
312 m_endPosition(endPosition)
313 #if DCHECK_IS_ON()
314 ,
315 m_container(container),
316 m_containerModifications(container->modifications())
317 #endif
318 {
319 #if DCHECK_IS_ON()
320 DCHECK_EQ(m_containerModifications, m_container->modifications());
321 #endif
322 }
323
324 void checkModifications() const {
325 #if DCHECK_IS_ON()
326 // HashTable and collections that build on it do not support
327 // modifications while there is an iterator in use. The exception is
328 // ListHashSet, which has its own iterators that tolerate modification
329 // of the underlying set.
330 DCHECK_EQ(m_containerModifications, m_container->modifications());
331 DCHECK(!m_container->accessForbidden());
332 #endif
333 }
334
335 public:
336 HashTableConstIterator() {}
337
338 GetType get() const {
339 checkModifications();
340 return m_position;
341 }
342 typename Traits::IteratorConstReferenceType operator*() const {
343 return Traits::getToReferenceConstConversion(get());
344 }
345 GetType operator->() const { return get(); }
346
347 const_iterator& operator++() {
348 DCHECK_NE(m_position, m_endPosition);
349 checkModifications();
350 ++m_position;
351 skipEmptyBuckets();
352 return *this;
353 }
354
355 // postfix ++ intentionally omitted
356
357 // Comparison.
358 bool operator==(const const_iterator& other) const {
359 return m_position == other.m_position;
360 }
361 bool operator!=(const const_iterator& other) const {
362 return m_position != other.m_position;
363 }
364 bool operator==(const iterator& other) const {
365 return *this == static_cast<const_iterator>(other);
366 }
367 bool operator!=(const iterator& other) const {
368 return *this != static_cast<const_iterator>(other);
369 }
370
371 std::ostream& printTo(std::ostream& stream) const {
372 if (m_position == m_endPosition)
373 return stream << "iterator representing <end>";
374 // TODO(tkent): Change |m_position| to |*m_position| to show the
375 // pointed object. It requires a lot of new stream printer functions.
376 return stream << "iterator pointing to " << m_position;
377 }
378
379 private:
380 PointerType m_position;
381 PointerType m_endPosition;
382 #if DCHECK_IS_ON()
383 const HashTableType* m_container;
384 int64_t m_containerModifications;
385 #endif
386 };
387
388 template <typename Key,
389 typename Value,
390 typename Extractor,
391 typename Hash,
392 typename Traits,
393 typename KeyTraits,
394 typename Allocator>
395 std::ostream& operator<<(std::ostream& stream,
396 const HashTableConstIterator<Key,
397 Value,
398 Extractor,
399 Hash,
400 Traits,
401 KeyTraits,
402 Allocator>& iterator) {
403 return iterator.printTo(stream);
404 }
405
406 template <typename Key,
407 typename Value,
408 typename Extractor,
409 typename HashFunctions,
410 typename Traits,
411 typename KeyTraits,
412 typename Allocator>
413 class HashTableIterator final {
414 DISALLOW_NEW();
415
416 private:
417 typedef HashTable<Key,
418 Value,
419 Extractor,
420 HashFunctions,
421 Traits,
422 KeyTraits,
423 Allocator>
424 HashTableType;
425 typedef HashTableIterator<Key,
426 Value,
427 Extractor,
428 HashFunctions,
429 Traits,
430 KeyTraits,
431 Allocator>
432 iterator;
433 typedef HashTableConstIterator<Key,
434 Value,
435 Extractor,
436 HashFunctions,
437 Traits,
438 KeyTraits,
439 Allocator>
440 const_iterator;
441 typedef Value ValueType;
442 typedef typename Traits::IteratorGetType GetType;
443 typedef ValueType* PointerType;
444
445 friend class HashTable<Key,
446 Value,
447 Extractor,
448 HashFunctions,
449 Traits,
450 KeyTraits,
451 Allocator>;
452
453 HashTableIterator(PointerType pos,
454 PointerType end,
455 const HashTableType* container)
456 : m_iterator(pos, end, container) {}
457 HashTableIterator(PointerType pos,
458 PointerType end,
459 const HashTableType* container,
460 HashItemKnownGoodTag tag)
461 : m_iterator(pos, end, container, tag) {}
462
463 public:
464 HashTableIterator() {}
465
466 // default copy, assignment and destructor are OK
467
468 GetType get() const { return const_cast<GetType>(m_iterator.get()); }
469 typename Traits::IteratorReferenceType operator*() const {
470 return Traits::getToReferenceConversion(get());
471 }
472 GetType operator->() const { return get(); }
473
474 iterator& operator++() {
475 ++m_iterator;
476 return *this;
477 }
478
479 // postfix ++ intentionally omitted
480
481 // Comparison.
482 bool operator==(const iterator& other) const {
483 return m_iterator == other.m_iterator;
484 }
485 bool operator!=(const iterator& other) const {
486 return m_iterator != other.m_iterator;
487 }
488 bool operator==(const const_iterator& other) const {
489 return m_iterator == other;
490 }
491 bool operator!=(const const_iterator& other) const {
492 return m_iterator != other;
493 }
494
495 operator const_iterator() const { return m_iterator; }
496 std::ostream& printTo(std::ostream& stream) const {
497 return m_iterator.printTo(stream);
498 }
499
500 private:
501 const_iterator m_iterator;
502 };
503
504 template <typename Key,
505 typename Value,
506 typename Extractor,
507 typename Hash,
508 typename Traits,
509 typename KeyTraits,
510 typename Allocator>
511 std::ostream& operator<<(std::ostream& stream,
512 const HashTableIterator<Key,
513 Value,
514 Extractor,
515 Hash,
516 Traits,
517 KeyTraits,
518 Allocator>& iterator) {
519 return iterator.printTo(stream);
520 }
521
522 using std::swap;
523
524 template <typename T, typename Allocator, bool enterGCForbiddenScope>
525 struct Mover {
526 STATIC_ONLY(Mover);
527 static void move(T&& from, T& to) {
528 to.~T();
529 new (NotNull, &to) T(std::move(from));
530 }
531 };
532
533 template <typename T, typename Allocator>
534 struct Mover<T, Allocator, true> {
535 STATIC_ONLY(Mover);
536 static void move(T&& from, T& to) {
537 to.~T();
538 Allocator::enterGCForbiddenScope();
539 new (NotNull, &to) T(std::move(from));
540 Allocator::leaveGCForbiddenScope();
541 }
542 };
543
544 template <typename HashFunctions>
545 class IdentityHashTranslator {
546 STATIC_ONLY(IdentityHashTranslator);
547
548 public:
549 template <typename T>
550 static unsigned hash(const T& key) {
551 return HashFunctions::hash(key);
552 }
553 template <typename T, typename U>
554 static bool equal(const T& a, const U& b) {
555 return HashFunctions::equal(a, b);
556 }
557 template <typename T, typename U, typename V>
558 static void translate(T& location, U&&, V&& value) {
559 location = std::forward<V>(value);
560 }
561 };
562
563 template <typename HashTableType, typename ValueType>
564 struct HashTableAddResult final {
565 STACK_ALLOCATED();
566 HashTableAddResult(const HashTableType* container,
567 ValueType* storedValue,
568 bool isNewEntry)
569 : storedValue(storedValue),
570 isNewEntry(isNewEntry)
571 #if ENABLE(SECURITY_ASSERT)
572 ,
573 m_container(container),
574 m_containerModifications(container->modifications())
575 #endif
576 {
577 ALLOW_UNUSED_LOCAL(container);
578 DCHECK(container);
579 }
580
581 ValueType* storedValue;
582 bool isNewEntry;
583
584 #if ENABLE(SECURITY_ASSERT)
585 ~HashTableAddResult() {
586 // If rehash happened before accessing storedValue, it's
587 // use-after-free. Any modification may cause a rehash, so we check for
588 // modifications here.
589
590 // Rehash after accessing storedValue is harmless but will assert if the
591 // AddResult destructor takes place after a modification. You may need
592 // to limit the scope of the AddResult.
593 SECURITY_DCHECK(m_containerModifications == m_container->modifications());
594 }
595
596 private:
597 const HashTableType* m_container;
598 const int64_t m_containerModifications;
599 #endif
600 };
601
602 template <typename Value, typename Extractor, typename KeyTraits>
603 struct HashTableHelper {
604 STATIC_ONLY(HashTableHelper);
605 static bool isEmptyBucket(const Value& value) {
606 return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value));
607 }
608 static bool isDeletedBucket(const Value& value) {
609 return KeyTraits::isDeletedValue(Extractor::extract(value));
610 }
611 static bool isEmptyOrDeletedBucket(const Value& value) {
612 return isEmptyBucket(value) || isDeletedBucket(value);
613 }
614 };
615
616 template <typename HashTranslator,
617 typename KeyTraits,
618 bool safeToCompareToEmptyOrDeleted>
619 struct HashTableKeyChecker {
620 STATIC_ONLY(HashTableKeyChecker);
621 // There's no simple generic way to make this check if
622 // safeToCompareToEmptyOrDeleted is false, so the check always passes.
623 template <typename T>
624 static bool checkKey(const T&) {
625 return true;
626 }
627 };
628
629 template <typename HashTranslator, typename KeyTraits>
630 struct HashTableKeyChecker<HashTranslator, KeyTraits, true> {
631 STATIC_ONLY(HashTableKeyChecker);
632 template <typename T>
633 static bool checkKey(const T& key) {
634 // FIXME : Check also equality to the deleted value.
635 return !HashTranslator::equal(KeyTraits::emptyValue(), key);
636 }
637 };
638
639 // Note: empty or deleted key values are not allowed, using them may lead to
640 // undefined behavior. For pointer keys this means that null pointers are not
641 // allowed unless you supply custom key traits.
642 template <typename Key,
643 typename Value,
644 typename Extractor,
645 typename HashFunctions,
646 typename Traits,
647 typename KeyTraits,
648 typename Allocator>
649 class HashTable final
650 : public ConditionalDestructor<HashTable<Key,
651 Value,
652 Extractor,
653 HashFunctions,
654 Traits,
655 KeyTraits,
656 Allocator>,
657 Allocator::isGarbageCollected> {
658 DISALLOW_NEW();
659
660 public:
661 typedef HashTableIterator<Key,
662 Value,
663 Extractor,
664 HashFunctions,
665 Traits,
666 KeyTraits,
667 Allocator>
668 iterator;
669 typedef HashTableConstIterator<Key,
670 Value,
671 Extractor,
672 HashFunctions,
673 Traits,
674 KeyTraits,
675 Allocator>
676 const_iterator;
677 typedef Traits ValueTraits;
678 typedef Key KeyType;
679 typedef typename KeyTraits::PeekInType KeyPeekInType;
680 typedef Value ValueType;
681 typedef Extractor ExtractorType;
682 typedef KeyTraits KeyTraitsType;
683 typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType;
684 typedef HashTableAddResult<HashTable, ValueType> AddResult;
685
686 HashTable();
687 void finalize() {
688 DCHECK(!Allocator::isGarbageCollected);
689 if (LIKELY(!m_table))
690 return;
691 enterAccessForbiddenScope();
692 deleteAllBucketsAndDeallocate(m_table, m_tableSize);
693 leaveAccessForbiddenScope();
694 m_table = nullptr;
695 }
696
697 HashTable(const HashTable&);
698 HashTable(HashTable&&);
699 void swap(HashTable&);
700 HashTable& operator=(const HashTable&);
701 HashTable& operator=(HashTable&&);
702
703 // When the hash table is empty, just return the same iterator for end as
704 // for begin. This is more efficient because we don't have to skip all the
705 // empty and deleted buckets, and iterating an empty table is a common case
706 // that's worth optimizing.
707 iterator begin() { return isEmpty() ? end() : makeIterator(m_table); }
708 iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); }
709 const_iterator begin() const {
710 return isEmpty() ? end() : makeConstIterator(m_table);
711 }
712 const_iterator end() const {
713 return makeKnownGoodConstIterator(m_table + m_tableSize);
714 }
715
716 unsigned size() const {
717 DCHECK(!accessForbidden());
718 return m_keyCount;
719 }
720 unsigned capacity() const {
721 DCHECK(!accessForbidden());
722 return m_tableSize;
723 }
724 bool isEmpty() const {
725 DCHECK(!accessForbidden());
726 return !m_keyCount;
727 }
728
729 void reserveCapacityForSize(unsigned size);
730
731 template <typename IncomingValueType>
732 AddResult add(IncomingValueType&& value) {
733 return add<IdentityTranslatorType>(Extractor::extract(value),
734 std::forward<IncomingValueType>(value));
735 }
736
737 // A special version of add() that finds the object by hashing and comparing
738 // with some other type, to avoid the cost of type conversion if the object
739 // is already in the table.
740 template <typename HashTranslator, typename T, typename Extra>
741 AddResult add(T&& key, Extra&&);
742 template <typename HashTranslator, typename T, typename Extra>
743 AddResult addPassingHashCode(T&& key, Extra&&);
744
745 iterator find(KeyPeekInType key) { return find<IdentityTranslatorType>(key); }
746 const_iterator find(KeyPeekInType key) const {
747 return find<IdentityTranslatorType>(key);
748 }
749 bool contains(KeyPeekInType key) const {
750 return contains<IdentityTranslatorType>(key);
751 }
752
753 template <typename HashTranslator, typename T>
754 iterator find(const T&);
755 template <typename HashTranslator, typename T>
756 const_iterator find(const T&) const;
757 template <typename HashTranslator, typename T>
758 bool contains(const T&) const;
759
760 void remove(KeyPeekInType);
761 void remove(iterator);
762 void remove(const_iterator);
763 void clear();
764
765 static bool isEmptyBucket(const ValueType& value) {
766 return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value));
767 }
768 static bool isDeletedBucket(const ValueType& value) {
769 return KeyTraits::isDeletedValue(Extractor::extract(value));
770 }
771 static bool isEmptyOrDeletedBucket(const ValueType& value) {
772 return HashTableHelper<ValueType, Extractor,
773 KeyTraits>::isEmptyOrDeletedBucket(value);
774 }
775
776 ValueType* lookup(KeyPeekInType key) {
777 return lookup<IdentityTranslatorType, KeyPeekInType>(key);
778 }
779 template <typename HashTranslator, typename T>
780 ValueType* lookup(const T&);
781 template <typename HashTranslator, typename T>
782 const ValueType* lookup(const T&) const;
783
784 template <typename VisitorDispatcher>
785 void trace(VisitorDispatcher);
786
787 #if DCHECK_IS_ON()
788 void enterAccessForbiddenScope() {
789 DCHECK(!m_accessForbidden);
790 m_accessForbidden = true;
791 }
792 void leaveAccessForbiddenScope() { m_accessForbidden = false; }
793 bool accessForbidden() const { return m_accessForbidden; }
794 int64_t modifications() const { return m_modifications; }
795 void registerModification() { m_modifications++; }
796 // HashTable and collections that build on it do not support modifications
797 // while there is an iterator in use. The exception is ListHashSet, which
798 // has its own iterators that tolerate modification of the underlying set.
799 void checkModifications(int64_t mods) const {
800 DCHECK_EQ(mods, m_modifications);
801 }
802 #else
803 ALWAYS_INLINE void enterAccessForbiddenScope() {}
804 ALWAYS_INLINE void leaveAccessForbiddenScope() {}
805 ALWAYS_INLINE bool accessForbidden() const { return false; }
806 ALWAYS_INLINE int64_t modifications() const { return 0; }
807 ALWAYS_INLINE void registerModification() {}
808 ALWAYS_INLINE void checkModifications(int64_t mods) const {}
809 #endif
810
811 private:
812 static ValueType* allocateTable(unsigned size);
813 static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size);
814
815 typedef std::pair<ValueType*, bool> LookupType;
816 typedef std::pair<LookupType, unsigned> FullLookupType;
817
818 LookupType lookupForWriting(const Key& key) {
819 return lookupForWriting<IdentityTranslatorType>(key);
820 }
821 template <typename HashTranslator, typename T>
822 FullLookupType fullLookupForWriting(const T&);
823 template <typename HashTranslator, typename T>
824 LookupType lookupForWriting(const T&);
825
826 void remove(ValueType*);
827
828 bool shouldExpand() const {
829 return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize;
830 }
831 bool mustRehashInPlace() const {
832 return m_keyCount * m_minLoad < m_tableSize * 2;
833 }
834 bool shouldShrink() const {
835 // isAllocationAllowed check should be at the last because it's
836 // expensive.
837 return m_keyCount * m_minLoad < m_tableSize &&
838 m_tableSize > KeyTraits::minimumTableSize &&
839 Allocator::isAllocationAllowed();
840 }
841 ValueType* expand(ValueType* entry = 0);
842 void shrink() { rehash(m_tableSize / 2, 0); }
843
844 ValueType* expandBuffer(unsigned newTableSize, ValueType* entry, bool&);
845 ValueType* rehashTo(ValueType* newTable,
846 unsigned newTableSize,
847 ValueType* entry);
848 ValueType* rehash(unsigned newTableSize, ValueType* entry);
849 ValueType* reinsert(ValueType&&);
850
851 static void initializeBucket(ValueType& bucket);
852 static void deleteBucket(ValueType& bucket) {
853 bucket.~ValueType();
854 Traits::constructDeletedValue(bucket, Allocator::isGarbageCollected);
855 }
856
857 FullLookupType makeLookupResult(ValueType* position,
858 bool found,
859 unsigned hash) {
860 return FullLookupType(LookupType(position, found), hash);
861 }
862
863 iterator makeIterator(ValueType* pos) {
864 return iterator(pos, m_table + m_tableSize, this);
865 }
866 const_iterator makeConstIterator(ValueType* pos) const {
867 return const_iterator(pos, m_table + m_tableSize, this);
868 }
869 iterator makeKnownGoodIterator(ValueType* pos) {
870 return iterator(pos, m_table + m_tableSize, this, HashItemKnownGood);
871 }
872 const_iterator makeKnownGoodConstIterator(ValueType* pos) const {
873 return const_iterator(pos, m_table + m_tableSize, this, HashItemKnownGood);
874 }
875
876 static const unsigned m_maxLoad = 2;
877 static const unsigned m_minLoad = 6;
878
879 unsigned tableSizeMask() const {
880 size_t mask = m_tableSize - 1;
881 DCHECK_EQ((mask & m_tableSize), 0u);
882 return mask;
883 }
884
885 void setEnqueued() { m_queueFlag = true; }
886 void clearEnqueued() { m_queueFlag = false; }
887 bool enqueued() { return m_queueFlag; }
888
889 ValueType* m_table;
890 unsigned m_tableSize;
891 unsigned m_keyCount;
892 #if DCHECK_IS_ON()
893 unsigned m_deletedCount : 30;
894 unsigned m_queueFlag : 1;
895 unsigned m_accessForbidden : 1;
896 unsigned m_modifications;
897 #else
898 unsigned m_deletedCount : 31;
899 unsigned m_queueFlag : 1;
900 #endif
901
902 #if DUMP_HASHTABLE_STATS_PER_TABLE
903 public:
904 mutable
905 typename std::conditional<Allocator::isGarbageCollected,
906 HashTableStats*,
907 std::unique_ptr<HashTableStats>>::type m_stats;
908 #endif
909
910 template <WeakHandlingFlag x,
911 typename T,
912 typename U,
913 typename V,
914 typename W,
915 typename X,
916 typename Y,
917 typename Z>
918 friend struct WeakProcessingHashTableHelper;
919 template <typename T, typename U, typename V, typename W>
920 friend class LinkedHashSet;
921 };
922
923 template <typename Key,
924 typename Value,
925 typename Extractor,
926 typename HashFunctions,
927 typename Traits,
928 typename KeyTraits,
929 typename Allocator>
930 inline HashTable<Key,
931 Value,
932 Extractor,
933 HashFunctions,
934 Traits,
935 KeyTraits,
936 Allocator>::HashTable()
937 : m_table(nullptr),
938 m_tableSize(0),
939 m_keyCount(0),
940 m_deletedCount(0),
941 m_queueFlag(false)
942 #if DCHECK_IS_ON()
943 ,
944 m_accessForbidden(false),
945 m_modifications(0)
946 #endif
947 #if DUMP_HASHTABLE_STATS_PER_TABLE
948 ,
949 m_stats(nullptr)
950 #endif
951 {
952 static_assert(Allocator::isGarbageCollected ||
953 (!IsPointerToGarbageCollectedType<Key>::value &&
954 !IsPointerToGarbageCollectedType<Value>::value),
955 "Cannot put raw pointers to garbage-collected classes into an "
956 "off-heap collection.");
957 }
958
// 32-bit integer mixing function used to derive the secondary probe stride
// for double hashing (shift/xor avalanche of the primary hash).
inline unsigned doubleHash(unsigned key) {
  key = ~key + (key >> 23);
  key ^= (key << 12);
  key ^= (key >> 7);
  key ^= (key << 2);
  key ^= (key >> 20);
  return key;
}
967
// Rounds |size| up to a full mask of ones and doubles the result, producing
// the power-of-two capacity to allocate for the requested element count.
inline unsigned calculateCapacity(unsigned size) {
  for (unsigned mask = size; mask; mask >>= 1)
    size |= mask;         // 00110101010 -> 00111111111
  return (size + 1) * 2;  // 00111111111 -> 10000000000
}
973
974 template <typename Key,
975 typename Value,
976 typename Extractor,
977 typename HashFunctions,
978 typename Traits,
979 typename KeyTraits,
980 typename Allocator>
981 void HashTable<Key,
982 Value,
983 Extractor,
984 HashFunctions,
985 Traits,
986 KeyTraits,
987 Allocator>::reserveCapacityForSize(unsigned newSize) {
988 unsigned newCapacity = calculateCapacity(newSize);
989 if (newCapacity < KeyTraits::minimumTableSize)
990 newCapacity = KeyTraits::minimumTableSize;
991
992 if (newCapacity > capacity()) {
993 RELEASE_ASSERT(!static_cast<int>(
994 newCapacity >>
995 31)); // HashTable capacity should not overflow 32bit int.
996 rehash(newCapacity, 0);
997 }
998 }
999
1000 template <typename Key,
1001 typename Value,
1002 typename Extractor,
1003 typename HashFunctions,
1004 typename Traits,
1005 typename KeyTraits,
1006 typename Allocator>
1007 template <typename HashTranslator, typename T>
1008 inline Value*
1009 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1010 lookup(const T& key) {
1011 return const_cast<Value*>(
1012 const_cast<const HashTable*>(this)->lookup<HashTranslator>(key));
1013 }
1014
1015 template <typename Key,
1016 typename Value,
1017 typename Extractor,
1018 typename HashFunctions,
1019 typename Traits,
1020 typename KeyTraits,
1021 typename Allocator>
1022 template <typename HashTranslator, typename T>
1023 inline const Value*
1024 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1025 lookup(const T& key) const {
1026 DCHECK(!accessForbidden());
1027 DCHECK((HashTableKeyChecker<
1028 HashTranslator, KeyTraits,
1029 HashFunctions::safeToCompareToEmptyOrDeleted>::checkKey(key)));
1030 const ValueType* table = m_table;
1031 if (!table)
1032 return nullptr;
1033
1034 size_t k = 0;
1035 size_t sizeMask = tableSizeMask();
1036 unsigned h = HashTranslator::hash(key);
1037 size_t i = h & sizeMask;
1038
1039 UPDATE_ACCESS_COUNTS();
1040
1041 while (1) {
1042 const ValueType* entry = table + i;
1043
1044 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
1045 if (HashTranslator::equal(Extractor::extract(*entry), key))
1046 return entry;
1047
1048 if (isEmptyBucket(*entry))
1049 return nullptr;
1050 } else {
1051 if (isEmptyBucket(*entry))
1052 return nullptr;
1053
1054 if (!isDeletedBucket(*entry) &&
1055 HashTranslator::equal(Extractor::extract(*entry), key))
1056 return entry;
1057 }
1058 UPDATE_PROBE_COUNTS();
1059 if (!k)
1060 k = 1 | doubleHash(h);
1061 i = (i + k) & sizeMask;
1062 }
1063 }
1064
// Like lookup(), but intended for insertion: returns (bucket, found).
// If the key is absent, the returned bucket is the first deleted slot
// seen on the probe path (preferred, to reclaim it) or else the empty
// slot that terminated the probe. Registers a modification because the
// caller is expected to write into the table.
1065 template <typename Key,
1066 typename Value,
1067 typename Extractor,
1068 typename HashFunctions,
1069 typename Traits,
1070 typename KeyTraits,
1071 typename Allocator>
1072 template <typename HashTranslator, typename T>
1073 inline typename HashTable<Key,
1074 Value,
1075 Extractor,
1076 HashFunctions,
1077 Traits,
1078 KeyTraits,
1079 Allocator>::LookupType
1080 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1081 lookupForWriting(const T& key) {
1082 DCHECK(!accessForbidden());
1083 DCHECK(m_table);
1084 registerModification();
1085
1086 ValueType* table = m_table;
1087 size_t k = 0;
1088 size_t sizeMask = tableSizeMask();
1089 unsigned h = HashTranslator::hash(key);
1090 size_t i = h & sizeMask;
1091
1092 UPDATE_ACCESS_COUNTS();
1093
1094 ValueType* deletedEntry = nullptr;
1095
1096 while (1) {
1097 ValueType* entry = table + i;
1098
// Empty bucket ends the probe sequence: the key is not present.
1099 if (isEmptyBucket(*entry))
1100 return LookupType(deletedEntry ? deletedEntry : entry, false);
1101
1102 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
1103 if (HashTranslator::equal(Extractor::extract(*entry), key))
1104 return LookupType(entry, true);
1105
1106 if (isDeletedBucket(*entry))
1107 deletedEntry = entry;
1108 } else {
1109 if (isDeletedBucket(*entry))
1110 deletedEntry = entry;
1111 else if (HashTranslator::equal(Extractor::extract(*entry), key))
1112 return LookupType(entry, true);
1113 }
1114 UPDATE_PROBE_COUNTS();
1115 if (!k)
1116 k = 1 | doubleHash(h);
1117 i = (i + k) & sizeMask;
1118 }
1119 }
1120
// Same probe logic as lookupForWriting(), but additionally returns the
// computed hash code (FullLookupType = ((bucket, found), hash)) so the
// caller can pass it on to HashTranslator::translate without rehashing
// the key (see addPassingHashCode()).
1121 template <typename Key,
1122 typename Value,
1123 typename Extractor,
1124 typename HashFunctions,
1125 typename Traits,
1126 typename KeyTraits,
1127 typename Allocator>
1128 template <typename HashTranslator, typename T>
1129 inline typename HashTable<Key,
1130 Value,
1131 Extractor,
1132 HashFunctions,
1133 Traits,
1134 KeyTraits,
1135 Allocator>::FullLookupType
1136 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1137 fullLookupForWriting(const T& key) {
1138 DCHECK(!accessForbidden());
1139 DCHECK(m_table);
1140 registerModification();
1141
1142 ValueType* table = m_table;
1143 size_t k = 0;
1144 size_t sizeMask = tableSizeMask();
1145 unsigned h = HashTranslator::hash(key);
1146 size_t i = h & sizeMask;
1147
1148 UPDATE_ACCESS_COUNTS();
1149
1150 ValueType* deletedEntry = nullptr;
1151
1152 while (1) {
1153 ValueType* entry = table + i;
1154
1155 if (isEmptyBucket(*entry))
1156 return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h);
1157
1158 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
1159 if (HashTranslator::equal(Extractor::extract(*entry), key))
1160 return makeLookupResult(entry, true, h);
1161
1162 if (isDeletedBucket(*entry))
1163 deletedEntry = entry;
1164 } else {
1165 if (isDeletedBucket(*entry))
1166 deletedEntry = entry;
1167 else if (HashTranslator::equal(Extractor::extract(*entry), key))
1168 return makeLookupResult(entry, true, h);
1169 }
1170 UPDATE_PROBE_COUNTS();
1171 if (!k)
1172 k = 1 | doubleHash(h);
1173 i = (i + k) & sizeMask;
1174 }
1175 }
1176
// Helper selected on Traits::emptyValueIsZero: initializes a bucket to
// the traits' empty value, either by placement-new copy construction
// (general case) or by zero-filling (when the empty value is all-zero
// bits, which also supports non-copyable types).
1177 template <bool emptyValueIsZero>
1178 struct HashTableBucketInitializer;
1179
1180 template <>
1181 struct HashTableBucketInitializer<false> {
1182 STATIC_ONLY(HashTableBucketInitializer);
1183 template <typename Traits, typename Value>
1184 static void initialize(Value& bucket) {
1185 new (NotNull, &bucket) Value(Traits::emptyValue());
1186 }
1187 };
1188
1189 template <>
1190 struct HashTableBucketInitializer<true> {
1191 STATIC_ONLY(HashTableBucketInitializer);
1192 template <typename Traits, typename Value>
1193 static void initialize(Value& bucket) {
1194 // This initializes the bucket without copying the empty value. That
1195 // makes it possible to use this with types that don't support copying.
1196 // The memset to 0 looks like a slow operation but is optimized by the
1197 // compilers.
1198 memset(&bucket, 0, sizeof(bucket));
1199 }
1200 };
1201
// Resets |bucket| to the empty value via the trait-selected initializer
// above (placement-new or memset).
1202 template <typename Key,
1203 typename Value,
1204 typename Extractor,
1205 typename HashFunctions,
1206 typename Traits,
1207 typename KeyTraits,
1208 typename Allocator>
1209 inline void
1210 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1211 initializeBucket(ValueType& bucket) {
1212 HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize<
1213 Traits>(bucket);
1214 }
1215
// Inserts |key| (translated into a full entry via HashTranslator using
// |extra|) unless an equal key already exists. Returns an AddResult
// whose isNewEntry flag reports which case occurred. Inlines its own
// probe loop rather than calling lookupForWriting.
1216 template <typename Key,
1217 typename Value,
1218 typename Extractor,
1219 typename HashFunctions,
1220 typename Traits,
1221 typename KeyTraits,
1222 typename Allocator>
1223 template <typename HashTranslator, typename T, typename Extra>
1224 typename HashTable<Key,
1225 Value,
1226 Extractor,
1227 HashFunctions,
1228 Traits,
1229 KeyTraits,
1230 Allocator>::AddResult
1231 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1232 add(T&& key, Extra&& extra) {
1233 DCHECK(!accessForbidden());
1234 DCHECK(Allocator::isAllocationAllowed());
// Lazily allocate the backing store on first insertion.
1235 if (!m_table)
1236 expand();
1237
1238 DCHECK(m_table);
1239
1240 ValueType* table = m_table;
1241 size_t k = 0;
1242 size_t sizeMask = tableSizeMask();
1243 unsigned h = HashTranslator::hash(key);
1244 size_t i = h & sizeMask;
1245
1246 UPDATE_ACCESS_COUNTS();
1247
1248 ValueType* deletedEntry = nullptr;
1249 ValueType* entry;
1250 while (1) {
1251 entry = table + i;
1252
1253 if (isEmptyBucket(*entry))
1254 break;
1255
1256 if (HashFunctions::safeToCompareToEmptyOrDeleted) {
1257 if (HashTranslator::equal(Extractor::extract(*entry), key))
1258 return AddResult(this, entry, false);
1259
1260 if (isDeletedBucket(*entry))
1261 deletedEntry = entry;
1262 } else {
1263 if (isDeletedBucket(*entry))
1264 deletedEntry = entry;
1265 else if (HashTranslator::equal(Extractor::extract(*entry), key))
1266 return AddResult(this, entry, false);
1267 }
1268 UPDATE_PROBE_COUNTS();
1269 if (!k)
1270 k = 1 | doubleHash(h);
1271 i = (i + k) & sizeMask;
1272 }
1273
1274 registerModification();
1275
// Prefer reclaiming a tombstone over consuming a fresh empty slot.
1276 if (deletedEntry) {
1277 // Overwrite any data left over from last use, using placement new or
1278 // memset.
1279 initializeBucket(*deletedEntry);
1280 entry = deletedEntry;
1281 --m_deletedCount;
1282 }
1283
1284 HashTranslator::translate(*entry, std::forward<T>(key),
1285 std::forward<Extra>(extra));
1286 DCHECK(!isEmptyOrDeletedBucket(*entry));
1287
1288 ++m_keyCount;
1289
// expand()/rehash() may move the backing store, so re-derive the entry
// pointer from their return value.
1290 if (shouldExpand()) {
1291 entry = expand(entry);
1292 } else if (Traits::weakHandlingFlag == WeakHandlingInCollections &&
1293 shouldShrink()) {
1294 // When weak hash tables are processed by the garbage collector,
1295 // elements with no other strong references to them will have their
1296 // table entries cleared. But no shrinking of the backing store is
1297 // allowed at that time, as allocations are prohibited during that
1298 // GC phase.
1299 //
1300 // With that weak processing taking care of removals, explicit
1301 // remove()s of elements is rarely done. Which implies that the
1302 // weak hash table will never be checked if it can be shrunk.
1303 //
1304 // To prevent weak hash tables with very low load factors from
1305 // developing, we perform it when adding elements instead.
1306 entry = rehash(m_tableSize / 2, entry);
1307 }
1308
1309 return AddResult(this, entry, true);
1310 }
1311
// Variant of add() for translators whose translate() takes the hash
// code as an extra argument (used by hash-caching key types). Uses
// fullLookupForWriting() to obtain the bucket and the hash in one pass.
1312 template <typename Key,
1313 typename Value,
1314 typename Extractor,
1315 typename HashFunctions,
1316 typename Traits,
1317 typename KeyTraits,
1318 typename Allocator>
1319 template <typename HashTranslator, typename T, typename Extra>
1320 typename HashTable<Key,
1321 Value,
1322 Extractor,
1323 HashFunctions,
1324 Traits,
1325 KeyTraits,
1326 Allocator>::AddResult
1327 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1328 addPassingHashCode(T&& key, Extra&& extra) {
1329 DCHECK(!accessForbidden());
1330 DCHECK(Allocator::isAllocationAllowed());
1331 if (!m_table)
1332 expand();
1333
1334 FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key);
1335
1336 ValueType* entry = lookupResult.first.first;
1337 bool found = lookupResult.first.second;
1338 unsigned h = lookupResult.second;
1339
1340 if (found)
1341 return AddResult(this, entry, false);
1342
1343 registerModification();
1344
// Reusing a tombstone: clear out stale contents first.
1345 if (isDeletedBucket(*entry)) {
1346 initializeBucket(*entry);
1347 --m_deletedCount;
1348 }
1349
1350 HashTranslator::translate(*entry, std::forward<T>(key),
1351 std::forward<Extra>(extra), h);
1352 DCHECK(!isEmptyOrDeletedBucket(*entry));
1353
1354 ++m_keyCount;
1355 if (shouldExpand())
1356 entry = expand(entry);
1357
1358 return AddResult(this, entry, true);
1359 }
1360
// Moves an existing entry into its slot in the (new) backing store
// during a rehash. The entry must not already be present (DCHECKed).
// Returns the destination bucket.
1361 template <typename Key,
1362 typename Value,
1363 typename Extractor,
1364 typename HashFunctions,
1365 typename Traits,
1366 typename KeyTraits,
1367 typename Allocator>
1368 Value*
1369 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1370 reinsert(ValueType&& entry) {
1371 DCHECK(m_table);
1372 registerModification();
1373 DCHECK(!lookupForWriting(Extractor::extract(entry)).second);
1374 DCHECK(
1375 !isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first)));
1376 #if DUMP_HASHTABLE_STATS
1377 atomicIncrement(&HashTableStats::instance().numReinserts);
1378 #endif
1379 #if DUMP_HASHTABLE_STATS_PER_TABLE
1380 ++m_stats->numReinserts;
1381 #endif
1382 Value* newEntry = lookupForWriting(Extractor::extract(entry)).first;
// Mover handles types that must forbid GC while being moved.
1383 Mover<ValueType, Allocator,
1384 Traits::template NeedsToForbidGCOnMove<>::value>::move(std::move(entry),
1385 *newEntry);
1386
1387 return newEntry;
1388 }
1389
// Iterator-returning lookup: end() if absent, otherwise an iterator
// positioned at the found bucket.
1390 template <typename Key,
1391 typename Value,
1392 typename Extractor,
1393 typename HashFunctions,
1394 typename Traits,
1395 typename KeyTraits,
1396 typename Allocator>
1397 template <typename HashTranslator, typename T>
1398 inline typename HashTable<Key,
1399 Value,
1400 Extractor,
1401 HashFunctions,
1402 Traits,
1403 KeyTraits,
1404 Allocator>::iterator
1405 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1406 find(const T& key) {
1407 ValueType* entry = lookup<HashTranslator>(key);
1408 if (!entry)
1409 return end();
1410
1411 return makeKnownGoodIterator(entry);
1412 }
1413
// Const overload of find(); the const_cast is only used to reach the
// shared lookup implementation, the result is wrapped back into a
// const_iterator.
1414 template <typename Key,
1415 typename Value,
1416 typename Extractor,
1417 typename HashFunctions,
1418 typename Traits,
1419 typename KeyTraits,
1420 typename Allocator>
1421 template <typename HashTranslator, typename T>
1422 inline typename HashTable<Key,
1423 Value,
1424 Extractor,
1425 HashFunctions,
1426 Traits,
1427 KeyTraits,
1428 Allocator>::const_iterator
1429 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1430 find(const T& key) const {
1431 ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
1432 if (!entry)
1433 return end();
1434
1435 return makeKnownGoodConstIterator(entry);
1436 }
1437
// Membership test: true iff lookup() finds a bucket for |key|
// (relies on the implicit pointer-to-bool conversion).
1438 template <typename Key,
1439 typename Value,
1440 typename Extractor,
1441 typename HashFunctions,
1442 typename Traits,
1443 typename KeyTraits,
1444 typename Allocator>
1445 template <typename HashTranslator, typename T>
1446 bool HashTable<Key,
1447 Value,
1448 Extractor,
1449 HashFunctions,
1450 Traits,
1451 KeyTraits,
1452 Allocator>::contains(const T& key) const {
1453 return const_cast<HashTable*>(this)->lookup<HashTranslator>(key);
1454 }
1455
// Core removal: marks the bucket deleted (tombstone), updates counters,
// and shrinks the backing store if the load factor drops low enough.
// Bucket destruction happens inside an access-forbidden scope so
// reentrant table access during the destructor is caught by DCHECKs.
1456 template <typename Key,
1457 typename Value,
1458 typename Extractor,
1459 typename HashFunctions,
1460 typename Traits,
1461 typename KeyTraits,
1462 typename Allocator>
1463 void HashTable<Key,
1464 Value,
1465 Extractor,
1466 HashFunctions,
1467 Traits,
1468 KeyTraits,
1469 Allocator>::remove(ValueType* pos) {
1470 registerModification();
1471 #if DUMP_HASHTABLE_STATS
1472 atomicIncrement(&HashTableStats::instance().numRemoves);
1473 #endif
1474 #if DUMP_HASHTABLE_STATS_PER_TABLE
1475 ++m_stats->numRemoves;
1476 #endif
1477
1478 enterAccessForbiddenScope();
1479 deleteBucket(*pos);
1480 leaveAccessForbiddenScope();
1481 ++m_deletedCount;
1482 --m_keyCount;
1483
1484 if (shouldShrink())
1485 shrink();
1486 }
1487
// Removal by (non-const) iterator; a no-op when given end().
1488 template <typename Key,
1489 typename Value,
1490 typename Extractor,
1491 typename HashFunctions,
1492 typename Traits,
1493 typename KeyTraits,
1494 typename Allocator>
1495 inline void
1496 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1497 remove(iterator it) {
1498 if (it == end())
1499 return;
1500 remove(const_cast<ValueType*>(it.m_iterator.m_position));
1501 }
1502
// Removal by const_iterator; a no-op when given end().
1503 template <typename Key,
1504 typename Value,
1505 typename Extractor,
1506 typename HashFunctions,
1507 typename Traits,
1508 typename KeyTraits,
1509 typename Allocator>
1510 inline void
1511 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1512 remove(const_iterator it) {
1513 if (it == end())
1514 return;
1515 remove(const_cast<ValueType*>(it.m_position));
1516 }
1517
// Removal by key: find-then-remove; silently does nothing if absent.
1518 template <typename Key,
1519 typename Value,
1520 typename Extractor,
1521 typename HashFunctions,
1522 typename Traits,
1523 typename KeyTraits,
1524 typename Allocator>
1525 inline void
1526 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1527 remove(KeyPeekInType key) {
1528 remove(find(key));
1529 }
1530
// Allocates a backing store of |size| buckets, each initialized to the
// empty value: either via a zeroed allocation (emptyValueIsZero) or by
// per-bucket initializeBucket() calls. Static asserts guard against
// memset-ing polymorphic keys and against putting placement-new-only
// traceable types into off-heap tables.
1531 template <typename Key,
1532 typename Value,
1533 typename Extractor,
1534 typename HashFunctions,
1535 typename Traits,
1536 typename KeyTraits,
1537 typename Allocator>
1538 Value*
1539 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1540 allocateTable(unsigned size) {
1541 size_t allocSize = size * sizeof(ValueType);
1542 ValueType* result;
1543 // Assert that we will not use memset on things with a vtable entry. The
1544 // compiler will also check this on some platforms. We would like to check
1545 // this on the whole value (key-value pair), but std::is_polymorphic will
1546 // return false for a pair of two types, even if one of the components is
1547 // polymorphic.
1548 static_assert(
1549 !Traits::emptyValueIsZero || !std::is_polymorphic<KeyType>::value,
1550 "empty value cannot be zero for things with a vtable");
1551 static_assert(Allocator::isGarbageCollected ||
1552 ((!AllowsOnlyPlacementNew<KeyType>::value ||
1553 !IsTraceable<KeyType>::value) &&
1554 (!AllowsOnlyPlacementNew<ValueType>::value ||
1555 !IsTraceable<ValueType>::value)),
1556 "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects that "
1557 "have trace methods into an off-heap HashTable");
1558
1559 if (Traits::emptyValueIsZero) {
1560 result = Allocator::template allocateZeroedHashTableBacking<ValueType,
1561 HashTable>(
1562 allocSize);
1563 } else {
1564 result = Allocator::template allocateHashTableBacking<ValueType, HashTable>(
1565 allocSize);
1566 for (unsigned i = 0; i < size; i++)
1567 initializeBucket(result[i]);
1568 }
1569 return result;
1570 }
1571
// Destroys all live buckets in |table| and frees the backing store.
// The GC/non-GC split is explained in the inline comment: under Oilpan
// deleted-marking must accompany destruction so the sweeper does not
// destruct buckets a second time.
1572 template <typename Key,
1573 typename Value,
1574 typename Extractor,
1575 typename HashFunctions,
1576 typename Traits,
1577 typename KeyTraits,
1578 typename Allocator>
1579 void HashTable<Key,
1580 Value,
1581 Extractor,
1582 HashFunctions,
1583 Traits,
1584 KeyTraits,
1585 Allocator>::deleteAllBucketsAndDeallocate(ValueType* table,
1586 unsigned size) {
// Trivially destructible types need no per-bucket destructor calls.
1587 if (!IsTriviallyDestructible<ValueType>::value) {
1588 for (unsigned i = 0; i < size; ++i) {
1589 // This code is called when the hash table is cleared or resized. We
1590 // have allocated a new backing store and we need to run the
1591 // destructors on the old backing store, as it is being freed. If we
1592 // are GCing we need to both call the destructor and mark the bucket
1593 // as deleted, otherwise the destructor gets called again when the
1594 // GC finds the backing store. With the default allocator it's
1595 // enough to call the destructor, since we will free the memory
1596 // explicitly and we won't see the memory with the bucket again.
1597 if (Allocator::isGarbageCollected) {
1598 if (!isEmptyOrDeletedBucket(table[i]))
1599 deleteBucket(table[i]);
1600 } else {
1601 if (!isDeletedBucket(table[i]))
1602 table[i].~ValueType();
1603 }
1604 }
1605 }
1606 Allocator::freeHashTableBacking(table);
1607 }
1608
// Grows the table: first allocation uses the minimum size; when too
// many tombstones force an in-place rehash the size is kept; otherwise
// the capacity doubles (with an overflow RELEASE_ASSERT). Returns the
// possibly-relocated pointer for |entry|.
1609 template <typename Key,
1610 typename Value,
1611 typename Extractor,
1612 typename HashFunctions,
1613 typename Traits,
1614 typename KeyTraits,
1615 typename Allocator>
1616 Value*
1617 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1618 expand(Value* entry) {
1619 unsigned newSize;
1620 if (!m_tableSize) {
1621 newSize = KeyTraits::minimumTableSize;
1622 } else if (mustRehashInPlace()) {
1623 newSize = m_tableSize;
1624 } else {
1625 newSize = m_tableSize * 2;
1626 RELEASE_ASSERT(newSize > m_tableSize);
1627 }
1628
1629 return rehash(newSize, entry);
1630 }
1631
// Attempts to grow the existing backing store in place (GC heap only).
// On success the old contents are first moved to a temporary table,
// the (now larger) original buffer is re-initialized to empty, and the
// entries are rehashed back into it. |success| reports whether the
// in-place expansion was possible; on failure nothing is changed.
// Returns the relocated pointer for |entry| (nullptr on failure).
1632 template <typename Key,
1633 typename Value,
1634 typename Extractor,
1635 typename HashFunctions,
1636 typename Traits,
1637 typename KeyTraits,
1638 typename Allocator>
1639 Value*
1640 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1641 expandBuffer(unsigned newTableSize, Value* entry, bool& success) {
1642 success = false;
1643 DCHECK_LT(m_tableSize, newTableSize);
1644 if (!Allocator::expandHashTableBacking(m_table,
1645 newTableSize * sizeof(ValueType)))
1646 return nullptr;
1647
1648 success = true;
1649
1650 Value* newEntry = nullptr;
1651 unsigned oldTableSize = m_tableSize;
1652 ValueType* originalTable = m_table;
1653
// Evacuate the old entries into a temporary table so the expanded
// buffer can be wiped and refilled by rehashTo().
1654 ValueType* temporaryTable = allocateTable(oldTableSize);
1655 for (unsigned i = 0; i < oldTableSize; i++) {
1656 if (&m_table[i] == entry)
1657 newEntry = &temporaryTable[i];
1658 if (isEmptyOrDeletedBucket(m_table[i])) {
1659 DCHECK_NE(&m_table[i], entry);
1660 if (Traits::emptyValueIsZero) {
1661 memset(&temporaryTable[i], 0, sizeof(ValueType));
1662 } else {
1663 initializeBucket(temporaryTable[i]);
1664 }
1665 } else {
1666 Mover<ValueType, Allocator,
1667 Traits::template NeedsToForbidGCOnMove<>::value>::
1668 move(std::move(m_table[i]), temporaryTable[i]);
1669 }
1670 }
1671 m_table = temporaryTable;
1672
// Reset every bucket of the expanded original buffer to empty before
// reinserting.
1673 if (Traits::emptyValueIsZero) {
1674 memset(originalTable, 0, newTableSize * sizeof(ValueType));
1675 } else {
1676 for (unsigned i = 0; i < newTableSize; i++)
1677 initializeBucket(originalTable[i]);
1678 }
1679 newEntry = rehashTo(originalTable, newTableSize, newEntry);
1680
1681 enterAccessForbiddenScope();
1682 deleteAllBucketsAndDeallocate(temporaryTable, oldTableSize);
1683 leaveAccessForbiddenScope();
1684
1685 return newEntry;
1686 }
1687
// Moves every live entry of the current table into |newTable| (already
// allocated and empty-initialized), installs it as m_table, and resets
// the tombstone count. Returns the new location of |entry|, used by
// callers to fix up pointers across the relocation. The old table is
// NOT freed here; callers are responsible for that.
1688 template <typename Key,
1689 typename Value,
1690 typename Extractor,
1691 typename HashFunctions,
1692 typename Traits,
1693 typename KeyTraits,
1694 typename Allocator>
1695 Value*
1696 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1697 rehashTo(ValueType* newTable, unsigned newTableSize, Value* entry) {
1698 unsigned oldTableSize = m_tableSize;
1699 ValueType* oldTable = m_table;
1700
1701 #if DUMP_HASHTABLE_STATS
1702 if (oldTableSize != 0)
1703 atomicIncrement(&HashTableStats::instance().numRehashes);
1704 #endif
1705
1706 #if DUMP_HASHTABLE_STATS_PER_TABLE
1707 if (oldTableSize != 0)
1708 ++m_stats->numRehashes;
1709 #endif
1710
// Install the new table first: reinsert() probes through m_table.
1711 m_table = newTable;
1712 m_tableSize = newTableSize;
1713
1714 Value* newEntry = nullptr;
1715 for (unsigned i = 0; i != oldTableSize; ++i) {
1716 if (isEmptyOrDeletedBucket(oldTable[i])) {
1717 DCHECK_NE(&oldTable[i], entry);
1718 continue;
1719 }
1720 Value* reinsertedEntry = reinsert(std::move(oldTable[i]));
1721 if (&oldTable[i] == entry) {
1722 DCHECK(!newEntry);
1723 newEntry = reinsertedEntry;
1724 }
1725 }
1726
// Rehashing drops all tombstones.
1727 m_deletedCount = 0;
1728
1729 #if DUMP_HASHTABLE_STATS_PER_TABLE
1730 if (!m_stats)
1731 m_stats = HashTableStatsPtr<Allocator>::create();
1732 #endif
1733
1734 return newEntry;
1735 }
1736
// Rebuilds the table at |newTableSize|. On the GC heap a growing rehash
// first tries the cheaper in-place expandBuffer(); otherwise a fresh
// backing store is allocated, entries are moved via rehashTo(), and the
// old store is destroyed. Returns the relocated pointer for |entry|.
1737 template <typename Key,
1738 typename Value,
1739 typename Extractor,
1740 typename HashFunctions,
1741 typename Traits,
1742 typename KeyTraits,
1743 typename Allocator>
1744 Value*
1745 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1746 rehash(unsigned newTableSize, Value* entry) {
1747 unsigned oldTableSize = m_tableSize;
1748 ValueType* oldTable = m_table;
1749
1750 #if DUMP_HASHTABLE_STATS
1751 if (oldTableSize != 0)
1752 atomicIncrement(&HashTableStats::instance().numRehashes);
1753 #endif
1754
1755 #if DUMP_HASHTABLE_STATS_PER_TABLE
1756 if (oldTableSize != 0)
1757 ++m_stats->numRehashes;
1758 #endif
1759
1760 // The Allocator::isGarbageCollected check is not needed. The check is just
1761 // a static hint for a compiler to indicate that Base::expandBuffer returns
1762 // false if Allocator is a PartitionAllocator.
1763 if (Allocator::isGarbageCollected && newTableSize > oldTableSize) {
1764 bool success;
1765 Value* newEntry = expandBuffer(newTableSize, entry, success);
1766 if (success)
1767 return newEntry;
1768 }
1769
1770 ValueType* newTable = allocateTable(newTableSize);
1771 Value* newEntry = rehashTo(newTable, newTableSize, entry);
1772
1773 enterAccessForbiddenScope();
1774 deleteAllBucketsAndDeallocate(oldTable, oldTableSize);
1775 leaveAccessForbiddenScope();
1776
1777 return newEntry;
1778 }
1779
// Destroys all entries and releases the backing store, returning the
// table to its default (no-table) state.
1780 template <typename Key,
1781 typename Value,
1782 typename Extractor,
1783 typename HashFunctions,
1784 typename Traits,
1785 typename KeyTraits,
1786 typename Allocator>
1787 void HashTable<Key,
1788 Value,
1789 Extractor,
1790 HashFunctions,
1791 Traits,
1792 KeyTraits,
1793 Allocator>::clear() {
1794 registerModification();
1795 if (!m_table)
1796 return;
1797
1798 enterAccessForbiddenScope();
1799 deleteAllBucketsAndDeallocate(m_table, m_tableSize);
1800 leaveAccessForbiddenScope();
1801 m_table = nullptr;
1802 m_tableSize = 0;
1803 m_keyCount = 0;
1804 }
1805
1806 template <typename Key,
1807 typename Value,
1808 typename Extractor,
1809 typename HashFunctions,
1810 typename Traits,
1811 typename KeyTraits,
1812 typename Allocator>
1813 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1814 HashTable(const HashTable& other)
1815 : m_table(nullptr),
1816 m_tableSize(0),
1817 m_keyCount(0),
1818 m_deletedCount(0),
1819 m_queueFlag(false)
1820 #if DCHECK_IS_ON()
1821 ,
1822 m_accessForbidden(false),
1823 m_modifications(0)
1824 #endif
1825 #if DUMP_HASHTABLE_STATS_PER_TABLE
1826 ,
1827 m_stats(HashTableStatsPtr<Allocator>::copy(other.m_stats))
1828 #endif
1829 {
1830 if (other.size())
1831 reserveCapacityForSize(other.size());
1832 // Copy the hash table the dumb way, by adding each element to the new
1833 // table. It might be more efficient to copy the table slots, but it's not
1834 // clear that efficiency is needed.
1835 for (const auto& element : other)
1836 add(element);
1837 }
1838
1839 template <typename Key,
1840 typename Value,
1841 typename Extractor,
1842 typename HashFunctions,
1843 typename Traits,
1844 typename KeyTraits,
1845 typename Allocator>
1846 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1847 HashTable(HashTable&& other)
1848 : m_table(nullptr),
1849 m_tableSize(0),
1850 m_keyCount(0),
1851 m_deletedCount(0),
1852 m_queueFlag(false)
1853 #if DCHECK_IS_ON()
1854 ,
1855 m_accessForbidden(false),
1856 m_modifications(0)
1857 #endif
1858 #if DUMP_HASHTABLE_STATS_PER_TABLE
1859 ,
1860 m_stats(HashTableStatsPtr<Allocator>::copy(other.m_stats))
1861 #endif
1862 {
1863 swap(other);
1864 }
1865
// Swaps all table state with |other|. m_deletedCount is swapped by hand
// because it is a bit field; the queue flags are DCHECKed to be clear
// (swapping mid-ephemeron-iteration is not supported).
1866 template <typename Key,
1867 typename Value,
1868 typename Extractor,
1869 typename HashFunctions,
1870 typename Traits,
1871 typename KeyTraits,
1872 typename Allocator>
1873 void HashTable<Key,
1874 Value,
1875 Extractor,
1876 HashFunctions,
1877 Traits,
1878 KeyTraits,
1879 Allocator>::swap(HashTable& other) {
1880 DCHECK(!accessForbidden());
1881 std::swap(m_table, other.m_table);
1882 std::swap(m_tableSize, other.m_tableSize);
1883 std::swap(m_keyCount, other.m_keyCount);
1884 // std::swap does not work for bit fields.
1885 unsigned deleted = m_deletedCount;
1886 m_deletedCount = other.m_deletedCount;
1887 other.m_deletedCount = deleted;
1888 DCHECK(!m_queueFlag);
1889 DCHECK(!other.m_queueFlag);
1890
1891 #if DCHECK_IS_ON()
1892 std::swap(m_modifications, other.m_modifications);
1893 #endif
1894
1895 #if DUMP_HASHTABLE_STATS_PER_TABLE
1896 HashTableStatsPtr<Allocator>::swap(m_stats, other.m_stats);
1897 #endif
1898 }
1899
// Copy assignment via the copy-and-swap idiom (strong exception-safety
// pattern; also handles self-assignment).
1900 template <typename Key,
1901 typename Value,
1902 typename Extractor,
1903 typename HashFunctions,
1904 typename Traits,
1905 typename KeyTraits,
1906 typename Allocator>
1907 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>&
1908 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1909 operator=(const HashTable& other) {
1910 HashTable tmp(other);
1911 swap(tmp);
1912 return *this;
1913 }
1914
// Move assignment: implemented as a swap; |other| receives this
// table's old contents.
1915 template <typename Key,
1916 typename Value,
1917 typename Extractor,
1918 typename HashFunctions,
1919 typename Traits,
1920 typename KeyTraits,
1921 typename Allocator>
1922 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>&
1923 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
1924 operator=(HashTable&& other) {
1925 swap(other);
1926 return *this;
1927 }
1928
// Helper specialized on the traits' weak-handling flag; provides the
// GC callbacks used by HashTable::trace() below.
1929 template <WeakHandlingFlag weakHandlingFlag,
1930 typename Key,
1931 typename Value,
1932 typename Extractor,
1933 typename HashFunctions,
1934 typename Traits,
1935 typename KeyTraits,
1936 typename Allocator>
1937 struct WeakProcessingHashTableHelper;
1938
// Specialization for tables without weak members: all weak-processing
// callbacks are intentionally no-ops.
1939 template <typename Key,
1940 typename Value,
1941 typename Extractor,
1942 typename HashFunctions,
1943 typename Traits,
1944 typename KeyTraits,
1945 typename Allocator>
1946 struct WeakProcessingHashTableHelper<NoWeakHandlingInCollections,
1947 Key,
1948 Value,
1949 Extractor,
1950 HashFunctions,
1951 Traits,
1952 KeyTraits,
1953 Allocator> {
1954 STATIC_ONLY(WeakProcessingHashTableHelper);
1955 static void process(typename Allocator::Visitor* visitor, void* closure) {}
1956 static void ephemeronIteration(typename Allocator::Visitor* visitor,
1957 void* closure) {}
1958 static void ephemeronIterationDone(typename Allocator::Visitor* visitor,
1959 void* closure) {}
1960 };
1961
// Specialization for tables holding weak members: implements the GC
// callbacks that clear dead entries (process), re-trace strong parts of
// surviving entries during ephemeron fixed-point iteration
// (ephemeronIteration), and clear the enqueued flag when iteration is
// done (ephemeronIterationDone). |closure| is always the HashTable*.
1962 template <typename Key,
1963 typename Value,
1964 typename Extractor,
1965 typename HashFunctions,
1966 typename Traits,
1967 typename KeyTraits,
1968 typename Allocator>
1969 struct WeakProcessingHashTableHelper<WeakHandlingInCollections,
1970 Key,
1971 Value,
1972 Extractor,
1973 HashFunctions,
1974 Traits,
1975 KeyTraits,
1976 Allocator> {
1977 STATIC_ONLY(WeakProcessingHashTableHelper);
1978
1979 using HashTableType = HashTable<Key,
1980 Value,
1981 Extractor,
1982 HashFunctions,
1983 Traits,
1984 KeyTraits,
1985 Allocator>;
1986 using ValueType = typename HashTableType::ValueType;
1987
1988 // Used for purely weak and for weak-and-strong tables (ephemerons).
1989 static void process(typename Allocator::Visitor* visitor, void* closure) {
1990 HashTableType* table = reinterpret_cast<HashTableType*>(closure);
1991 if (!table->m_table)
1992 return;
1993 // Now perform weak processing (this is a no-op if the backing was
1994 // accessible through an iterator and was already marked strongly).
1995 for (ValueType* element = table->m_table + table->m_tableSize - 1;
1996 element >= table->m_table; element--) {
1997 if (!HashTableType::isEmptyOrDeletedBucket(*element)) {
1998 // At this stage calling trace can make no difference
1999 // (everything is already traced), but we use the return value
2000 // to remove things from the collection.
2001
2002 // FIXME: This should be rewritten so that this can check if the
2003 // element is dead without calling trace, which is semantically
2004 // not correct to be called in weak processing stage.
2005 if (TraceInCollectionTrait<WeakHandlingInCollections,
2006 WeakPointersActWeak, ValueType,
2007 Traits>::trace(visitor, *element)) {
// trace() returned true: the element is dead; delete its bucket
// and bump the tombstone count without rehashing (no allocation
// is allowed during this GC phase).
2008 table->registerModification();
2009 HashTableType::deleteBucket(*element); // Also calls the destructor.
2010 table->m_deletedCount++;
2011 table->m_keyCount--;
2012 // We don't rehash the backing until the next add or delete,
2013 // because that would cause allocation during GC.
2014 }
2015 }
2016 }
2017 }
2018
2019 // Called repeatedly for tables that have both weak and strong pointers.
2020 static void ephemeronIteration(typename Allocator::Visitor* visitor,
2021 void* closure) {
2022 HashTableType* table = reinterpret_cast<HashTableType*>(closure);
2023 DCHECK(table->m_table);
2024 // Check the hash table for elements that we now know will not be
2025 // removed by weak processing. Those elements need to have their strong
2026 // pointers traced.
2027 for (ValueType* element = table->m_table + table->m_tableSize - 1;
2028 element >= table->m_table; element--) {
2029 if (!HashTableType::isEmptyOrDeletedBucket(*element))
2030 TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak,
2031 ValueType, Traits>::trace(visitor, *element);
2032 }
2033 }
2034
2035 // Called when the ephemeron iteration is done and before running the per
2036 // thread weak processing. It is guaranteed to be called before any thread
2037 // is resumed.
2038 static void ephemeronIterationDone(typename Allocator::Visitor* visitor,
2039 void* closure) {
2040 HashTableType* table = reinterpret_cast<HashTableType*>(closure);
2041 #if DCHECK_IS_ON()
2042 DCHECK(Allocator::weakTableRegistered(visitor, table));
2043 #endif
2044 table->clearEnqueued();
2045 }
2046 };
2047
// GC tracing entry point for the table. Marks the backing store (or,
// for weak tables, registers delayed marking plus weak-processing
// callbacks), registers the backing-store reference for heap
// compaction, and finally traces the individual elements unless
// ephemeron iteration will do so.
2048 template <typename Key,
2049 typename Value,
2050 typename Extractor,
2051 typename HashFunctions,
2052 typename Traits,
2053 typename KeyTraits,
2054 typename Allocator>
2055 template <typename VisitorDispatcher>
2056 void HashTable<Key,
2057 Value,
2058 Extractor,
2059 HashFunctions,
2060 Traits,
2061 KeyTraits,
2062 Allocator>::trace(VisitorDispatcher visitor) {
2063 #if DUMP_HASHTABLE_STATS_PER_TABLE
2064 Allocator::markNoTracing(visitor, m_stats);
2065 #endif
2066
2067 // If someone else already marked the backing and queued up the trace and/or
2068 // weak callback then we are done. This optimization does not happen for
2069 // ListHashSet since its iterator does not point at the backing.
2070 if (!m_table || Allocator::isHeapObjectAlive(m_table))
2071 return;
2072
2073 // Normally, we mark the backing store without performing trace. This means
2074 // it is marked live, but the pointers inside it are not marked. Instead we
2075 // will mark the pointers below. However, for backing stores that contain
2076 // weak pointers the handling is rather different. We don't mark the
2077 // backing store here, so the marking GC will leave the backing unmarked. If
2078 // the backing is found in any other way than through its HashTable (ie from
2079 // an iterator) then the mark bit will be set and the pointers will be
2080 // marked strongly, avoiding problems with iterating over things that
2081 // disappear due to weak processing while we are iterating over them. We
2082 // register the backing store pointer for delayed marking which will take
2083 // place after we know if the backing is reachable from elsewhere. We also
2084 // register a weakProcessing callback which will perform weak processing if
2085 // needed.
2086 if (Traits::weakHandlingFlag == NoWeakHandlingInCollections) {
2087 Allocator::markNoTracing(visitor, m_table);
2088 } else {
2089 Allocator::registerDelayedMarkNoTracing(visitor, m_table);
2090 // Since we're delaying marking this HashTable, it is possible that the
2091 // registerWeakMembers is called multiple times (in rare
2092 // cases). However, it shouldn't cause any issue.
2093 Allocator::registerWeakMembers(
2094 visitor, this,
2095 WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value,
2096 Extractor, HashFunctions, Traits,
2097 KeyTraits, Allocator>::process);
2098 }
2099 // If the backing store will be moved by sweep compaction, register the
2100 // table reference pointing to the backing store object, so that the
2101 // reference is updated upon object relocation. A no-op if not enabled
2102 // by the visitor.
2103 Allocator::registerBackingStoreReference(visitor, &m_table);
2104 if (!IsTraceableInCollectionTrait<Traits>::value)
2105 return;
2106 if (Traits::weakHandlingFlag == WeakHandlingInCollections) {
2107 // If we have both strong and weak pointers in the collection then
2108 // we queue up the collection for fixed point iteration a la
2109 // Ephemerons:
2110 // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also
2111 // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak
2112 #if DCHECK_IS_ON()
2113 DCHECK(!enqueued() || Allocator::weakTableRegistered(visitor, this));
2114 #endif
2115 if (!enqueued()) {
2116 Allocator::registerWeakTable(
2117 visitor, this,
2118 WeakProcessingHashTableHelper<
2119 Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions,
2120 Traits, KeyTraits, Allocator>::ephemeronIteration,
2121 WeakProcessingHashTableHelper<
2122 Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions,
2123 Traits, KeyTraits, Allocator>::ephemeronIterationDone);
2124 setEnqueued();
2125 }
2126 // We don't need to trace the elements here, since registering as a
2127 // weak table above will cause them to be traced (perhaps several
2128 // times). It's better to wait until everything else is traced
2129 // before tracing the elements for the first time; this may reduce
2130 // (by one) the number of iterations needed to get to a fixed point.
2131 return;
2132 }
// Strong-only path: trace each live element directly.
2133 for (ValueType* element = m_table + m_tableSize - 1; element >= m_table;
2134 element--) {
2135 if (!isEmptyOrDeletedBucket(*element))
2136 Allocator::template trace<VisitorDispatcher, ValueType, Traits>(visitor,
2137 *element);
2138 }
2139 }
2140
2141 // iterator adapters
2142
2143 template <typename HashTableType, typename Traits>
2144 struct HashTableConstIteratorAdapter {
2145 STACK_ALLOCATED();
2146 HashTableConstIteratorAdapter() {}
2147 HashTableConstIteratorAdapter(
2148 const typename HashTableType::const_iterator& impl)
2149 : m_impl(impl) {}
2150 typedef typename Traits::IteratorConstGetType GetType;
2151 typedef
2152 typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType;
2153
2154 GetType get() const {
2155 return const_cast<GetType>(SourceGetType(m_impl.get()));
2156 }
2157 typename Traits::IteratorConstReferenceType operator*() const {
2158 return Traits::getToReferenceConstConversion(get());
2159 }
2160 GetType operator->() const { return get(); }
2161
2162 HashTableConstIteratorAdapter& operator++() {
2163 ++m_impl;
2164 return *this;
2165 }
2166 // postfix ++ intentionally omitted
2167
2168 typename HashTableType::const_iterator m_impl;
2169 };
2170
2171 template <typename HashTable, typename Traits>
2172 std::ostream& operator<<(
2173 std::ostream& stream,
2174 const HashTableConstIteratorAdapter<HashTable, Traits>& iterator) {
2175 return stream << iterator.m_impl;
2176 }
2177
2178 template <typename HashTableType, typename Traits>
2179 struct HashTableIteratorAdapter {
2180 STACK_ALLOCATED();
2181 typedef typename Traits::IteratorGetType GetType;
2182 typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType;
2183
2184 HashTableIteratorAdapter() {}
2185 HashTableIteratorAdapter(const typename HashTableType::iterator& impl)
2186 : m_impl(impl) {}
2187
2188 GetType get() const {
2189 return const_cast<GetType>(SourceGetType(m_impl.get()));
2190 }
2191 typename Traits::IteratorReferenceType operator*() const {
2192 return Traits::getToReferenceConversion(get());
2193 }
2194 GetType operator->() const { return get(); }
2195
2196 HashTableIteratorAdapter& operator++() {
2197 ++m_impl;
2198 return *this;
2199 }
2200 // postfix ++ intentionally omitted
2201
2202 operator HashTableConstIteratorAdapter<HashTableType, Traits>() {
2203 typename HashTableType::const_iterator i = m_impl;
2204 return i;
2205 }
2206
2207 typename HashTableType::iterator m_impl;
2208 };
2209
2210 template <typename HashTable, typename Traits>
2211 std::ostream& operator<<(
2212 std::ostream& stream,
2213 const HashTableIteratorAdapter<HashTable, Traits>& iterator) {
2214 return stream << iterator.m_impl;
2215 }
2216
2217 template <typename T, typename U>
2218 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a,
2219 const HashTableConstIteratorAdapter<T, U>& b) {
2220 return a.m_impl == b.m_impl;
2221 }
2222
2223 template <typename T, typename U>
2224 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a,
2225 const HashTableConstIteratorAdapter<T, U>& b) {
2226 return a.m_impl != b.m_impl;
2227 }
2228
2229 template <typename T, typename U>
2230 inline bool operator==(const HashTableIteratorAdapter<T, U>& a,
2231 const HashTableIteratorAdapter<T, U>& b) {
2232 return a.m_impl == b.m_impl;
2233 }
2234
2235 template <typename T, typename U>
2236 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a,
2237 const HashTableIteratorAdapter<T, U>& b) {
2238 return a.m_impl != b.m_impl;
2239 }
2240
2241 // All 4 combinations of ==, != and Const,non const.
2242 template <typename T, typename U>
2243 inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a,
2244 const HashTableIteratorAdapter<T, U>& b) {
2245 return a.m_impl == b.m_impl;
2246 }
2247
2248 template <typename T, typename U>
2249 inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a,
2250 const HashTableIteratorAdapter<T, U>& b) {
2251 return a.m_impl != b.m_impl;
2252 }
2253
2254 template <typename T, typename U>
2255 inline bool operator==(const HashTableIteratorAdapter<T, U>& a,
2256 const HashTableConstIteratorAdapter<T, U>& b) {
2257 return a.m_impl == b.m_impl;
2258 }
2259
2260 template <typename T, typename U>
2261 inline bool operator!=(const HashTableIteratorAdapter<T, U>& a,
2262 const HashTableConstIteratorAdapter<T, U>& b) {
2263 return a.m_impl != b.m_impl;
2264 }
2265
// Erases from |collection| every element contained in |toBeRemoved|.
// |collection| must provide isEmpty() and erase(value); |toBeRemoved|
// must provide isEmpty() and const iteration.
template <typename Collection1, typename Collection2>
inline void removeAll(Collection1& collection, const Collection2& toBeRemoved) {
  // Nothing to do when either side is empty.
  if (collection.isEmpty() || toBeRemoved.isEmpty())
    return;
  typename Collection2::const_iterator it = toBeRemoved.begin();
  const typename Collection2::const_iterator last = toBeRemoved.end();
  while (it != last) {
    collection.erase(*it);
    ++it;
  }
}
2275
2276 } // namespace WTF
2277
2278 #include "wtf/HashIterators.h"
2279
2280 #endif // WTF_HashTable_h
OLDNEW
« no previous file with comments | « third_party/WebKit/Source/wtf/HashSet.h ('k') | third_party/WebKit/Source/wtf/HashTable.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698