| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights | 2 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights |
| 3 * reserved. | 3 * reserved. |
| 4 * Copyright (C) 2008 David Levin <levin@chromium.org> | 4 * Copyright (C) 2008 David Levin <levin@chromium.org> |
| 5 * | 5 * |
| 6 * This library is free software; you can redistribute it and/or | 6 * This library is free software; you can redistribute it and/or |
| 7 * modify it under the terms of the GNU Library General Public | 7 * modify it under the terms of the GNU Library General Public |
| 8 * License as published by the Free Software Foundation; either | 8 * License as published by the Free Software Foundation; either |
| 9 * version 2 of the License, or (at your option) any later version. | 9 * version 2 of the License, or (at your option) any later version. |
| 10 * | 10 * |
| (...skipping 24 matching lines...) |
| 35 #define DUMP_HASHTABLE_STATS 0 | 35 #define DUMP_HASHTABLE_STATS 0 |
| 36 #define DUMP_HASHTABLE_STATS_PER_TABLE 0 | 36 #define DUMP_HASHTABLE_STATS_PER_TABLE 0 |
| 37 | 37 |
| 38 #if DUMP_HASHTABLE_STATS | 38 #if DUMP_HASHTABLE_STATS |
| 39 #include "wtf/Atomics.h" | 39 #include "wtf/Atomics.h" |
| 40 #include "wtf/Threading.h" | 40 #include "wtf/Threading.h" |
| 41 #endif | 41 #endif |
| 42 | 42 |
| 43 #if DUMP_HASHTABLE_STATS_PER_TABLE | 43 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 44 #include "wtf/DataLog.h" | 44 #include "wtf/DataLog.h" |
| 45 #include <type_traits> |
| 45 #endif | 46 #endif |
| 46 | 47 |
| 47 #if DUMP_HASHTABLE_STATS | 48 #if DUMP_HASHTABLE_STATS |
| 48 #if DUMP_HASHTABLE_STATS_PER_TABLE | 49 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 49 #define UPDATE_PROBE_COUNTS() \ | 50 |
| 50 ++probeCount; \ | 51 #define UPDATE_PROBE_COUNTS() \ |
| 51 HashTableStats::recordCollisionAtCount(probeCount); \ | 52 ++probeCount; \ |
| 52 ++perTableProbeCount; \ | 53 HashTableStats::instance().recordCollisionAtCount(probeCount); \ |
| 54 ++perTableProbeCount; \ |
| 53 m_stats->recordCollisionAtCount(perTableProbeCount) | 55 m_stats->recordCollisionAtCount(perTableProbeCount) |
| 54 #define UPDATE_ACCESS_COUNTS() \ | 56 #define UPDATE_ACCESS_COUNTS() \ |
| 55 atomicIncrement(&HashTableStats::numAccesses); \ | 57 atomicIncrement(&HashTableStats::instance().numAccesses); \ |
| 56 int probeCount = 0; \ | 58 int probeCount = 0; \ |
| 57 ++m_stats->numAccesses; \ | 59 ++m_stats->numAccesses; \ |
| 58 int perTableProbeCount = 0 | 60 int perTableProbeCount = 0 |
| 59 #else | 61 #else |
| 60 #define UPDATE_PROBE_COUNTS() \ | 62 #define UPDATE_PROBE_COUNTS() \ |
| 61 ++probeCount; \ | 63 ++probeCount; \ |
| 62 HashTableStats::recordCollisionAtCount(probeCount) | 64 HashTableStats::instance().recordCollisionAtCount(probeCount) |
| 63 #define UPDATE_ACCESS_COUNTS() \ | 65 #define UPDATE_ACCESS_COUNTS() \ |
| 64 atomicIncrement(&HashTableStats::numAccesses); \ | 66 atomicIncrement(&HashTableStats::instance().numAccesses); \ |
| 65 int probeCount = 0 | 67 int probeCount = 0 |
| 66 #endif | 68 #endif |
| 67 #else | 69 #else |
| 68 #if DUMP_HASHTABLE_STATS_PER_TABLE | 70 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 69 #define UPDATE_PROBE_COUNTS() \ | 71 #define UPDATE_PROBE_COUNTS() \ |
| 70 ++perTableProbeCount; \ | 72 ++perTableProbeCount; \ |
| 71 m_stats->recordCollisionAtCount(perTableProbeCount) | 73 m_stats->recordCollisionAtCount(perTableProbeCount) |
| 72 #define UPDATE_ACCESS_COUNTS() \ | 74 #define UPDATE_ACCESS_COUNTS() \ |
| 73 ++m_stats->numAccesses; \ | 75 ++m_stats->numAccesses; \ |
| 74 int perTableProbeCount = 0 | 76 int perTableProbeCount = 0 |
| 75 #else | 77 #else |
| 76 #define UPDATE_PROBE_COUNTS() \ | 78 #define UPDATE_PROBE_COUNTS() \ |
| 77 do { \ | 79 do { \ |
| 78 } while (0) | 80 } while (0) |
| 79 #define UPDATE_ACCESS_COUNTS() \ | 81 #define UPDATE_ACCESS_COUNTS() \ |
| 80 do { \ | 82 do { \ |
| 81 } while (0) | 83 } while (0) |
| 82 #endif | 84 #endif |
| 83 #endif | 85 #endif |
| 84 | 86 |
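
The point of this hunk: the global counters stop being static data members and are reached through a `HashTableStats::instance()` accessor instead, which lets the same struct type double as the per-table stats block later in the diff. A minimal, self-contained sketch of the accessor idiom follows; the real definition lives in the corresponding .cpp, which this diff does not show, so the body here is an assumption:

```cpp
// Approximation of the new accessor pattern. The diff declares instance()
// out-of-line; a C++11 thread-safe function-local static is the usual shape.
struct HashTableStats {
  int numAccesses = 0;
  // ... remaining counters as in the struct below ...

  static HashTableStats& instance() {
    static HashTableStats stats;  // constructed once, counters start at zero
    return stats;
  }
};

int main() {
  // Roughly what the rewritten macros do (they go through atomicIncrement).
  ++HashTableStats::instance().numAccesses;
  return HashTableStats::instance().numAccesses - 1;  // exits 0
}
```
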
| 85 namespace WTF { | 87 namespace WTF { |
| 86 | 88 |
| 87 #if DUMP_HASHTABLE_STATS | 89 #if DUMP_HASHTABLE_STATS |
| 90 struct WTF_EXPORT HashTableStats { |
| 91 HashTableStats() |
| 92 : numAccesses(0), |
| 93 numRehashes(0), |
| 94 numRemoves(0), |
| 95 numReinserts(0), |
| 96 maxCollisions(0), |
| 97 numCollisions(0), |
| 98 collisionGraph() {} |
| 88 | 99 |
| 89 struct WTF_EXPORT HashTableStats { | |
| 90 STATIC_ONLY(HashTableStats); | |
| 91 // The following variables are all atomically incremented when modified. | 100 // The following variables are all atomically incremented when modified. |
| 92 static int numAccesses; | 101 int numAccesses; |
| 93 static int numRehashes; | 102 int numRehashes; |
| 94 static int numRemoves; | 103 int numRemoves; |
| 95 static int numReinserts; | 104 int numReinserts; |
| 96 | 105 |
| 97 // The following variables are only modified in the recordCollisionAtCount | 106 // The following variables are only modified in the recordCollisionAtCount |
| 98 // method within a mutex. | 107 // method within a mutex. |
| 99 static int maxCollisions; | 108 int maxCollisions; |
| 100 static int numCollisions; | 109 int numCollisions; |
| 101 static int collisionGraph[4096]; | 110 int collisionGraph[4096]; |
| 102 | 111 |
| 103 static void recordCollisionAtCount(int count); | 112 void copy(const HashTableStats* other); |
| 104 static void dumpStats(); | 113 void recordCollisionAtCount(int count); |
| 114 void dumpStats(); |
| 115 |
| 116 static HashTableStats& instance(); |
| 117 |
| 118 template <typename VisitorDispatcher> |
| 119 void trace(VisitorDispatcher) {} |
| 105 }; | 120 }; |
| 106 | 121 |
| 122 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 123 template <typename Allocator, bool isGCType = Allocator::isGarbageCollected> |
| 124 class HashTableStatsPtr; |
| 125 |
| 126 template <typename Allocator> |
| 127 class HashTableStatsPtr<Allocator, false> final { |
| 128 STATIC_ONLY(HashTableStatsPtr); |
| 129 |
| 130 public: |
| 131 static std::unique_ptr<HashTableStats> create() { |
| 132 return wrapUnique(new HashTableStats); |
| 133 } |
| 134 |
| 135 static std::unique_ptr<HashTableStats> copy( |
| 136 const std::unique_ptr<HashTableStats>& other) { |
| 137 if (!other) |
| 138 return nullptr; |
| 139 return wrapUnique(new HashTableStats(*other)); |
| 140 } |
| 141 |
| 142 static void swap(std::unique_ptr<HashTableStats>& stats, |
| 143 std::unique_ptr<HashTableStats>& other) { |
| 144 stats.swap(other); |
| 145 } |
| 146 }; |
| 147 |
| 148 template <typename Allocator> |
| 149 class HashTableStatsPtr<Allocator, true> final { |
| 150 STATIC_ONLY(HashTableStatsPtr); |
| 151 |
| 152 public: |
| 153 static HashTableStats* create() { |
| 154 // Resort to manually allocating this POD on the vector |
| 155 // backing heap, as blink::GarbageCollected<> isn't in scope |
| 156 // in WTF. |
| 157 void* storage = reinterpret_cast<void*>( |
| 158 Allocator::template allocateVectorBacking<unsigned char>( |
| 159 sizeof(HashTableStats))); |
| 160 return new (storage) HashTableStats; |
| 161 } |
| 162 |
| 163 static HashTableStats* copy(const HashTableStats* other) { |
| 164 if (!other) |
| 165 return nullptr; |
| 166 HashTableStats* obj = create(); |
| 167 obj->copy(other); |
| 168 return obj; |
| 169 } |
| 170 |
| 171 static void swap(HashTableStats*& stats, HashTableStats*& other) { |
| 172 std::swap(stats, other); |
| 173 } |
| 174 }; |
| 175 #endif |
| 107 #endif | 176 #endif |
| 108 | 177 |
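
The two `HashTableStatsPtr` specializations above give HashTable a single create/copy/swap vocabulary while the storage strategy follows the allocator: `std::unique_ptr` ownership for off-heap (PartitionAlloc) tables, and a raw pointer placed on the GC'd vector-backing heap for on-heap (Oilpan) tables. Below is the dispatch idiom reduced to a runnable sketch with hypothetical stand-in types; only the `isGarbageCollected` flag matters:

```cpp
#include <iostream>
#include <memory>

struct Stats { int numAccesses = 0; };

// Stand-ins for the two WTF allocator families.
struct PartitionAllocator { static const bool isGarbageCollected = false; };
struct HeapAllocator { static const bool isGarbageCollected = true; };

// Primary template: the bool defaults from a trait on the allocator,
// so call sites never have to spell it out.
template <typename Allocator, bool isGCType = Allocator::isGarbageCollected>
class StatsPtr;

template <typename Allocator>
class StatsPtr<Allocator, false> {  // off-heap: ordinary unique ownership
 public:
  static std::unique_ptr<Stats> create() { return std::make_unique<Stats>(); }
};

template <typename Allocator>
class StatsPtr<Allocator, true> {  // on-heap: raw pointer, reclaimed by GC
 public:
  // Plain new stands in for the vector-backing allocation in the real code.
  static Stats* create() { return new Stats; }
};

int main() {
  auto offHeap = StatsPtr<PartitionAllocator>::create();  // unique_ptr<Stats>
  auto onHeap = StatsPtr<HeapAllocator>::create();        // Stats*
  std::cout << offHeap->numAccesses + onHeap->numAccesses << "\n";
  delete onHeap;  // in real Oilpan code the collector reclaims this
}
```
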
| 109 template <typename Key, | 178 template <typename Key, |
| 110 typename Value, | 179 typename Value, |
| 111 typename Extractor, | 180 typename Extractor, |
| 112 typename HashFunctions, | 181 typename HashFunctions, |
| 113 typename Traits, | 182 typename Traits, |
| 114 typename KeyTraits, | 183 typename KeyTraits, |
| 115 typename Allocator> | 184 typename Allocator> |
| 116 class HashTable; | 185 class HashTable; |
| (...skipping 472 matching lines...) |
| 589 const_iterator; | 658 const_iterator; |
| 590 typedef Traits ValueTraits; | 659 typedef Traits ValueTraits; |
| 591 typedef Key KeyType; | 660 typedef Key KeyType; |
| 592 typedef typename KeyTraits::PeekInType KeyPeekInType; | 661 typedef typename KeyTraits::PeekInType KeyPeekInType; |
| 593 typedef Value ValueType; | 662 typedef Value ValueType; |
| 594 typedef Extractor ExtractorType; | 663 typedef Extractor ExtractorType; |
| 595 typedef KeyTraits KeyTraitsType; | 664 typedef KeyTraits KeyTraitsType; |
| 596 typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType; | 665 typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType; |
| 597 typedef HashTableAddResult<HashTable, ValueType> AddResult; | 666 typedef HashTableAddResult<HashTable, ValueType> AddResult; |
| 598 | 667 |
| 599 #if DUMP_HASHTABLE_STATS_PER_TABLE | |
| 600 struct Stats { | |
| 601 DISALLOW_NEW(Stats); | |
| 602 Stats() | |
| 603 : numAccesses(0), | |
| 604 numRehashes(0), | |
| 605 numRemoves(0), | |
| 606 numReinserts(0), | |
| 607 maxCollisions(0), | |
| 608 numCollisions(0), | |
| 609 collisionGraph() {} | |
| 610 | |
| 611 int numAccesses; | |
| 612 int numRehashes; | |
| 613 int numRemoves; | |
| 614 int numReinserts; | |
| 615 | |
| 616 int maxCollisions; | |
| 617 int numCollisions; | |
| 618 int collisionGraph[4096]; | |
| 619 | |
| 620 void recordCollisionAtCount(int count) { | |
| 621 if (count > maxCollisions) | |
| 622 maxCollisions = count; | |
| 623 numCollisions++; | |
| 624 collisionGraph[count]++; | |
| 625 } | |
| 626 | |
| 627 void dumpStats() { | |
| 628 dataLogF("\nWTF::HashTable::Stats dump\n\n"); | |
| 629 dataLogF("%d accesses\n", numAccesses); | |
| 630 dataLogF("%d total collisions, average %.2f probes per access\n", | |
| 631 numCollisions, | |
| 632 1.0 * (numAccesses + numCollisions) / numAccesses); | |
| 633 dataLogF("longest collision chain: %d\n", maxCollisions); | |
| 634 for (int i = 1; i <= maxCollisions; i++) { | |
| 635 dataLogF( | |
| 636 " %d lookups with exactly %d collisions (%.2f%% , %.2f%% with " | |
| 637 "this many or more)\n", | |
| 638 collisionGraph[i], i, | |
| 639 100.0 * (collisionGraph[i] - collisionGraph[i + 1]) / numAccesses, | |
| 640 100.0 * collisionGraph[i] / numAccesses); | |
| 641 } | |
| 642 dataLogF("%d rehashes\n", numRehashes); | |
| 643 dataLogF("%d reinserts\n", numReinserts); | |
| 644 } | |
| 645 }; | |
| 646 #endif | |
| 647 | |
| 648 HashTable(); | 668 HashTable(); |
| 649 void finalize() { | 669 void finalize() { |
| 650 ASSERT(!Allocator::isGarbageCollected); | 670 ASSERT(!Allocator::isGarbageCollected); |
| 651 if (LIKELY(!m_table)) | 671 if (LIKELY(!m_table)) |
| 652 return; | 672 return; |
| 653 ASSERT(!m_accessForbidden); | 673 ASSERT(!m_accessForbidden); |
| 654 #if ENABLE(ASSERT) | 674 #if ENABLE(ASSERT) |
| 655 m_accessForbidden = true; | 675 m_accessForbidden = true; |
| 656 #endif | 676 #endif |
| 657 deleteAllBucketsAndDeallocate(m_table, m_tableSize); | 677 deleteAllBucketsAndDeallocate(m_table, m_tableSize); |
| (...skipping 195 matching lines...) |
| 853 unsigned m_queueFlag : 1; | 873 unsigned m_queueFlag : 1; |
| 854 unsigned m_accessForbidden : 1; | 874 unsigned m_accessForbidden : 1; |
| 855 unsigned m_modifications; | 875 unsigned m_modifications; |
| 856 #else | 876 #else |
| 857 unsigned m_deletedCount : 31; | 877 unsigned m_deletedCount : 31; |
| 858 unsigned m_queueFlag : 1; | 878 unsigned m_queueFlag : 1; |
| 859 #endif | 879 #endif |
| 860 | 880 |
| 861 #if DUMP_HASHTABLE_STATS_PER_TABLE | 881 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 862 public: | 882 public: |
| 863 mutable std::unique_ptr<Stats> m_stats; | 883 mutable |
| 884 typename std::conditional<Allocator::isGarbageCollected, |
| 885 HashTableStats*, |
| 886 std::unique_ptr<HashTableStats>>::type m_stats; |
| 864 #endif | 887 #endif |
| 865 | 888 |
| 866 template <WeakHandlingFlag x, | 889 template <WeakHandlingFlag x, |
| 867 typename T, | 890 typename T, |
| 868 typename U, | 891 typename U, |
| 869 typename V, | 892 typename V, |
| 870 typename W, | 893 typename W, |
| 871 typename X, | 894 typename X, |
| 872 typename Y, | 895 typename Y, |
| 873 typename Z> | 896 typename Z> |
| (...skipping 21 matching lines...) |
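
The `m_stats` declaration in the hunk above is why `<type_traits>` joined the includes at the top of the diff: `std::conditional` selects, at compile time, the pointer flavor that matches what `HashTableStatsPtr<Allocator>::create()` returns. The selection in isolation, with hypothetical stand-ins:

```cpp
#include <memory>
#include <type_traits>

struct Stats {};
struct PartitionAllocator { static const bool isGarbageCollected = false; };
struct HeapAllocator { static const bool isGarbageCollected = true; };

// Same choice as the m_stats member: a raw pointer the GC can trace for
// on-heap tables, a owning unique_ptr for off-heap tables.
template <typename Allocator>
using StatsHolder =
    typename std::conditional<Allocator::isGarbageCollected,
                              Stats*,
                              std::unique_ptr<Stats>>::type;

static_assert(std::is_same<StatsHolder<HeapAllocator>, Stats*>::value,
              "GC'd tables hold a raw, traceable pointer");
static_assert(std::is_same<StatsHolder<PartitionAllocator>,
                           std::unique_ptr<Stats>>::value,
              "off-heap tables own their stats block");
```
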
| 895 m_keyCount(0), | 918 m_keyCount(0), |
| 896 m_deletedCount(0), | 919 m_deletedCount(0), |
| 897 m_queueFlag(false) | 920 m_queueFlag(false) |
| 898 #if ENABLE(ASSERT) | 921 #if ENABLE(ASSERT) |
| 899 , | 922 , |
| 900 m_accessForbidden(false), | 923 m_accessForbidden(false), |
| 901 m_modifications(0) | 924 m_modifications(0) |
| 902 #endif | 925 #endif |
| 903 #if DUMP_HASHTABLE_STATS_PER_TABLE | 926 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 904 , | 927 , |
| 905 m_stats(wrapUnique(new Stats)) | 928 m_stats(nullptr) |
| 906 #endif | 929 #endif |
| 907 { | 930 { |
| 908 static_assert(Allocator::isGarbageCollected || | 931 static_assert(Allocator::isGarbageCollected || |
| 909 (!IsPointerToGarbageCollectedType<Key>::value && | 932 (!IsPointerToGarbageCollectedType<Key>::value && |
| 910 !IsPointerToGarbageCollectedType<Value>::value), | 933 !IsPointerToGarbageCollectedType<Value>::value), |
| 911 "Cannot put raw pointers to garbage-collected classes into an " | 934 "Cannot put raw pointers to garbage-collected classes into an " |
| 912 "off-heap collection."); | 935 "off-heap collection."); |
| 913 } | 936 } |
| 914 | 937 |
| 915 inline unsigned doubleHash(unsigned key) { | 938 inline unsigned doubleHash(unsigned key) { |
| (...skipping 407 matching lines...) |
| 1323 typename Allocator> | 1346 typename Allocator> |
| 1324 Value* | 1347 Value* |
| 1325 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: | 1348 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: |
| 1326 reinsert(ValueType&& entry) { | 1349 reinsert(ValueType&& entry) { |
| 1327 ASSERT(m_table); | 1350 ASSERT(m_table); |
| 1328 registerModification(); | 1351 registerModification(); |
| 1329 ASSERT(!lookupForWriting(Extractor::extract(entry)).second); | 1352 ASSERT(!lookupForWriting(Extractor::extract(entry)).second); |
| 1330 ASSERT( | 1353 ASSERT( |
| 1331 !isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first))); | 1354 !isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first))); |
| 1332 #if DUMP_HASHTABLE_STATS | 1355 #if DUMP_HASHTABLE_STATS |
| 1333 atomicIncrement(&HashTableStats::numReinserts); | 1356 atomicIncrement(&HashTableStats::instance().numReinserts); |
| 1334 #endif | 1357 #endif |
| 1335 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1358 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1336 ++m_stats->numReinserts; | 1359 ++m_stats->numReinserts; |
| 1337 #endif | 1360 #endif |
| 1338 Value* newEntry = lookupForWriting(Extractor::extract(entry)).first; | 1361 Value* newEntry = lookupForWriting(Extractor::extract(entry)).first; |
| 1339 Mover<ValueType, Allocator, | 1362 Mover<ValueType, Allocator, |
| 1340 Traits::template NeedsToForbidGCOnMove<>::value>::move(std::move(entry), | 1363 Traits::template NeedsToForbidGCOnMove<>::value>::move(std::move(entry), |
| 1341 *newEntry); | 1364 *newEntry); |
| 1342 | 1365 |
| 1343 return newEntry; | 1366 return newEntry; |
| (...skipping 74 matching lines...) |
| 1418 typename Allocator> | 1441 typename Allocator> |
| 1419 void HashTable<Key, | 1442 void HashTable<Key, |
| 1420 Value, | 1443 Value, |
| 1421 Extractor, | 1444 Extractor, |
| 1422 HashFunctions, | 1445 HashFunctions, |
| 1423 Traits, | 1446 Traits, |
| 1424 KeyTraits, | 1447 KeyTraits, |
| 1425 Allocator>::remove(ValueType* pos) { | 1448 Allocator>::remove(ValueType* pos) { |
| 1426 registerModification(); | 1449 registerModification(); |
| 1427 #if DUMP_HASHTABLE_STATS | 1450 #if DUMP_HASHTABLE_STATS |
| 1428 atomicIncrement(&HashTableStats::numRemoves); | 1451 atomicIncrement(&HashTableStats::instance().numRemoves); |
| 1429 #endif | 1452 #endif |
| 1430 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1453 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1431 ++m_stats->numRemoves; | 1454 ++m_stats->numRemoves; |
| 1432 #endif | 1455 #endif |
| 1433 | 1456 |
| 1434 ASSERT(!m_accessForbidden); | 1457 ASSERT(!m_accessForbidden); |
| 1435 #if ENABLE(ASSERT) | 1458 #if ENABLE(ASSERT) |
| 1436 m_accessForbidden = true; | 1459 m_accessForbidden = true; |
| 1437 #endif | 1460 #endif |
| 1438 deleteBucket(*pos); | 1461 deleteBucket(*pos); |
| (...skipping 220 matching lines...) |
| 1659 typename KeyTraits, | 1682 typename KeyTraits, |
| 1660 typename Allocator> | 1683 typename Allocator> |
| 1661 Value* | 1684 Value* |
| 1662 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: | 1685 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: |
| 1663 rehashTo(ValueType* newTable, unsigned newTableSize, Value* entry) { | 1686 rehashTo(ValueType* newTable, unsigned newTableSize, Value* entry) { |
| 1664 unsigned oldTableSize = m_tableSize; | 1687 unsigned oldTableSize = m_tableSize; |
| 1665 ValueType* oldTable = m_table; | 1688 ValueType* oldTable = m_table; |
| 1666 | 1689 |
| 1667 #if DUMP_HASHTABLE_STATS | 1690 #if DUMP_HASHTABLE_STATS |
| 1668 if (oldTableSize != 0) | 1691 if (oldTableSize != 0) |
| 1669 atomicIncrement(&HashTableStats::numRehashes); | 1692 atomicIncrement(&HashTableStats::instance().numRehashes); |
| 1670 #endif | 1693 #endif |
| 1671 | 1694 |
| 1672 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1695 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1673 if (oldTableSize != 0) | 1696 if (oldTableSize != 0) |
| 1674 ++m_stats->numRehashes; | 1697 ++m_stats->numRehashes; |
| 1675 #endif | 1698 #endif |
| 1676 | 1699 |
| 1677 m_table = newTable; | 1700 m_table = newTable; |
| 1678 m_tableSize = newTableSize; | 1701 m_tableSize = newTableSize; |
| 1679 | 1702 |
| 1680 Value* newEntry = nullptr; | 1703 Value* newEntry = nullptr; |
| 1681 for (unsigned i = 0; i != oldTableSize; ++i) { | 1704 for (unsigned i = 0; i != oldTableSize; ++i) { |
| 1682 if (isEmptyOrDeletedBucket(oldTable[i])) { | 1705 if (isEmptyOrDeletedBucket(oldTable[i])) { |
| 1683 ASSERT(&oldTable[i] != entry); | 1706 ASSERT(&oldTable[i] != entry); |
| 1684 continue; | 1707 continue; |
| 1685 } | 1708 } |
| 1686 Value* reinsertedEntry = reinsert(std::move(oldTable[i])); | 1709 Value* reinsertedEntry = reinsert(std::move(oldTable[i])); |
| 1687 if (&oldTable[i] == entry) { | 1710 if (&oldTable[i] == entry) { |
| 1688 ASSERT(!newEntry); | 1711 ASSERT(!newEntry); |
| 1689 newEntry = reinsertedEntry; | 1712 newEntry = reinsertedEntry; |
| 1690 } | 1713 } |
| 1691 } | 1714 } |
| 1692 | 1715 |
| 1693 m_deletedCount = 0; | 1716 m_deletedCount = 0; |
| 1694 | 1717 |
| 1718 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1719 if (!m_stats) |
| 1720 m_stats = HashTableStatsPtr<Allocator>::create(); |
| 1721 #endif |
| 1722 |
| 1695 return newEntry; | 1723 return newEntry; |
| 1696 } | 1724 } |
| 1697 | 1725 |
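
Worth noting in rehashTo above: `numRehashes` is only bumped when `oldTableSize != 0`, and the stats block itself is created lazily at the end, so a table that never grows never allocates one. This matches the constructor's change from eager `wrapUnique(new Stats)` to `nullptr` earlier in the diff. A sketch of that ordering, assuming (as the diff appears to) that every per-table counter update runs only after a first successful rehash; names are illustrative, using the non-GC unique_ptr arm:

```cpp
#include <memory>

struct Stats { int numRehashes = 0; };

struct Table {
  unsigned m_tableSize = 0;
  std::unique_ptr<Stats> m_stats;  // starts null, per the ctor change

  void rehashTo(unsigned newTableSize) {
    if (m_tableSize != 0)      // not the first growth: the block already
      ++m_stats->numRehashes;  // exists from the end of the previous rehash
    m_tableSize = newTableSize;
    // ... reinsert all entries into the new backing ...
    if (!m_stats)              // first growth: materialize on demand
      m_stats = std::make_unique<Stats>();
  }
};
```
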
| 1698 template <typename Key, | 1726 template <typename Key, |
| 1699 typename Value, | 1727 typename Value, |
| 1700 typename Extractor, | 1728 typename Extractor, |
| 1701 typename HashFunctions, | 1729 typename HashFunctions, |
| 1702 typename Traits, | 1730 typename Traits, |
| 1703 typename KeyTraits, | 1731 typename KeyTraits, |
| 1704 typename Allocator> | 1732 typename Allocator> |
| 1705 Value* | 1733 Value* |
| 1706 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: | 1734 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: |
| 1707 rehash(unsigned newTableSize, Value* entry) { | 1735 rehash(unsigned newTableSize, Value* entry) { |
| 1708 unsigned oldTableSize = m_tableSize; | 1736 unsigned oldTableSize = m_tableSize; |
| 1709 ValueType* oldTable = m_table; | 1737 ValueType* oldTable = m_table; |
| 1710 | 1738 |
| 1711 #if DUMP_HASHTABLE_STATS | 1739 #if DUMP_HASHTABLE_STATS |
| 1712 if (oldTableSize != 0) | 1740 if (oldTableSize != 0) |
| 1713 atomicIncrement(&HashTableStats::numRehashes); | 1741 atomicIncrement(&HashTableStats::instance().numRehashes); |
| 1714 #endif | 1742 #endif |
| 1715 | 1743 |
| 1716 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1744 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1717 if (oldTableSize != 0) | 1745 if (oldTableSize != 0) |
| 1718 ++m_stats->numRehashes; | 1746 ++m_stats->numRehashes; |
| 1719 #endif | 1747 #endif |
| 1720 | 1748 |
| 1721 // The Allocator::isGarbageCollected check is not needed. The check is just | 1749 // The Allocator::isGarbageCollected check is not needed. The check is just |
| 1722 // a static hint for a compiler to indicate that Base::expandBuffer returns | 1750 // a static hint for a compiler to indicate that Base::expandBuffer returns |
| 1723 // false if Allocator is a PartitionAllocator. | 1751 // false if Allocator is a PartitionAllocator. |
| (...skipping 64 matching lines...) |
| 1788 m_keyCount(0), | 1816 m_keyCount(0), |
| 1789 m_deletedCount(0), | 1817 m_deletedCount(0), |
| 1790 m_queueFlag(false) | 1818 m_queueFlag(false) |
| 1791 #if ENABLE(ASSERT) | 1819 #if ENABLE(ASSERT) |
| 1792 , | 1820 , |
| 1793 m_accessForbidden(false), | 1821 m_accessForbidden(false), |
| 1794 m_modifications(0) | 1822 m_modifications(0) |
| 1795 #endif | 1823 #endif |
| 1796 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1824 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1797 , | 1825 , |
| 1798 m_stats(wrapUnique(new Stats(*other.m_stats))) | 1826 m_stats(HashTableStatsPtr<Allocator>::copy(other.m_stats)) |
| 1799 #endif | 1827 #endif |
| 1800 { | 1828 { |
| 1801 // Copy the hash table the dumb way, by adding each element to the new | 1829 // Copy the hash table the dumb way, by adding each element to the new |
| 1802 // table. It might be more efficient to copy the table slots, but it's not | 1830 // table. It might be more efficient to copy the table slots, but it's not |
| 1803 // clear that efficiency is needed. | 1831 // clear that efficiency is needed. |
| 1804 const_iterator end = other.end(); | 1832 const_iterator end = other.end(); |
| 1805 for (const_iterator it = other.begin(); it != end; ++it) | 1833 for (const_iterator it = other.begin(); it != end; ++it) |
| 1806 add(*it); | 1834 add(*it); |
| 1807 } | 1835 } |
| 1808 | 1836 |
| (...skipping 11 matching lines...) |
| 1820 m_keyCount(0), | 1848 m_keyCount(0), |
| 1821 m_deletedCount(0), | 1849 m_deletedCount(0), |
| 1822 m_queueFlag(false) | 1850 m_queueFlag(false) |
| 1823 #if ENABLE(ASSERT) | 1851 #if ENABLE(ASSERT) |
| 1824 , | 1852 , |
| 1825 m_accessForbidden(false), | 1853 m_accessForbidden(false), |
| 1826 m_modifications(0) | 1854 m_modifications(0) |
| 1827 #endif | 1855 #endif |
| 1828 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1856 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1829 , | 1857 , |
| 1830 m_stats(wrapUnique(new Stats(*other.m_stats))) | 1858 m_stats(HashTableStatsPtr<Allocator>::copy(other.m_stats)) |
| 1831 #endif | 1859 #endif |
| 1832 { | 1860 { |
| 1833 swap(other); | 1861 swap(other); |
| 1834 } | 1862 } |
| 1835 | 1863 |
| 1836 template <typename Key, | 1864 template <typename Key, |
| 1837 typename Value, | 1865 typename Value, |
| 1838 typename Extractor, | 1866 typename Extractor, |
| 1839 typename HashFunctions, | 1867 typename HashFunctions, |
| 1840 typename Traits, | 1868 typename Traits, |
| (...skipping 15 matching lines...) |
| 1856 m_deletedCount = other.m_deletedCount; | 1884 m_deletedCount = other.m_deletedCount; |
| 1857 other.m_deletedCount = deleted; | 1885 other.m_deletedCount = deleted; |
| 1858 ASSERT(!m_queueFlag); | 1886 ASSERT(!m_queueFlag); |
| 1859 ASSERT(!other.m_queueFlag); | 1887 ASSERT(!other.m_queueFlag); |
| 1860 | 1888 |
| 1861 #if ENABLE(ASSERT) | 1889 #if ENABLE(ASSERT) |
| 1862 std::swap(m_modifications, other.m_modifications); | 1890 std::swap(m_modifications, other.m_modifications); |
| 1863 #endif | 1891 #endif |
| 1864 | 1892 |
| 1865 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1893 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1866 m_stats.swap(other.m_stats); | 1894 HashTableStatsPtr<Allocator>::swap(m_stats, other.m_stats); |
| 1867 #endif | 1895 #endif |
| 1868 } | 1896 } |
| 1869 | 1897 |
| 1870 template <typename Key, | 1898 template <typename Key, |
| 1871 typename Value, | 1899 typename Value, |
| 1872 typename Extractor, | 1900 typename Extractor, |
| 1873 typename HashFunctions, | 1901 typename HashFunctions, |
| 1874 typename Traits, | 1902 typename Traits, |
| 1875 typename KeyTraits, | 1903 typename KeyTraits, |
| 1876 typename Allocator> | 1904 typename Allocator> |
| (...skipping 144 matching lines...) |
| 2021 typename KeyTraits, | 2049 typename KeyTraits, |
| 2022 typename Allocator> | 2050 typename Allocator> |
| 2023 template <typename VisitorDispatcher> | 2051 template <typename VisitorDispatcher> |
| 2024 void HashTable<Key, | 2052 void HashTable<Key, |
| 2025 Value, | 2053 Value, |
| 2026 Extractor, | 2054 Extractor, |
| 2027 HashFunctions, | 2055 HashFunctions, |
| 2028 Traits, | 2056 Traits, |
| 2029 KeyTraits, | 2057 KeyTraits, |
| 2030 Allocator>::trace(VisitorDispatcher visitor) { | 2058 Allocator>::trace(VisitorDispatcher visitor) { |
| 2059 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 2060 Allocator::markNoTracing(visitor, m_stats); |
| 2061 #endif |
| 2062 |
| 2031 // If someone else already marked the backing and queued up the trace and/or | 2063 // If someone else already marked the backing and queued up the trace and/or |
| 2032 // weak callback then we are done. This optimization does not happen for | 2064 // weak callback then we are done. This optimization does not happen for |
| 2033 // ListHashSet since its iterator does not point at the backing. | 2065 // ListHashSet since its iterator does not point at the backing. |
| 2034 if (!m_table || Allocator::isHeapObjectAlive(m_table)) | 2066 if (!m_table || Allocator::isHeapObjectAlive(m_table)) |
| 2035 return; | 2067 return; |
| 2068 |
| 2036 // Normally, we mark the backing store without performing trace. This means | 2069 // Normally, we mark the backing store without performing trace. This means |
| 2037 // it is marked live, but the pointers inside it are not marked. Instead we | 2070 // it is marked live, but the pointers inside it are not marked. Instead we |
| 2038 // will mark the pointers below. However, for backing stores that contain | 2071 // will mark the pointers below. However, for backing stores that contain |
| 2039 // weak pointers the handling is rather different. We don't mark the | 2072 // weak pointers the handling is rather different. We don't mark the |
| 2040 // backing store here, so the marking GC will leave the backing unmarked. If | 2073 // backing store here, so the marking GC will leave the backing unmarked. If |
| 2041 // the backing is found in any other way than through its HashTable (ie from | 2074 // the backing is found in any other way than through its HashTable (ie from |
| 2042 // an iterator) then the mark bit will be set and the pointers will be | 2075 // an iterator) then the mark bit will be set and the pointers will be |
| 2043 // marked strongly, avoiding problems with iterating over things that | 2076 // marked strongly, avoiding problems with iterating over things that |
| 2044 // disappear due to weak processing while we are iterating over them. We | 2077 // disappear due to weak processing while we are iterating over them. We |
| 2045 // register the backing store pointer for delayed marking which will take | 2078 // register the backing store pointer for delayed marking which will take |
| (...skipping 181 matching lines...) |
| 2227 CollectionIterator end(toBeRemoved.end()); | 2260 CollectionIterator end(toBeRemoved.end()); |
| 2228 for (CollectionIterator it(toBeRemoved.begin()); it != end; ++it) | 2261 for (CollectionIterator it(toBeRemoved.begin()); it != end; ++it) |
| 2229 collection.remove(*it); | 2262 collection.remove(*it); |
| 2230 } | 2263 } |
| 2231 | 2264 |
| 2232 } // namespace WTF | 2265 } // namespace WTF |
| 2233 | 2266 |
| 2234 #include "wtf/HashIterators.h" | 2267 #include "wtf/HashIterators.h" |
| 2235 | 2268 |
| 2236 #endif // WTF_HashTable_h | 2269 #endif // WTF_HashTable_h |