| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights | 2 * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights |
| 3 * reserved. | 3 * reserved. |
| 4 * Copyright (C) 2008 David Levin <levin@chromium.org> | 4 * Copyright (C) 2008 David Levin <levin@chromium.org> |
| 5 * | 5 * |
| 6 * This library is free software; you can redistribute it and/or | 6 * This library is free software; you can redistribute it and/or |
| 7 * modify it under the terms of the GNU Library General Public | 7 * modify it under the terms of the GNU Library General Public |
| 8 * License as published by the Free Software Foundation; either | 8 * License as published by the Free Software Foundation; either |
| 9 * version 2 of the License, or (at your option) any later version. | 9 * version 2 of the License, or (at your option) any later version. |
| 10 * | 10 * |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 45 #include <type_traits> | 45 #include <type_traits> |
| 46 #endif | 46 #endif |
| 47 | 47 |
| 48 #if DUMP_HASHTABLE_STATS | 48 #if DUMP_HASHTABLE_STATS |
| 49 #if DUMP_HASHTABLE_STATS_PER_TABLE | 49 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 50 | 50 |
| 51 #define UPDATE_PROBE_COUNTS() \ | 51 #define UPDATE_PROBE_COUNTS() \ |
| 52 ++probeCount; \ | 52 ++probeCount; \ |
| 53 HashTableStats::instance().recordCollisionAtCount(probeCount); \ | 53 HashTableStats::instance().recordCollisionAtCount(probeCount); \ |
| 54 ++perTableProbeCount; \ | 54 ++perTableProbeCount; \ |
| 55 m_stats->recordCollisionAtCount(perTableProbeCount) | 55 stats_->recordCollisionAtCount(perTableProbeCount) |
| 56 #define UPDATE_ACCESS_COUNTS() \ | 56 #define UPDATE_ACCESS_COUNTS() \ |
| 57 atomicIncrement(&HashTableStats::instance().numAccesses); \ | 57 atomicIncrement(&HashTableStats::instance().numAccesses); \ |
| 58 int probeCount = 0; \ | 58 int probeCount = 0; \ |
| 59 ++m_stats->numAccesses; \ | 59 ++stats_->numAccesses; \ |
| 60 int perTableProbeCount = 0 | 60 int perTableProbeCount = 0 |
| 61 #else | 61 #else |
| 62 #define UPDATE_PROBE_COUNTS() \ | 62 #define UPDATE_PROBE_COUNTS() \ |
| 63 ++probeCount; \ | 63 ++probeCount; \ |
| 64 HashTableStats::instance().recordCollisionAtCount(probeCount) | 64 HashTableStats::instance().recordCollisionAtCount(probeCount) |
| 65 #define UPDATE_ACCESS_COUNTS() \ | 65 #define UPDATE_ACCESS_COUNTS() \ |
| 66 atomicIncrement(&HashTableStats::instance().numAccesses); \ | 66 atomicIncrement(&HashTableStats::instance().numAccesses); \ |
| 67 int probeCount = 0 | 67 int probeCount = 0 |
| 68 #endif | 68 #endif |
| 69 #else | 69 #else |
| 70 #if DUMP_HASHTABLE_STATS_PER_TABLE | 70 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 71 #define UPDATE_PROBE_COUNTS() \ | 71 #define UPDATE_PROBE_COUNTS() \ |
| 72 ++perTableProbeCount; \ | 72 ++perTableProbeCount; \ |
| 73 m_stats->recordCollisionAtCount(perTableProbeCount) | 73 stats_->recordCollisionAtCount(perTableProbeCount) |
| 74 #define UPDATE_ACCESS_COUNTS() \ | 74 #define UPDATE_ACCESS_COUNTS() \ |
| 75 ++m_stats->numAccesses; \ | 75 ++stats_->numAccesses; \ |
| 76 int perTableProbeCount = 0 | 76 int perTableProbeCount = 0 |
| 77 #else | 77 #else |
| 78 #define UPDATE_PROBE_COUNTS() \ | 78 #define UPDATE_PROBE_COUNTS() \ |
| 79 do { \ | 79 do { \ |
| 80 } while (0) | 80 } while (0) |
| 81 #define UPDATE_ACCESS_COUNTS() \ | 81 #define UPDATE_ACCESS_COUNTS() \ |
| 82 do { \ | 82 do { \ |
| 83 } while (0) | 83 } while (0) |
| 84 #endif | 84 #endif |
| 85 #endif | 85 #endif |
| (...skipping 278 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 364 bool operator==(const iterator& other) const { | 364 bool operator==(const iterator& other) const { |
| 365 return *this == static_cast<const_iterator>(other); | 365 return *this == static_cast<const_iterator>(other); |
| 366 } | 366 } |
| 367 bool operator!=(const iterator& other) const { | 367 bool operator!=(const iterator& other) const { |
| 368 return *this != static_cast<const_iterator>(other); | 368 return *this != static_cast<const_iterator>(other); |
| 369 } | 369 } |
| 370 | 370 |
| 371 std::ostream& PrintTo(std::ostream& stream) const { | 371 std::ostream& PrintTo(std::ostream& stream) const { |
| 372 if (position_ == end_position_) | 372 if (position_ == end_position_) |
| 373 return stream << "iterator representing <end>"; | 373 return stream << "iterator representing <end>"; |
| 374 // TODO(tkent): Change |m_position| to |*m_position| to show the | 374 // TODO(tkent): Change |position_| to |*position_| to show the |
| 375 // pointed object. It requires a lot of new stream printer functions. | 375 // pointed object. It requires a lot of new stream printer functions. |
| 376 return stream << "iterator pointing to " << position_; | 376 return stream << "iterator pointing to " << position_; |
| 377 } | 377 } |
| 378 | 378 |
| 379 private: | 379 private: |
| 380 PointerType position_; | 380 PointerType position_; |
| 381 PointerType end_position_; | 381 PointerType end_position_; |
| 382 #if DCHECK_IS_ON() | 382 #if DCHECK_IS_ON() |
| 383 const HashTableType* container_; | 383 const HashTableType* container_; |
| 384 int64_t container_modifications_; | 384 int64_t container_modifications_; |
| (...skipping 513 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 898 #else | 898 #else |
| 899 unsigned deleted_count_ : 31; | 899 unsigned deleted_count_ : 31; |
| 900 unsigned queue_flag_ : 1; | 900 unsigned queue_flag_ : 1; |
| 901 #endif | 901 #endif |
| 902 | 902 |
| 903 #if DUMP_HASHTABLE_STATS_PER_TABLE | 903 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 904 public: | 904 public: |
| 905 mutable | 905 mutable |
| 906 typename std::conditional<Allocator::isGarbageCollected, | 906 typename std::conditional<Allocator::isGarbageCollected, |
| 907 HashTableStats*, | 907 HashTableStats*, |
| 908 std::unique_ptr<HashTableStats>>::type m_stats; | 908 std::unique_ptr<HashTableStats>>::type stats_; |
| 909 #endif | 909 #endif |
| 910 | 910 |
| 911 template <WeakHandlingFlag x, | 911 template <WeakHandlingFlag x, |
| 912 typename T, | 912 typename T, |
| 913 typename U, | 913 typename U, |
| 914 typename V, | 914 typename V, |
| 915 typename W, | 915 typename W, |
| 916 typename X, | 916 typename X, |
| 917 typename Y, | 917 typename Y, |
| 918 typename Z> | 918 typename Z> |
| (...skipping 21 matching lines...) Expand all Loading... |
| 940 key_count_(0), | 940 key_count_(0), |
| 941 deleted_count_(0), | 941 deleted_count_(0), |
| 942 queue_flag_(false) | 942 queue_flag_(false) |
| 943 #if DCHECK_IS_ON() | 943 #if DCHECK_IS_ON() |
| 944 , | 944 , |
| 945 access_forbidden_(false), | 945 access_forbidden_(false), |
| 946 modifications_(0) | 946 modifications_(0) |
| 947 #endif | 947 #endif |
| 948 #if DUMP_HASHTABLE_STATS_PER_TABLE | 948 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 949 , | 949 , |
| 950 m_stats(nullptr) | 950 stats_(nullptr) |
| 951 #endif | 951 #endif |
| 952 { | 952 { |
| 953 static_assert(Allocator::kIsGarbageCollected || | 953 static_assert(Allocator::kIsGarbageCollected || |
| 954 (!IsPointerToGarbageCollectedType<Key>::value && | 954 (!IsPointerToGarbageCollectedType<Key>::value && |
| 955 !IsPointerToGarbageCollectedType<Value>::value), | 955 !IsPointerToGarbageCollectedType<Value>::value), |
| 956 "Cannot put raw pointers to garbage-collected classes into an " | 956 "Cannot put raw pointers to garbage-collected classes into an " |
| 957 "off-heap collection."); | 957 "off-heap collection."); |
| 958 } | 958 } |
| 959 | 959 |
| 960 inline unsigned DoubleHash(unsigned key) { | 960 inline unsigned DoubleHash(unsigned key) { |
| (...skipping 410 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1371 Reinsert(ValueType&& entry) { | 1371 Reinsert(ValueType&& entry) { |
| 1372 DCHECK(table_); | 1372 DCHECK(table_); |
| 1373 RegisterModification(); | 1373 RegisterModification(); |
| 1374 DCHECK(!LookupForWriting(Extractor::Extract(entry)).second); | 1374 DCHECK(!LookupForWriting(Extractor::Extract(entry)).second); |
| 1375 DCHECK( | 1375 DCHECK( |
| 1376 !IsDeletedBucket(*(LookupForWriting(Extractor::Extract(entry)).first))); | 1376 !IsDeletedBucket(*(LookupForWriting(Extractor::Extract(entry)).first))); |
| 1377 #if DUMP_HASHTABLE_STATS | 1377 #if DUMP_HASHTABLE_STATS |
| 1378 atomicIncrement(&HashTableStats::instance().numReinserts); | 1378 atomicIncrement(&HashTableStats::instance().numReinserts); |
| 1379 #endif | 1379 #endif |
| 1380 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1380 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1381 ++m_stats->numReinserts; | 1381 ++stats_->numReinserts; |
| 1382 #endif | 1382 #endif |
| 1383 Value* new_entry = LookupForWriting(Extractor::Extract(entry)).first; | 1383 Value* new_entry = LookupForWriting(Extractor::Extract(entry)).first; |
| 1384 Mover<ValueType, Allocator, | 1384 Mover<ValueType, Allocator, |
| 1385 Traits::template NeedsToForbidGCOnMove<>::value>::Move(std::move(entry), | 1385 Traits::template NeedsToForbidGCOnMove<>::value>::Move(std::move(entry), |
| 1386 *new_entry); | 1386 *new_entry); |
| 1387 | 1387 |
| 1388 return new_entry; | 1388 return new_entry; |
| 1389 } | 1389 } |
| 1390 | 1390 |
| 1391 template <typename Key, | 1391 template <typename Key, |
| (...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1466 Extractor, | 1466 Extractor, |
| 1467 HashFunctions, | 1467 HashFunctions, |
| 1468 Traits, | 1468 Traits, |
| 1469 KeyTraits, | 1469 KeyTraits, |
| 1470 Allocator>::erase(ValueType* pos) { | 1470 Allocator>::erase(ValueType* pos) { |
| 1471 RegisterModification(); | 1471 RegisterModification(); |
| 1472 #if DUMP_HASHTABLE_STATS | 1472 #if DUMP_HASHTABLE_STATS |
| 1473 atomicIncrement(&HashTableStats::instance().numRemoves); | 1473 atomicIncrement(&HashTableStats::instance().numRemoves); |
| 1474 #endif | 1474 #endif |
| 1475 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1475 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1476 ++m_stats->numRemoves; | 1476 ++stats_->numRemoves; |
| 1477 #endif | 1477 #endif |
| 1478 | 1478 |
| 1479 EnterAccessForbiddenScope(); | 1479 EnterAccessForbiddenScope(); |
| 1480 DeleteBucket(*pos); | 1480 DeleteBucket(*pos); |
| 1481 LeaveAccessForbiddenScope(); | 1481 LeaveAccessForbiddenScope(); |
| 1482 ++deleted_count_; | 1482 ++deleted_count_; |
| 1483 --key_count_; | 1483 --key_count_; |
| 1484 | 1484 |
| 1485 if (ShouldShrink()) | 1485 if (ShouldShrink()) |
| 1486 Shrink(); | 1486 Shrink(); |
| (...skipping 214 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1701 unsigned old_table_size = table_size_; | 1701 unsigned old_table_size = table_size_; |
| 1702 ValueType* old_table = table_; | 1702 ValueType* old_table = table_; |
| 1703 | 1703 |
| 1704 #if DUMP_HASHTABLE_STATS | 1704 #if DUMP_HASHTABLE_STATS |
| 1705 if (oldTableSize != 0) | 1705 if (old_table_size != 0) |
| 1706 atomicIncrement(&HashTableStats::instance().numRehashes); | 1706 atomicIncrement(&HashTableStats::instance().numRehashes); |
| 1707 #endif | 1707 #endif |
| 1708 | 1708 |
| 1709 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1709 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1710 if (oldTableSize != 0) | 1710 if (old_table_size != 0) |
| 1711 ++m_stats->numRehashes; | 1711 ++stats_->numRehashes; |
| 1712 #endif | 1712 #endif |
| 1713 | 1713 |
| 1714 table_ = new_table; | 1714 table_ = new_table; |
| 1715 table_size_ = new_table_size; | 1715 table_size_ = new_table_size; |
| 1716 | 1716 |
| 1717 Value* new_entry = nullptr; | 1717 Value* new_entry = nullptr; |
| 1718 for (unsigned i = 0; i != old_table_size; ++i) { | 1718 for (unsigned i = 0; i != old_table_size; ++i) { |
| 1719 if (IsEmptyOrDeletedBucket(old_table[i])) { | 1719 if (IsEmptyOrDeletedBucket(old_table[i])) { |
| 1720 DCHECK_NE(&old_table[i], entry); | 1720 DCHECK_NE(&old_table[i], entry); |
| 1721 continue; | 1721 continue; |
| 1722 } | 1722 } |
| 1723 Value* reinserted_entry = Reinsert(std::move(old_table[i])); | 1723 Value* reinserted_entry = Reinsert(std::move(old_table[i])); |
| 1724 if (&old_table[i] == entry) { | 1724 if (&old_table[i] == entry) { |
| 1725 DCHECK(!new_entry); | 1725 DCHECK(!new_entry); |
| 1726 new_entry = reinserted_entry; | 1726 new_entry = reinserted_entry; |
| 1727 } | 1727 } |
| 1728 } | 1728 } |
| 1729 | 1729 |
| 1730 deleted_count_ = 0; | 1730 deleted_count_ = 0; |
| 1731 | 1731 |
| 1732 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1732 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1733 if (!m_stats) | 1733 if (!stats_) |
| 1734 m_stats = HashTableStatsPtr<Allocator>::create(); | 1734 stats_ = HashTableStatsPtr<Allocator>::create(); |
| 1735 #endif | 1735 #endif |
| 1736 | 1736 |
| 1737 return new_entry; | 1737 return new_entry; |
| 1738 } | 1738 } |
| 1739 | 1739 |
| 1740 template <typename Key, | 1740 template <typename Key, |
| 1741 typename Value, | 1741 typename Value, |
| 1742 typename Extractor, | 1742 typename Extractor, |
| 1743 typename HashFunctions, | 1743 typename HashFunctions, |
| 1744 typename Traits, | 1744 typename Traits, |
| 1745 typename KeyTraits, | 1745 typename KeyTraits, |
| 1746 typename Allocator> | 1746 typename Allocator> |
| 1747 Value* | 1747 Value* |
| 1748 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: | 1748 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: |
| 1749 Rehash(unsigned new_table_size, Value* entry) { | 1749 Rehash(unsigned new_table_size, Value* entry) { |
| 1750 unsigned old_table_size = table_size_; | 1750 unsigned old_table_size = table_size_; |
| 1751 ValueType* old_table = table_; | 1751 ValueType* old_table = table_; |
| 1752 | 1752 |
| 1753 #if DUMP_HASHTABLE_STATS | 1753 #if DUMP_HASHTABLE_STATS |
| 1754 if (oldTableSize != 0) | 1754 if (old_table_size != 0) |
| 1755 atomicIncrement(&HashTableStats::instance().numRehashes); | 1755 atomicIncrement(&HashTableStats::instance().numRehashes); |
| 1756 #endif | 1756 #endif |
| 1757 | 1757 |
| 1758 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1758 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1759 if (oldTableSize != 0) | 1759 if (old_table_size != 0) |
| 1760 ++m_stats->numRehashes; | 1760 ++stats_->numRehashes; |
| 1761 #endif | 1761 #endif |
| 1762 | 1762 |
| 1763 // The Allocator::isGarbageCollected check is not needed. The check is just | 1763 // The Allocator::kIsGarbageCollected check is not needed. The check is just |
| 1764 // a static hint for a compiler to indicate that Base::expandBuffer returns | 1764 // a static hint for a compiler to indicate that Base::ExpandBuffer returns |
| 1765 // false if Allocator is a PartitionAllocator. | 1765 // false if Allocator is a PartitionAllocator. |
| 1766 if (Allocator::kIsGarbageCollected && new_table_size > old_table_size) { | 1766 if (Allocator::kIsGarbageCollected && new_table_size > old_table_size) { |
| 1767 bool success; | 1767 bool success; |
| 1768 Value* new_entry = ExpandBuffer(new_table_size, entry, success); | 1768 Value* new_entry = ExpandBuffer(new_table_size, entry, success); |
| 1769 if (success) | 1769 if (success) |
| 1770 return new_entry; | 1770 return new_entry; |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1820 key_count_(0), | 1820 key_count_(0), |
| 1821 deleted_count_(0), | 1821 deleted_count_(0), |
| 1822 queue_flag_(false) | 1822 queue_flag_(false) |
| 1823 #if DCHECK_IS_ON() | 1823 #if DCHECK_IS_ON() |
| 1824 , | 1824 , |
| 1825 access_forbidden_(false), | 1825 access_forbidden_(false), |
| 1826 modifications_(0) | 1826 modifications_(0) |
| 1827 #endif | 1827 #endif |
| 1828 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1828 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1829 , | 1829 , |
| 1830 m_stats(HashTableStatsPtr<Allocator>::copy(other.m_stats)) | 1830 stats_(HashTableStatsPtr<Allocator>::copy(other.stats_)) |
| 1831 #endif | 1831 #endif |
| 1832 { | 1832 { |
| 1833 if (other.size()) | 1833 if (other.size()) |
| 1834 ReserveCapacityForSize(other.size()); | 1834 ReserveCapacityForSize(other.size()); |
| 1835 // Copy the hash table the dumb way, by adding each element to the new | 1835 // Copy the hash table the dumb way, by adding each element to the new |
| 1836 // table. It might be more efficient to copy the table slots, but it's not | 1836 // table. It might be more efficient to copy the table slots, but it's not |
| 1837 // clear that efficiency is needed. | 1837 // clear that efficiency is needed. |
| 1838 for (const auto& element : other) | 1838 for (const auto& element : other) |
| 1839 insert(element); | 1839 insert(element); |
| 1840 } | 1840 } |
| (...skipping 12 matching lines...) Expand all Loading... |
| 1853 key_count_(0), | 1853 key_count_(0), |
| 1854 deleted_count_(0), | 1854 deleted_count_(0), |
| 1855 queue_flag_(false) | 1855 queue_flag_(false) |
| 1856 #if DCHECK_IS_ON() | 1856 #if DCHECK_IS_ON() |
| 1857 , | 1857 , |
| 1858 access_forbidden_(false), | 1858 access_forbidden_(false), |
| 1859 modifications_(0) | 1859 modifications_(0) |
| 1860 #endif | 1860 #endif |
| 1861 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1861 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1862 , | 1862 , |
| 1863 m_stats(HashTableStatsPtr<Allocator>::copy(other.m_stats)) | 1863 stats_(HashTableStatsPtr<Allocator>::copy(other.stats_)) |
| 1864 #endif | 1864 #endif |
| 1865 { | 1865 { |
| 1866 swap(other); | 1866 swap(other); |
| 1867 } | 1867 } |
| 1868 | 1868 |
| 1869 template <typename Key, | 1869 template <typename Key, |
| 1870 typename Value, | 1870 typename Value, |
| 1871 typename Extractor, | 1871 typename Extractor, |
| 1872 typename HashFunctions, | 1872 typename HashFunctions, |
| 1873 typename Traits, | 1873 typename Traits, |
| (...skipping 15 matching lines...) Expand all Loading... |
| 1889 deleted_count_ = other.deleted_count_; | 1889 deleted_count_ = other.deleted_count_; |
| 1890 other.deleted_count_ = deleted; | 1890 other.deleted_count_ = deleted; |
| 1891 DCHECK(!queue_flag_); | 1891 DCHECK(!queue_flag_); |
| 1892 DCHECK(!other.queue_flag_); | 1892 DCHECK(!other.queue_flag_); |
| 1893 | 1893 |
| 1894 #if DCHECK_IS_ON() | 1894 #if DCHECK_IS_ON() |
| 1895 std::swap(modifications_, other.modifications_); | 1895 std::swap(modifications_, other.modifications_); |
| 1896 #endif | 1896 #endif |
| 1897 | 1897 |
| 1898 #if DUMP_HASHTABLE_STATS_PER_TABLE | 1898 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 1899 HashTableStatsPtr<Allocator>::swap(m_stats, other.m_stats); | 1899 HashTableStatsPtr<Allocator>::swap(stats_, other.stats_); |
| 1900 #endif | 1900 #endif |
| 1901 } | 1901 } |
| 1902 | 1902 |
| 1903 template <typename Key, | 1903 template <typename Key, |
| 1904 typename Value, | 1904 typename Value, |
| 1905 typename Extractor, | 1905 typename Extractor, |
| 1906 typename HashFunctions, | 1906 typename HashFunctions, |
| 1907 typename Traits, | 1907 typename Traits, |
| 1908 typename KeyTraits, | 1908 typename KeyTraits, |
| 1909 typename Allocator> | 1909 typename Allocator> |
| (...skipping 147 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2057 typename Allocator> | 2057 typename Allocator> |
| 2058 template <typename VisitorDispatcher> | 2058 template <typename VisitorDispatcher> |
| 2059 void HashTable<Key, | 2059 void HashTable<Key, |
| 2060 Value, | 2060 Value, |
| 2061 Extractor, | 2061 Extractor, |
| 2062 HashFunctions, | 2062 HashFunctions, |
| 2063 Traits, | 2063 Traits, |
| 2064 KeyTraits, | 2064 KeyTraits, |
| 2065 Allocator>::Trace(VisitorDispatcher visitor) { | 2065 Allocator>::Trace(VisitorDispatcher visitor) { |
| 2066 #if DUMP_HASHTABLE_STATS_PER_TABLE | 2066 #if DUMP_HASHTABLE_STATS_PER_TABLE |
| 2067 Allocator::markNoTracing(visitor, m_stats); | 2067 Allocator::markNoTracing(visitor, stats_); |
| 2068 #endif | 2068 #endif |
| 2069 | 2069 |
| 2070 // If someone else already marked the backing and queued up the trace and/or | 2070 // If someone else already marked the backing and queued up the trace and/or |
| 2071 // weak callback then we are done. This optimization does not happen for | 2071 // weak callback then we are done. This optimization does not happen for |
| 2072 // ListHashSet since its iterator does not point at the backing. | 2072 // ListHashSet since its iterator does not point at the backing. |
| 2073 if (!table_ || Allocator::IsHeapObjectAlive(table_)) | 2073 if (!table_ || Allocator::IsHeapObjectAlive(table_)) |
| 2074 return; | 2074 return; |
| 2075 | 2075 |
| 2076 // Normally, we mark the backing store without performing trace. This means | 2076 // Normally, we mark the backing store without performing trace. This means |
| 2077 // it is marked live, but the pointers inside it are not marked. Instead we | 2077 // it is marked live, but the pointers inside it are not marked. Instead we |
| (...skipping 197 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2275 CollectionIterator end(to_be_removed.end()); | 2275 CollectionIterator end(to_be_removed.end()); |
| 2276 for (CollectionIterator it(to_be_removed.begin()); it != end; ++it) | 2276 for (CollectionIterator it(to_be_removed.begin()); it != end; ++it) |
| 2277 collection.erase(*it); | 2277 collection.erase(*it); |
| 2278 } | 2278 } |
| 2279 | 2279 |
| 2280 } // namespace WTF | 2280 } // namespace WTF |
| 2281 | 2281 |
| 2282 #include "platform/wtf/HashIterators.h" | 2282 #include "platform/wtf/HashIterators.h" |
| 2283 | 2283 |
| 2284 #endif // WTF_HashTable_h | 2284 #endif // WTF_HashTable_h |
| OLD | NEW |