Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 281 matching lines...) | |
| 292 void VerifyPointers(Object** start, Object** end) override { | 292 void VerifyPointers(Object** start, Object** end) override { |
| 293 for (Object** current = start; current < end; current++) { | 293 for (Object** current = start; current < end; current++) { |
| 294 if ((*current)->IsHeapObject()) { | 294 if ((*current)->IsHeapObject()) { |
| 295 HeapObject* object = HeapObject::cast(*current); | 295 HeapObject* object = HeapObject::cast(*current); |
| 296 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); | 296 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); |
| 297 } | 297 } |
| 298 } | 298 } |
| 299 } | 299 } |
| 300 }; | 300 }; |
| 301 | 301 |
| 302 class YoungGenerationEvacuationVerifier : public EvacuationVerifier { | |
| 303 public: | |
| 304 explicit YoungGenerationEvacuationVerifier(Heap* heap) | |
| 305 : EvacuationVerifier(heap) {} | |
| 306 | |
| 307 void Run() override { | |
| 308 VerifyRoots(VISIT_ALL_IN_SCAVENGE); | |
| 309 VerifyEvacuation(heap_->new_space()); | |
|
ulan (2017/04/26 09:51:28): Verification should be done for the whole heap.
Michael Lippautz (2017/05/02 11:22:00): Done.
| |
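The resolution above implies that Run() no longer restricts verification to the new space. A minimal sketch of what whole-heap verification could look like, assuming the usual V8 space accessors and a PagedSpace overload of VerifyEvacuation; the exact set of spaces covered by the final patch is an assumption:

```cpp
// Hypothetical reworked Run(): verify roots plus the other spaces as well,
// not just the new space, so stale pointers into evacuated pages are caught
// anywhere in the heap.
void Run() override {
  VerifyRoots(VISIT_ALL_IN_SCAVENGE);
  VerifyEvacuation(heap_->new_space());
  VerifyEvacuation(heap_->old_space());
  VerifyEvacuation(heap_->code_space());
  VerifyEvacuation(heap_->map_space());
}
```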
| 310 } | |
| 311 | |
| 312 protected: | |
| 313 void VerifyPointers(Object** start, Object** end) override { | |
| 314 for (Object** current = start; current < end; current++) { | |
| 315 if ((*current)->IsHeapObject()) { | |
| 316 HeapObject* object = HeapObject::cast(*current); | |
| 317 if (!heap_->InNewSpace(object)) return; | |
|
ulan (2017/04/26 09:51:28): Let's instead do a check that object is not in from-space.
Michael Lippautz (2017/05/02 11:22:00): Done.
| |
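One plausible reading of that suggestion, sketched here: instead of returning early for old-generation objects, explicitly check that any pointer still targeting the new space points into to-space. Heap::InToSpace is used elsewhere in this file; the exact shape of the final check is an assumption.

```cpp
void VerifyPointers(Object** start, Object** end) override {
  for (Object** current = start; current < end; current++) {
    if ((*current)->IsHeapObject()) {
      HeapObject* object = HeapObject::cast(*current);
      // Anything still in the new space must be in to-space; from-space only
      // holds stale copies of evacuated objects at this point.
      if (heap_->InNewSpace(object)) {
        CHECK(heap_->InToSpace(object));
      }
      CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
    }
  }
}
```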
| 318 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); | |
| 319 } | |
| 320 } | |
| 321 } | |
| 322 }; | |
| 323 | |
| 302 } // namespace | 324 } // namespace |
| 303 #endif // VERIFY_HEAP | 325 #endif // VERIFY_HEAP |
| 304 | 326 |
| 305 // ============================================================================= | 327 // ============================================================================= |
| 306 // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector | 328 // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector |
| 307 // ============================================================================= | 329 // ============================================================================= |
| 308 | 330 |
| 309 int MarkCompactCollectorBase::NumberOfParallelCompactionTasks( | 331 int MarkCompactCollectorBase::NumberOfParallelCompactionTasks( |
| 310 int pages, intptr_t live_bytes) { | 332 int pages, intptr_t live_bytes) { |
| 311 if (!FLAG_parallel_compaction) return 1; | 333 if (!FLAG_parallel_compaction) return 1; |
| (...skipping 1276 matching lines...) | |
| 1588 *p = the_hole; | 1610 *p = the_hole; |
| 1589 } | 1611 } |
| 1590 } | 1612 } |
| 1591 } | 1613 } |
| 1592 } | 1614 } |
| 1593 | 1615 |
| 1594 private: | 1616 private: |
| 1595 Heap* heap_; | 1617 Heap* heap_; |
| 1596 }; | 1618 }; |
| 1597 | 1619 |
| 1620 // Helper class for pruning the string table. | |
| 1621 class YoungGenerationExternalStringTableCleaner : public RootVisitor { | |
| 1622 public: | |
| 1623 YoungGenerationExternalStringTableCleaner( | |
| 1624 const MinorMarkCompactCollector& collector) | |
| 1625 : heap_(collector.heap()), collector_(collector) {} | |
| 1626 | |
| 1627 void VisitRootPointers(Root root, Object** start, Object** end) override { | |
| 1628 DCHECK_EQ(root, Root::kExternalStringsTable); | |
| 1629 // Visit all HeapObject pointers in [start, end). | |
| 1630 for (Object** p = start; p < end; p++) { | |
| 1631 Object* o = *p; | |
| 1632 if (o->IsHeapObject()) { | |
| 1633 HeapObject* heap_object = HeapObject::cast(o); | |
| 1634 if (ObjectMarking::IsWhite(heap_object, | |
| 1635 collector_.marking_state(heap_object))) { | |
| 1636 if (o->IsExternalString()) { | |
| 1637 heap_->FinalizeExternalString(String::cast(*p)); | |
| 1638 } else { | |
| 1639 // The original external string may have been internalized. | |
| 1640 DCHECK(o->IsThinString()); | |
| 1641 } | |
| 1642 // Set the entry to the_hole_value (as deleted). | |
| 1643 *p = heap_->the_hole_value(); | |
| 1644 } | |
| 1645 } | |
| 1646 } | |
| 1647 } | |
| 1648 | |
| 1649 private: | |
| 1650 Heap* heap_; | |
| 1651 const MinorMarkCompactCollector& collector_; | |
| 1652 }; | |
| 1653 | |
| 1654 // Marked young generation objects and all old generation objects will be | |
| 1655 // retained. | |
| 1656 class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer { | |
| 1657 public: | |
| 1658 explicit MinorMarkCompactWeakObjectRetainer( | |
| 1659 const MinorMarkCompactCollector& collector) | |
| 1660 : collector_(collector) {} | |
| 1661 | |
| 1662 virtual Object* RetainAs(Object* object) { | |
| 1663 HeapObject* heap_object = HeapObject::cast(object); | |
| 1664 if (!collector_.heap()->InNewSpace(heap_object)) return object; | |
| 1665 | |
| 1666 DCHECK(!ObjectMarking::IsGrey(heap_object, | |
| 1667 MarkingState::External(heap_object))); | |
|
ulan (2017/04/26 09:51:28): Did you mean collector_.marking_state(heap_object)?
Michael Lippautz (2017/05/02 11:22:00): Done.
| |
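Presumably the fix is simply to query the collector's own marking state in the DCHECK as well, matching the IsBlack check that follows; a sketch, not necessarily the exact final code:

```cpp
DCHECK(!ObjectMarking::IsGrey(heap_object,
                              collector_.marking_state(heap_object)));
```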
| 1668 if (ObjectMarking::IsBlack(heap_object, | |
| 1669 collector_.marking_state(heap_object))) { | |
| 1670 return object; | |
| 1671 } | |
| 1672 return nullptr; | |
| 1673 } | |
| 1674 | |
| 1675 private: | |
| 1676 const MinorMarkCompactCollector& collector_; | |
| 1677 }; | |
| 1678 | |
| 1598 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects | 1679 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects |
| 1599 // are retained. | 1680 // are retained. |
| 1600 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { | 1681 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { |
| 1601 public: | 1682 public: |
| 1602 virtual Object* RetainAs(Object* object) { | 1683 virtual Object* RetainAs(Object* object) { |
| 1603 HeapObject* heap_object = HeapObject::cast(object); | 1684 HeapObject* heap_object = HeapObject::cast(object); |
| 1604 DCHECK(!ObjectMarking::IsGrey(heap_object, | 1685 DCHECK(!ObjectMarking::IsGrey(heap_object, |
| 1605 MarkingState::Internal(heap_object))); | 1686 MarkingState::Internal(heap_object))); |
| 1606 if (ObjectMarking::IsBlack(heap_object, | 1687 if (ObjectMarking::IsBlack(heap_object, |
| 1607 MarkingState::Internal(heap_object))) { | 1688 MarkingState::Internal(heap_object))) { |
| (...skipping 104 matching lines...) | |
| 1712 inline void VisitCellPointer(Code* host, RelocInfo* rinfo) override { | 1793 inline void VisitCellPointer(Code* host, RelocInfo* rinfo) override { |
| 1713 DCHECK_EQ(host, rinfo->host()); | 1794 DCHECK_EQ(host, rinfo->host()); |
| 1714 DCHECK(rinfo->rmode() == RelocInfo::CELL); | 1795 DCHECK(rinfo->rmode() == RelocInfo::CELL); |
| 1715 Cell* cell = rinfo->target_cell(); | 1796 Cell* cell = rinfo->target_cell(); |
| 1716 // The cell is always in old space, we don't have to record the slot in | 1797 // The cell is always in old space, we don't have to record the slot in |
| 1717 // the old-to-new remembered set. | 1798 // the old-to-new remembered set. |
| 1718 DCHECK(!collector_->heap()->InNewSpace(cell)); | 1799 DCHECK(!collector_->heap()->InNewSpace(cell)); |
| 1719 collector_->RecordRelocSlot(host, rinfo, cell); | 1800 collector_->RecordRelocSlot(host, rinfo, cell); |
| 1720 } | 1801 } |
| 1721 | 1802 |
| 1722 // Entries that will never move. | |
| 1723 inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override { | 1803 inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override { |
| 1724 DCHECK_EQ(host, rinfo->host()); | 1804 DCHECK_EQ(host, rinfo->host()); |
| 1725 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); | 1805 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); |
| 1726 Code* stub = rinfo->code_age_stub(); | 1806 Code* stub = rinfo->code_age_stub(); |
| 1727 USE(stub); | 1807 USE(stub); |
| 1728 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate()); | 1808 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate()); |
| 1729 } | 1809 } |
| 1730 | 1810 |
| 1731 // Entries that are skipped for recording. | 1811 // Entries that are skipped for recording. |
| 1732 inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {} | 1812 inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {} |
| (...skipping 10 matching lines...) | |
| 1743 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot); | 1823 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot); |
| 1744 } else if (p->IsEvacuationCandidate()) { | 1824 } else if (p->IsEvacuationCandidate()) { |
| 1745 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot); | 1825 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot); |
| 1746 } | 1826 } |
| 1747 } | 1827 } |
| 1748 } | 1828 } |
| 1749 | 1829 |
| 1750 MarkCompactCollector* collector_; | 1830 MarkCompactCollector* collector_; |
| 1751 }; | 1831 }; |
| 1752 | 1832 |
| 1833 class YoungGenerationRecordMigratedSlotVisitor final | |
| 1834 : public RecordMigratedSlotVisitor { | |
| 1835 public: | |
| 1836 explicit YoungGenerationRecordMigratedSlotVisitor( | |
| 1837 MarkCompactCollector* collector) | |
| 1838 : RecordMigratedSlotVisitor(collector) {} | |
| 1839 | |
| 1840 inline void VisitCodeEntry(JSFunction* host, Address code_entry_slot) final { | |
| 1841 Address code_entry = Memory::Address_at(code_entry_slot); | |
| 1842 if (Page::FromAddress(code_entry)->IsEvacuationCandidate() && | |
| 1843 IsLive(host)) { | |
| 1844 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot), | |
| 1845 nullptr, CODE_ENTRY_SLOT, | |
| 1846 code_entry_slot); | |
| 1847 } | |
| 1848 } | |
| 1849 | |
| 1850 void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } | |
| 1851 void VisitDebugTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } | |
| 1852 void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final { | |
| 1853 UNREACHABLE(); | |
| 1854 } | |
| 1855 void VisitCellPointer(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } | |
| 1856 void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) final { | |
| 1857 UNREACHABLE(); | |
| 1858 } | |
| 1859 | |
| 1860 private: | |
| 1861 // Only record slots for host objects that are considered as live by the full | |
| 1862 // collector. | |
| 1863 inline bool IsLive(HeapObject* object) { | |
| 1864 return ObjectMarking::IsBlack(object, collector_->marking_state(object)); | |
| 1865 } | |
| 1866 | |
| 1867 inline void RecordMigratedSlot(HeapObject* host, Object* value, | |
| 1868 Address slot) final { | |
| 1869 if (value->IsHeapObject()) { | |
| 1870 Page* p = Page::FromAddress(reinterpret_cast<Address>(value)); | |
| 1871 if (p->InNewSpace()) { | |
| 1872 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot); | |
| 1873 } else if (p->IsEvacuationCandidate() && IsLive(host)) { | |
| 1874 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot); | |
| 1875 } | |
| 1876 } | |
| 1877 } | |
| 1878 }; | |
| 1879 | |
| 1753 class HeapObjectVisitor { | 1880 class HeapObjectVisitor { |
| 1754 public: | 1881 public: |
| 1755 virtual ~HeapObjectVisitor() {} | 1882 virtual ~HeapObjectVisitor() {} |
| 1756 virtual bool Visit(HeapObject* object) = 0; | 1883 virtual bool Visit(HeapObject* object) = 0; |
| 1757 }; | 1884 }; |
| 1758 | 1885 |
| 1886 class MigrationObserver { | |
| 1887 public: | |
| 1888 explicit MigrationObserver(Heap* heap) : heap_(heap) {} | |
| 1889 | |
| 1890 virtual ~MigrationObserver() {} | |
| 1891 virtual inline void Move(HeapObject* src, HeapObject* dst) {} | |
| 1892 | |
| 1893 protected: | |
| 1894 Heap* heap_; | |
| 1895 }; | |
| 1896 | |
| 1897 class YoungGenerationMigrationObserver : public MigrationObserver { | |
| 1898 public: | |
| 1899 YoungGenerationMigrationObserver( | |
| 1900 Heap* heap, MarkCompactCollector* mark_compact_collector, | |
| 1901 std::vector<HeapObject*>* black_allocation_objects) | |
| 1902 : MigrationObserver(heap), | |
| 1903 mark_compact_collector_(mark_compact_collector), | |
| 1904 black_allocation_objects_(black_allocation_objects) {} | |
| 1905 | |
| 1906 inline void Move(HeapObject* src, HeapObject* dst) final { | |
| 1907 // Migrate color to old generation marking in case the object survived young | |
| 1908 // generation garbage collection. | |
| 1909 if (heap_->incremental_marking()->IsMarking()) { | |
| 1910 const MarkingState state = mark_compact_collector_->marking_state(dst); | |
| 1911 if (heap_->incremental_marking()->black_allocation() && | |
| 1912 ObjectMarking::IsBlack(dst, state)) { | |
|
ulan (2017/04/26 09:51:28): if (ObjectMarking::IsBlack(dst, state)) { DCHECK
Michael Lippautz (2017/05/02 11:22:00): Done.
| |
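The truncated comment appears to ask for the black check to come first, with black allocation asserted rather than tested. A sketch of that restructuring, keeping the original semantics; whether the landed patch looks exactly like this is an assumption:

```cpp
inline void Move(HeapObject* src, HeapObject* dst) final {
  // Migrate color to old-generation marking in case the object survived
  // young-generation garbage collection.
  if (heap_->incremental_marking()->IsMarking()) {
    const MarkingState state = mark_compact_collector_->marking_state(dst);
    if (ObjectMarking::IsBlack(dst, state)) {
      // A black destination can only occur while black allocation is active.
      DCHECK(heap_->incremental_marking()->black_allocation());
      base::LockGuard<base::Mutex> guard(&mutex_);
      black_allocation_objects_->push_back(dst);
    } else {
      // Otherwise transfer the old-generation marking state from the source.
      IncrementalMarking::TransferColor<MarkBit::ATOMIC>(src, dst);
    }
  }
}
```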
| 1913 base::LockGuard<base::Mutex> guard(&mutex_); | |
| 1914 black_allocation_objects_->push_back(dst); | |
| 1915 } | |
| 1916 | |
| 1917 // Transfer old generation marking state. | |
| 1918 if (!ObjectMarking::IsBlack(dst, state)) { | |
| 1919 IncrementalMarking::TransferColor<MarkBit::ATOMIC>(src, dst); | |
| 1920 } | |
| 1921 } | |
| 1922 } | |
| 1923 | |
| 1924 protected: | |
| 1925 base::Mutex mutex_; | |
| 1926 MarkCompactCollector* mark_compact_collector_; | |
| 1927 std::vector<HeapObject*>* black_allocation_objects_; | |
| 1928 }; | |
| 1929 | |
| 1759 class EvacuateVisitorBase : public HeapObjectVisitor { | 1930 class EvacuateVisitorBase : public HeapObjectVisitor { |
| 1760 protected: | 1931 protected: |
| 1761 enum MigrationMode { kFast, kProfiled }; | 1932 enum MigrationMode { kFast, kProfiled }; |
| 1762 | 1933 |
| 1763 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces, | 1934 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces, |
| 1764 RecordMigratedSlotVisitor* record_visitor) | 1935 RecordMigratedSlotVisitor* record_visitor, |
| 1936 MigrationObserver* migration_observer) | |
| 1765 : heap_(heap), | 1937 : heap_(heap), |
| 1766 compaction_spaces_(compaction_spaces), | 1938 compaction_spaces_(compaction_spaces), |
| 1767 record_visitor_(record_visitor), | 1939 record_visitor_(record_visitor), |
| 1940 migration_observer_(migration_observer), | |
| 1768 profiling_( | 1941 profiling_( |
| 1769 heap->isolate()->is_profiling() || | 1942 heap->isolate()->is_profiling() || |
| 1770 heap->isolate()->logger()->is_logging_code_events() || | 1943 heap->isolate()->logger()->is_logging_code_events() || |
| 1771 heap->isolate()->heap_profiler()->is_tracking_object_moves()) {} | 1944 heap->isolate()->heap_profiler()->is_tracking_object_moves()) {} |
| 1772 | 1945 |
| 1773 inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object, | 1946 inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object, |
| 1774 HeapObject** target_object) { | 1947 HeapObject** target_object) { |
| 1775 #ifdef VERIFY_HEAP | 1948 #ifdef VERIFY_HEAP |
| 1776 if (AbortCompactionForTesting(object)) return false; | 1949 if (AbortCompactionForTesting(object)) return false; |
| 1777 #endif // VERIFY_HEAP | 1950 #endif // VERIFY_HEAP |
| (...skipping 24 matching lines...) | |
| 1802 DCHECK(heap_->AllowedToBeMigrated(src, dest)); | 1975 DCHECK(heap_->AllowedToBeMigrated(src, dest)); |
| 1803 DCHECK(dest != LO_SPACE); | 1976 DCHECK(dest != LO_SPACE); |
| 1804 if (dest == OLD_SPACE) { | 1977 if (dest == OLD_SPACE) { |
| 1805 DCHECK_OBJECT_SIZE(size); | 1978 DCHECK_OBJECT_SIZE(size); |
| 1806 DCHECK(IsAligned(size, kPointerSize)); | 1979 DCHECK(IsAligned(size, kPointerSize)); |
| 1807 heap_->CopyBlock(dst_addr, src_addr, size); | 1980 heap_->CopyBlock(dst_addr, src_addr, size); |
| 1808 if ((mode == kProfiled) && dst->IsBytecodeArray()) { | 1981 if ((mode == kProfiled) && dst->IsBytecodeArray()) { |
| 1809 PROFILE(heap_->isolate(), | 1982 PROFILE(heap_->isolate(), |
| 1810 CodeMoveEvent(AbstractCode::cast(src), dst_addr)); | 1983 CodeMoveEvent(AbstractCode::cast(src), dst_addr)); |
| 1811 } | 1984 } |
| 1985 migration_observer_->Move(src, dst); | |
| 1812 dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_); | 1986 dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_); |
| 1813 } else if (dest == CODE_SPACE) { | 1987 } else if (dest == CODE_SPACE) { |
| 1814 DCHECK_CODEOBJECT_SIZE(size, heap_->code_space()); | 1988 DCHECK_CODEOBJECT_SIZE(size, heap_->code_space()); |
| 1815 if (mode == kProfiled) { | 1989 if (mode == kProfiled) { |
| 1816 PROFILE(heap_->isolate(), | 1990 PROFILE(heap_->isolate(), |
| 1817 CodeMoveEvent(AbstractCode::cast(src), dst_addr)); | 1991 CodeMoveEvent(AbstractCode::cast(src), dst_addr)); |
| 1818 } | 1992 } |
| 1819 heap_->CopyBlock(dst_addr, src_addr, size); | 1993 heap_->CopyBlock(dst_addr, src_addr, size); |
| 1820 Code::cast(dst)->Relocate(dst_addr - src_addr); | 1994 Code::cast(dst)->Relocate(dst_addr - src_addr); |
| 1821 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); | 1995 migration_observer_->Move(src, dst); |
| 1822 dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_); | 1996 dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_); |
| 1823 } else { | 1997 } else { |
| 1824 DCHECK_OBJECT_SIZE(size); | 1998 DCHECK_OBJECT_SIZE(size); |
| 1825 DCHECK(dest == NEW_SPACE); | 1999 DCHECK(dest == NEW_SPACE); |
| 1826 heap_->CopyBlock(dst_addr, src_addr, size); | 2000 heap_->CopyBlock(dst_addr, src_addr, size); |
| 2001 migration_observer_->Move(src, dst); | |
| 1827 } | 2002 } |
| 1828 if (mode == kProfiled) { | 2003 if (mode == kProfiled) { |
| 1829 heap_->OnMoveEvent(dst, src, size); | 2004 heap_->OnMoveEvent(dst, src, size); |
| 1830 } | 2005 } |
| 1831 base::NoBarrier_Store(reinterpret_cast<base::AtomicWord*>(src_addr), | 2006 base::NoBarrier_Store(reinterpret_cast<base::AtomicWord*>(src_addr), |
| 1832 reinterpret_cast<base::AtomicWord>(dst_addr)); | 2007 reinterpret_cast<base::AtomicWord>(dst_addr)); |
| 1833 } | 2008 } |
| 1834 | 2009 |
| 1835 #ifdef VERIFY_HEAP | 2010 #ifdef VERIFY_HEAP |
| 1836 bool AbortCompactionForTesting(HeapObject* object) { | 2011 bool AbortCompactionForTesting(HeapObject* object) { |
| (...skipping 11 matching lines...) | |
| 1848 } | 2023 } |
| 1849 } | 2024 } |
| 1850 } | 2025 } |
| 1851 return false; | 2026 return false; |
| 1852 } | 2027 } |
| 1853 #endif // VERIFY_HEAP | 2028 #endif // VERIFY_HEAP |
| 1854 | 2029 |
| 1855 Heap* heap_; | 2030 Heap* heap_; |
| 1856 CompactionSpaceCollection* compaction_spaces_; | 2031 CompactionSpaceCollection* compaction_spaces_; |
| 1857 RecordMigratedSlotVisitor* record_visitor_; | 2032 RecordMigratedSlotVisitor* record_visitor_; |
| 2033 MigrationObserver* migration_observer_; | |
| 1858 bool profiling_; | 2034 bool profiling_; |
| 1859 }; | 2035 }; |
| 1860 | 2036 |
| 1861 class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase { | 2037 class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase { |
| 1862 public: | 2038 public: |
| 1863 static const intptr_t kLabSize = 4 * KB; | 2039 static const intptr_t kLabSize = 4 * KB; |
| 1864 static const intptr_t kMaxLabObjectSize = 256; | 2040 static const intptr_t kMaxLabObjectSize = 256; |
| 1865 | 2041 |
| 1866 explicit EvacuateNewSpaceVisitor(Heap* heap, | 2042 explicit EvacuateNewSpaceVisitor(Heap* heap, |
| 1867 CompactionSpaceCollection* compaction_spaces, | 2043 CompactionSpaceCollection* compaction_spaces, |
| 1868 RecordMigratedSlotVisitor* record_visitor, | 2044 RecordMigratedSlotVisitor* record_visitor, |
| 2045 MigrationObserver* migration_observer, | |
| 1869 base::HashMap* local_pretenuring_feedback) | 2046 base::HashMap* local_pretenuring_feedback) |
| 1870 : EvacuateVisitorBase(heap, compaction_spaces, record_visitor), | 2047 : EvacuateVisitorBase(heap, compaction_spaces, record_visitor, |
| 2048 migration_observer), | |
| 1871 buffer_(LocalAllocationBuffer::InvalidBuffer()), | 2049 buffer_(LocalAllocationBuffer::InvalidBuffer()), |
| 1872 space_to_allocate_(NEW_SPACE), | 2050 space_to_allocate_(NEW_SPACE), |
| 1873 promoted_size_(0), | 2051 promoted_size_(0), |
| 1874 semispace_copied_size_(0), | 2052 semispace_copied_size_(0), |
| 1875 local_pretenuring_feedback_(local_pretenuring_feedback) {} | 2053 local_pretenuring_feedback_(local_pretenuring_feedback) {} |
| 1876 | 2054 |
| 1877 inline bool Visit(HeapObject* object) override { | 2055 inline bool Visit(HeapObject* object) override { |
| 1878 heap_->UpdateAllocationSite<Heap::kCached>(object, | 2056 heap_->UpdateAllocationSite<Heap::kCached>(object, |
| 1879 local_pretenuring_feedback_); | 2057 local_pretenuring_feedback_); |
| 1880 int size = object->Size(); | 2058 int size = object->Size(); |
| (...skipping 162 matching lines...) | |
| 2043 Heap* heap_; | 2221 Heap* heap_; |
| 2044 RecordMigratedSlotVisitor* record_visitor_; | 2222 RecordMigratedSlotVisitor* record_visitor_; |
| 2045 intptr_t moved_bytes_; | 2223 intptr_t moved_bytes_; |
| 2046 base::HashMap* local_pretenuring_feedback_; | 2224 base::HashMap* local_pretenuring_feedback_; |
| 2047 }; | 2225 }; |
| 2048 | 2226 |
| 2049 class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase { | 2227 class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase { |
| 2050 public: | 2228 public: |
| 2051 EvacuateOldSpaceVisitor(Heap* heap, | 2229 EvacuateOldSpaceVisitor(Heap* heap, |
| 2052 CompactionSpaceCollection* compaction_spaces, | 2230 CompactionSpaceCollection* compaction_spaces, |
| 2053 RecordMigratedSlotVisitor* record_visitor) | 2231 RecordMigratedSlotVisitor* record_visitor, |
| 2054 : EvacuateVisitorBase(heap, compaction_spaces, record_visitor) {} | 2232 MigrationObserver* migration_observer) |
| 2233 : EvacuateVisitorBase(heap, compaction_spaces, record_visitor, | |
| 2234 migration_observer) {} | |
| 2055 | 2235 |
| 2056 inline bool Visit(HeapObject* object) override { | 2236 inline bool Visit(HeapObject* object) override { |
| 2057 CompactionSpace* target_space = compaction_spaces_->Get( | 2237 CompactionSpace* target_space = compaction_spaces_->Get( |
| 2058 Page::FromAddress(object->address())->owner()->identity()); | 2238 Page::FromAddress(object->address())->owner()->identity()); |
| 2059 HeapObject* target_object = nullptr; | 2239 HeapObject* target_object = nullptr; |
| 2060 if (TryEvacuateObject(target_space, object, &target_object)) { | 2240 if (TryEvacuateObject(target_space, object, &target_object)) { |
| 2061 DCHECK(object->map_word().IsForwardingAddress()); | 2241 DCHECK(object->map_word().IsForwardingAddress()); |
| 2062 return true; | 2242 return true; |
| 2063 } | 2243 } |
| 2064 return false; | 2244 return false; |
| (...skipping 339 matching lines...) | |
| 2404 heap_object); | 2584 heap_object); |
| 2405 return KEEP_SLOT; | 2585 return KEEP_SLOT; |
| 2406 } | 2586 } |
| 2407 return REMOVE_SLOT; | 2587 return REMOVE_SLOT; |
| 2408 } | 2588 } |
| 2409 | 2589 |
| 2410 static bool IsUnmarkedObject(Heap* heap, Object** p) { | 2590 static bool IsUnmarkedObject(Heap* heap, Object** p) { |
| 2411 DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p)); | 2591 DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p)); |
| 2412 return heap->InNewSpace(*p) && | 2592 return heap->InNewSpace(*p) && |
| 2413 !ObjectMarking::IsBlack(HeapObject::cast(*p), | 2593 !ObjectMarking::IsBlack(HeapObject::cast(*p), |
| 2414 MarkingState::Internal(HeapObject::cast(*p))); | 2594 MarkingState::External(HeapObject::cast(*p))); |
| 2415 } | 2595 } |
| 2416 | 2596 |
| 2417 void MinorMarkCompactCollector::MarkLiveObjects() { | 2597 void MinorMarkCompactCollector::MarkLiveObjects() { |
| 2418 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK); | 2598 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK); |
| 2419 | 2599 |
| 2420 PostponeInterruptsScope postpone(isolate()); | 2600 PostponeInterruptsScope postpone(isolate()); |
| 2421 | 2601 |
| 2422 StaticYoungGenerationMarkingVisitor::Initialize(heap()); | 2602 StaticYoungGenerationMarkingVisitor::Initialize(heap()); |
| 2423 RootMarkingVisitor root_visitor(this); | 2603 RootMarkingVisitor root_visitor(this); |
| 2424 | 2604 |
| (...skipping 31 matching lines...) | |
| 2456 heap()->IterateEncounteredWeakCollections(&root_visitor); | 2636 heap()->IterateEncounteredWeakCollections(&root_visitor); |
| 2457 ProcessMarkingDeque(); | 2637 ProcessMarkingDeque(); |
| 2458 } | 2638 } |
| 2459 | 2639 |
| 2460 { | 2640 { |
| 2461 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES); | 2641 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES); |
| 2462 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( | 2642 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( |
| 2463 &IsUnmarkedObject); | 2643 &IsUnmarkedObject); |
| 2464 isolate() | 2644 isolate() |
| 2465 ->global_handles() | 2645 ->global_handles() |
| 2466 ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>( | 2646 ->IterateNewSpaceWeakUnmodifiedRoots< |
| 2467 &root_visitor); | 2647 GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&root_visitor); |
| 2468 ProcessMarkingDeque(); | 2648 ProcessMarkingDeque(); |
| 2469 } | 2649 } |
| 2470 | 2650 |
| 2471 marking_deque()->StopUsing(); | 2651 marking_deque()->StopUsing(); |
| 2472 } | 2652 } |
| 2473 | 2653 |
| 2474 void MinorMarkCompactCollector::ProcessMarkingDeque() { | 2654 void MinorMarkCompactCollector::ProcessMarkingDeque() { |
| 2475 EmptyMarkingDeque(); | 2655 EmptyMarkingDeque(); |
| 2476 DCHECK(!marking_deque()->overflowed()); | 2656 DCHECK(!marking_deque()->overflowed()); |
| 2477 DCHECK(marking_deque()->IsEmpty()); | 2657 DCHECK(marking_deque()->IsEmpty()); |
| (...skipping 11 matching lines...) | |
| 2489 object, MarkingState::External(object)))); | 2669 object, MarkingState::External(object)))); |
| 2490 | 2670 |
| 2491 Map* map = object->map(); | 2671 Map* map = object->map(); |
| 2492 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>( | 2672 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>( |
| 2493 object, MarkingState::External(object)))); | 2673 object, MarkingState::External(object)))); |
| 2494 StaticYoungGenerationMarkingVisitor::IterateBody(map, object); | 2674 StaticYoungGenerationMarkingVisitor::IterateBody(map, object); |
| 2495 } | 2675 } |
| 2496 } | 2676 } |
| 2497 | 2677 |
| 2498 void MinorMarkCompactCollector::CollectGarbage() { | 2678 void MinorMarkCompactCollector::CollectGarbage() { |
| 2679 heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted(); | |
| 2680 | |
| 2499 MarkLiveObjects(); | 2681 MarkLiveObjects(); |
| 2500 | 2682 ClearNonLiveReferences(); |
| 2501 #ifdef VERIFY_HEAP | 2683 #ifdef VERIFY_HEAP |
| 2502 if (FLAG_verify_heap) { | 2684 if (FLAG_verify_heap) { |
| 2503 YoungGenerationMarkingVerifier verifier(heap()); | 2685 YoungGenerationMarkingVerifier verifier(heap()); |
| 2504 verifier.Run(); | 2686 verifier.Run(); |
| 2505 } | 2687 } |
| 2506 #endif // VERIFY_HEAP | 2688 #endif // VERIFY_HEAP |
| 2689 | |
| 2690 std::vector<HeapObject*> black_allocation_objects; | |
| 2691 EvacuateNewSpace(&black_allocation_objects); | |
| 2692 #ifdef VERIFY_HEAP | |
| 2693 if (FLAG_verify_heap) { | |
| 2694 YoungGenerationEvacuationVerifier verifier(heap()); | |
| 2695 verifier.Run(); | |
| 2696 } | |
| 2697 #endif // VERIFY_HEAP | |
| 2698 | |
| 2699 heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge(); | |
| 2700 | |
| 2701 // Process black allocation objects after updating pointers as we otherwise | |
| 2702 // would end up with objects on the marking deque that potentially forward | |
| 2703 // to white objects. | |
| 2704 // TODO(mlippautz): Instead of processing them explicitly, we should just add | |
| 2705 // them to the marking deque for further processing. | |
| 2706 { | |
| 2707 TRACE_GC(heap()->tracer(), | |
| 2708 GCTracer::Scope::MINOR_MC_EVACUATE_PROCESS_BLACK_ALLOCATION); | |
| 2709 for (HeapObject* object : black_allocation_objects) { | |
| 2710 CHECK(ObjectMarking::IsBlack(object, MarkingState::Internal(object))); | |
| 2711 heap()->incremental_marking()->IterateBlackObject(object); | |
| 2712 } | |
| 2713 heap()->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer(); | |
| 2714 } | |
| 2715 | |
| 2716 { | |
| 2717 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS); | |
| 2718 for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(), | |
| 2719 heap()->new_space()->FromSpaceEnd())) { | |
| 2720 marking_state(p).ClearLiveness(); | |
| 2721 } | |
| 2722 } | |
| 2723 } | |
| 2724 | |
| 2725 void MinorMarkCompactCollector::ClearNonLiveReferences() { | |
| 2726 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR); | |
| 2727 | |
| 2728 { | |
| 2729 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE); | |
| 2730 // Internalized strings are always stored in old space, so there is no need | |
| 2731 // to clean them here. | |
| 2732 YoungGenerationExternalStringTableCleaner external_visitor(*this); | |
| 2733 heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor); | |
| 2734 heap()->external_string_table_.CleanUpNewSpaceStrings(); | |
| 2735 } | |
| 2736 | |
| 2737 { | |
| 2738 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS); | |
| 2739 // Process the weak references. | |
| 2740 MinorMarkCompactWeakObjectRetainer retainer(*this); | |
| 2741 heap()->ProcessYoungWeakReferences(&retainer); | |
| 2742 } | |
| 2743 } | |
| 2744 | |
| 2745 void MinorMarkCompactCollector::EvacuatePrologue() { | |
| 2746 NewSpace* new_space = heap()->new_space(); | |
| 2747 // Append the list of new space pages to be processed. | |
| 2748 for (Page* p : PageRange(new_space->bottom(), new_space->top())) { | |
| 2749 new_space_evacuation_pages_.Add(p); | |
| 2750 } | |
| 2751 new_space->Flip(); | |
| 2752 new_space->ResetAllocationInfo(); | |
| 2753 } | |
| 2754 | |
| 2755 void MinorMarkCompactCollector::EvacuateEpilogue() { | |
| 2756 heap()->new_space()->set_age_mark(heap()->new_space()->top()); | |
| 2757 } | |
| 2758 | |
| 2759 void MinorMarkCompactCollector::EvacuateNewSpace( | |
| 2760 std::vector<HeapObject*>* black_allocation_objects) { | |
| 2761 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | |
| 2762 Heap::RelocationLock relocation_lock(heap()); | |
| 2763 | |
| 2764 { | |
| 2765 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE); | |
| 2766 EvacuatePrologue(); | |
| 2767 } | |
| 2768 | |
| 2769 { | |
| 2770 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); | |
| 2771 EvacuatePagesInParallel(black_allocation_objects); | |
| 2772 } | |
| 2773 | |
| 2774 UpdatePointersAfterEvacuation(); | |
| 2775 | |
| 2776 { | |
| 2777 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE); | |
| 2778 if (!heap()->new_space()->Rebalance()) { | |
| 2779 FatalProcessOutOfMemory("NewSpace::Rebalance"); | |
| 2780 } | |
| 2781 } | |
| 2782 | |
| 2783 // Give pages that are queued to be freed back to the OS. | |
| 2784 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | |
| 2785 | |
| 2786 { | |
| 2787 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | |
| 2788 // TODO(mlippautz): Implement page promotion. | |
| 2789 new_space_evacuation_pages_.Rewind(0); | |
| 2790 } | |
| 2791 | |
| 2792 { | |
| 2793 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE); | |
| 2794 EvacuateEpilogue(); | |
| 2795 } | |
| 2507 } | 2796 } |
| 2508 | 2797 |
| 2509 void MarkCompactCollector::MarkLiveObjects() { | 2798 void MarkCompactCollector::MarkLiveObjects() { |
| 2510 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK); | 2799 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK); |
| 2511 // The recursive GC marker detects when it is nearing stack overflow, | 2800 // The recursive GC marker detects when it is nearing stack overflow, |
| 2512 // and switches to a different marking system. JS interrupts interfere | 2801 // and switches to a different marking system. JS interrupts interfere |
| 2513 // with the C stack limit check. | 2802 // with the C stack limit check. |
| 2514 PostponeInterruptsScope postpone(isolate()); | 2803 PostponeInterruptsScope postpone(isolate()); |
| 2515 | 2804 |
| 2516 { | 2805 { |
| (...skipping 659 matching lines...) | |
| 3176 } | 3465 } |
| 3177 | 3466 |
| 3178 // NewSpacePages with more live bytes than this threshold qualify for fast | 3467 // NewSpacePages with more live bytes than this threshold qualify for fast |
| 3179 // evacuation. | 3468 // evacuation. |
| 3180 static int PageEvacuationThreshold() { | 3469 static int PageEvacuationThreshold() { |
| 3181 if (FLAG_page_promotion) | 3470 if (FLAG_page_promotion) |
| 3182 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; | 3471 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; |
| 3183 return Page::kAllocatableMemory + kPointerSize; | 3472 return Page::kAllocatableMemory + kPointerSize; |
| 3184 } | 3473 } |
| 3185 | 3474 |
| 3186 Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor) | 3475 Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor, |
| 3476 MigrationObserver* migration_observer) | |
| 3187 : heap_(heap), | 3477 : heap_(heap), |
| 3188 compaction_spaces_(heap_), | 3478 compaction_spaces_(heap_), |
| 3189 local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity), | 3479 local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity), |
| 3190 new_space_visitor_(heap_, &compaction_spaces_, record_visitor, | 3480 new_space_visitor_(heap_, &compaction_spaces_, record_visitor, |
| 3191 &local_pretenuring_feedback_), | 3481 migration_observer, &local_pretenuring_feedback_), |
| 3192 new_to_new_page_visitor_(heap_, record_visitor, | 3482 new_to_new_page_visitor_(heap_, record_visitor, |
| 3193 &local_pretenuring_feedback_), | 3483 &local_pretenuring_feedback_), |
| 3194 new_to_old_page_visitor_(heap_, record_visitor, | 3484 new_to_old_page_visitor_(heap_, record_visitor, |
| 3195 &local_pretenuring_feedback_), | 3485 &local_pretenuring_feedback_), |
| 3196 | 3486 |
| 3197 old_space_visitor_(heap_, &compaction_spaces_, record_visitor), | 3487 old_space_visitor_(heap_, &compaction_spaces_, record_visitor, |
| 3488 migration_observer), | |
| 3198 duration_(0.0), | 3489 duration_(0.0), |
| 3199 bytes_compacted_(0) {} | 3490 bytes_compacted_(0) {} |
| 3200 | 3491 |
| 3201 virtual ~Evacuator() {} | 3492 virtual ~Evacuator() {} |
| 3202 | 3493 |
| 3203 bool EvacuatePage(Page* page); | 3494 bool EvacuatePage(Page* page); |
| 3204 | 3495 |
| 3205 // Merge back locally cached info sequentially. Note that this method needs | 3496 // Merge back locally cached info sequentially. Note that this method needs |
| 3206 // to be called from the main thread. | 3497 // to be called from the main thread. |
| 3207 inline void Finalize(); | 3498 inline void Finalize(); |
| (...skipping 75 matching lines...) | |
| 3283 new_space_visitor_.promoted_size() + | 3574 new_space_visitor_.promoted_size() + |
| 3284 new_space_visitor_.semispace_copied_size() + | 3575 new_space_visitor_.semispace_copied_size() + |
| 3285 new_to_old_page_visitor_.moved_bytes() + | 3576 new_to_old_page_visitor_.moved_bytes() + |
| 3286 new_to_new_page_visitor_.moved_bytes()); | 3577 new_to_new_page_visitor_.moved_bytes()); |
| 3287 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); | 3578 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); |
| 3288 } | 3579 } |
| 3289 | 3580 |
| 3290 class FullEvacuator : public Evacuator { | 3581 class FullEvacuator : public Evacuator { |
| 3291 public: | 3582 public: |
| 3292 FullEvacuator(MarkCompactCollector* collector, | 3583 FullEvacuator(MarkCompactCollector* collector, |
| 3293 RecordMigratedSlotVisitor* record_visitor) | 3584 RecordMigratedSlotVisitor* record_visitor, |
| 3294 : Evacuator(collector->heap(), record_visitor), collector_(collector) {} | 3585 MigrationObserver* migration_observer) |
| 3586 : Evacuator(collector->heap(), record_visitor, migration_observer), | |
| 3587 collector_(collector) {} | |
| 3295 | 3588 |
| 3296 protected: | 3589 protected: |
| 3297 bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override; | 3590 bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override; |
| 3298 | 3591 |
| 3299 MarkCompactCollector* collector_; | 3592 MarkCompactCollector* collector_; |
| 3300 }; | 3593 }; |
| 3301 | 3594 |
| 3302 bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) { | 3595 bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) { |
| 3303 bool success = false; | 3596 bool success = false; |
| 3304 LiveObjectVisitor object_visitor; | 3597 LiveObjectVisitor object_visitor; |
| (...skipping 43 matching lines...) | |
| 3348 success = false; | 3641 success = false; |
| 3349 } else { | 3642 } else { |
| 3350 ArrayBufferTracker::ProcessBuffers( | 3643 ArrayBufferTracker::ProcessBuffers( |
| 3351 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | 3644 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
| 3352 } | 3645 } |
| 3353 break; | 3646 break; |
| 3354 } | 3647 } |
| 3355 return success; | 3648 return success; |
| 3356 } | 3649 } |
| 3357 | 3650 |
| 3651 class YoungGenerationEvacuator : public Evacuator { | |
| 3652 public: | |
| 3653 YoungGenerationEvacuator(MinorMarkCompactCollector* collector, | |
| 3654 RecordMigratedSlotVisitor* record_visitor, | |
| 3655 MigrationObserver* migration_observer) | |
| 3656 : Evacuator(collector->heap(), record_visitor, migration_observer), | |
| 3657 collector_(collector) {} | |
| 3658 | |
| 3659 protected: | |
| 3660 bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override; | |
| 3661 | |
| 3662 MinorMarkCompactCollector* collector_; | |
| 3663 }; | |
| 3664 | |
| 3665 bool YoungGenerationEvacuator::RawEvacuatePage(Page* page, | |
| 3666 intptr_t* live_bytes) { | |
| 3667 bool success = false; | |
| 3668 LiveObjectVisitor object_visitor; | |
| 3669 const MarkingState state = collector_->marking_state(page); | |
| 3670 *live_bytes = state.live_bytes(); | |
| 3671 switch (ComputeEvacuationMode(page)) { | |
| 3672 case kObjectsNewToOld: | |
| 3673 success = object_visitor.VisitBlackObjects( | |
| 3674 page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits); | |
| 3675 DCHECK(success); | |
| 3676 ArrayBufferTracker::ProcessBuffers( | |
| 3677 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
| 3678 break; | |
| 3679 case kPageNewToOld: | |
| 3680 // TODO(mlippautz): Implement page promotion. | |
| 3681 UNREACHABLE(); | |
| 3682 break; | |
| 3683 case kPageNewToNew: | |
| 3684 // TODO(mlippautz): Implement page promotion. | |
| 3685 UNREACHABLE(); | |
| 3686 break; | |
| 3687 case kObjectsOldToOld: | |
| 3688 UNREACHABLE(); | |
| 3689 break; | |
| 3690 } | |
| 3691 return success; | |
| 3692 } | |
| 3693 | |
| 3358 class EvacuationJobTraits { | 3694 class EvacuationJobTraits { |
| 3359 public: | 3695 public: |
| 3360 typedef int* PerPageData; // Pointer to number of aborted pages. | 3696 struct PageData { |
| 3697 int* abandoned_pages; // Pointer to number of aborted pages. | |
| 3698 MarkingState marking_state; | |
| 3699 }; | |
| 3700 | |
| 3701 typedef PageData PerPageData; | |
| 3361 typedef Evacuator* PerTaskData; | 3702 typedef Evacuator* PerTaskData; |
| 3362 | 3703 |
| 3363 static const bool NeedSequentialFinalization = true; | 3704 static const bool NeedSequentialFinalization = true; |
| 3364 | 3705 |
| 3365 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, | 3706 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, |
| 3366 MemoryChunk* chunk, PerPageData) { | 3707 MemoryChunk* chunk, PerPageData) { |
| 3367 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); | 3708 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); |
| 3368 } | 3709 } |
| 3369 | 3710 |
| 3370 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, | 3711 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, |
| (...skipping 12 matching lines...) | |
| 3383 if (success) { | 3724 if (success) { |
| 3384 DCHECK(p->IsEvacuationCandidate()); | 3725 DCHECK(p->IsEvacuationCandidate()); |
| 3385 DCHECK(p->SweepingDone()); | 3726 DCHECK(p->SweepingDone()); |
| 3386 p->Unlink(); | 3727 p->Unlink(); |
| 3387 } else { | 3728 } else { |
| 3388 // We have partially compacted the page, i.e., some objects may have | 3729 // We have partially compacted the page, i.e., some objects may have |
| 3389 // moved, others are still in place. | 3730 // moved, others are still in place. |
| 3390 p->ClearEvacuationCandidate(); | 3731 p->ClearEvacuationCandidate(); |
| 3391 // Slots have already been recorded so we just need to add it to the | 3732 // Slots have already been recorded so we just need to add it to the |
| 3392 // sweeper, which will happen after updating pointers. | 3733 // sweeper, which will happen after updating pointers. |
| 3393 *data += 1; | 3734 *data.abandoned_pages += 1; |
| 3394 } | 3735 } |
| 3395 break; | 3736 break; |
| 3396 default: | 3737 default: |
| 3397 UNREACHABLE(); | 3738 UNREACHABLE(); |
| 3398 } | 3739 } |
| 3399 } | 3740 } |
| 3400 }; | 3741 }; |
| 3401 | 3742 |
| 3743 template <class Evacuator, class Collector> | |
| 3744 void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks( | |
| 3745 Collector* collector, PageParallelJob<EvacuationJobTraits>* job, | |
| 3746 RecordMigratedSlotVisitor* record_visitor, MigrationObserver* observer, | |
| 3747 const intptr_t live_bytes, const int& abandoned_pages) { | |
| 3748 // Used for trace summary. | |
| 3749 double compaction_speed = 0; | |
| 3750 if (FLAG_trace_evacuation) { | |
| 3751 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | |
| 3752 } | |
| 3753 | |
| 3754 const int wanted_num_tasks = | |
| 3755 NumberOfParallelCompactionTasks(job->NumberOfPages(), live_bytes); | |
| 3756 Evacuator** evacuators = new Evacuator*[wanted_num_tasks]; | |
| 3757 for (int i = 0; i < wanted_num_tasks; i++) { | |
| 3758 evacuators[i] = new Evacuator(collector, record_visitor, observer); | |
| 3759 } | |
| 3760 job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); | |
| 3761 const Address top = heap()->new_space()->top(); | |
| 3762 for (int i = 0; i < wanted_num_tasks; i++) { | |
| 3763 evacuators[i]->Finalize(); | |
| 3764 // Try to find the last LAB that was used for new space allocation in | |
| 3765 // evacuation tasks. If it was adjacent to the current top, move top back. | |
| 3766 const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB(); | |
| 3767 if (info.limit() != nullptr && info.limit() == top) { | |
| 3768 DCHECK_NOT_NULL(info.top()); | |
| 3769 *heap()->new_space()->allocation_top_address() = info.top(); | |
| 3770 } | |
| 3771 delete evacuators[i]; | |
| 3772 } | |
| 3773 delete[] evacuators; | |
| 3774 | |
| 3775 if (FLAG_trace_evacuation) { | |
| 3776 PrintIsolate(isolate(), | |
| 3777 "%8.0f ms: evacuation-summary: parallel=%s pages=%d " | |
| 3778 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS | |
| 3779 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n", | |
| 3780 isolate()->time_millis_since_init(), | |
| 3781 FLAG_parallel_compaction ? "yes" : "no", job->NumberOfPages(), | |
| 3782 abandoned_pages, wanted_num_tasks, job->NumberOfTasks(), | |
| 3783 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), | |
| 3784 live_bytes, compaction_speed); | |
| 3785 } | |
| 3786 } | |
| 3787 | |
| 3402 void MarkCompactCollector::EvacuatePagesInParallel() { | 3788 void MarkCompactCollector::EvacuatePagesInParallel() { |
| 3403 PageParallelJob<EvacuationJobTraits> job( | 3789 PageParallelJob<EvacuationJobTraits> job( |
| 3404 heap_, heap_->isolate()->cancelable_task_manager(), | 3790 heap_, heap_->isolate()->cancelable_task_manager(), |
| 3405 &page_parallel_job_semaphore_); | 3791 &page_parallel_job_semaphore_); |
| 3406 | 3792 |
| 3407 int abandoned_pages = 0; | 3793 int abandoned_pages = 0; |
| 3408 intptr_t live_bytes = 0; | 3794 intptr_t live_bytes = 0; |
| 3409 for (Page* page : old_space_evacuation_pages_) { | 3795 for (Page* page : old_space_evacuation_pages_) { |
| 3410 live_bytes += MarkingState::Internal(page).live_bytes(); | 3796 live_bytes += MarkingState::Internal(page).live_bytes(); |
| 3411 job.AddPage(page, &abandoned_pages); | 3797 job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
| 3412 } | 3798 } |
| 3413 | 3799 |
| 3414 const bool reduce_memory = heap()->ShouldReduceMemory(); | 3800 const bool reduce_memory = heap()->ShouldReduceMemory(); |
| 3415 const Address age_mark = heap()->new_space()->age_mark(); | 3801 const Address age_mark = heap()->new_space()->age_mark(); |
| 3416 for (Page* page : new_space_evacuation_pages_) { | 3802 for (Page* page : new_space_evacuation_pages_) { |
| 3417 intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes(); | 3803 intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes(); |
| 3418 live_bytes += live_bytes_on_page; | 3804 live_bytes += live_bytes_on_page; |
| 3419 if (!reduce_memory && !page->NeverEvacuate() && | 3805 if (!reduce_memory && !page->NeverEvacuate() && |
| 3420 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) && | 3806 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) && |
| 3421 !page->Contains(age_mark) && | 3807 !page->Contains(age_mark) && |
| 3422 heap()->CanExpandOldGeneration(live_bytes_on_page)) { | 3808 heap()->CanExpandOldGeneration(live_bytes_on_page)) { |
| 3423 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { | 3809 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { |
| 3424 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); | 3810 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); |
| 3425 } else { | 3811 } else { |
| 3426 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); | 3812 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); |
| 3427 } | 3813 } |
| 3428 } | 3814 } |
| 3429 | 3815 |
| 3430 job.AddPage(page, &abandoned_pages); | 3816 job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
| 3431 } | 3817 } |
| 3432 DCHECK_GE(job.NumberOfPages(), 1); | 3818 DCHECK_GE(job.NumberOfPages(), 1); |
| 3433 | 3819 |
| 3434 // Used for trace summary. | 3820 MigrationObserver observer(heap()); |
| 3435 double compaction_speed = 0; | 3821 RecordMigratedSlotVisitor record_visitor(this); |
| 3436 if (FLAG_trace_evacuation) { | 3822 CreateAndExecuteEvacuationTasks<FullEvacuator>( |
| 3437 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3823 this, &job, &record_visitor, &observer, live_bytes, abandoned_pages); |
| 3824 } | |
| 3825 | |
| 3826 void MinorMarkCompactCollector::EvacuatePagesInParallel( | |
| 3827 std::vector<HeapObject*>* black_allocation_objects) { | |
| 3828 PageParallelJob<EvacuationJobTraits> job( | |
| 3829 heap_, heap_->isolate()->cancelable_task_manager(), | |
| 3830 &page_parallel_job_semaphore_); | |
| 3831 int abandoned_pages = 0; | |
| 3832 intptr_t live_bytes = 0; | |
| 3833 | |
| 3834 for (Page* page : new_space_evacuation_pages_) { | |
| 3835 intptr_t live_bytes_on_page = marking_state(page).live_bytes(); | |
| 3836 live_bytes += live_bytes_on_page; | |
| 3837 // TODO(mlippautz): Implement page promotion. | |
| 3838 job.AddPage(page, {&abandoned_pages, marking_state(page)}); | |
| 3438 } | 3839 } |
| 3840 DCHECK_GE(job.NumberOfPages(), 1); | |
| 3439 | 3841 |
| 3440 const int wanted_num_tasks = | 3842 YoungGenerationMigrationObserver observer( |
| 3441 NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes); | 3843 heap(), heap()->mark_compact_collector(), black_allocation_objects); |
| 3442 FullEvacuator** evacuators = new FullEvacuator*[wanted_num_tasks]; | 3844 YoungGenerationRecordMigratedSlotVisitor record_visitor( |
| 3443 RecordMigratedSlotVisitor record_visitor(this); | 3845 heap()->mark_compact_collector()); |
| 3444 for (int i = 0; i < wanted_num_tasks; i++) { | 3846 CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>( |
| 3445 evacuators[i] = new FullEvacuator(this, &record_visitor); | 3847 this, &job, &record_visitor, &observer, live_bytes, abandoned_pages); |
| 3446 } | |
| 3447 job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); | |
| 3448 const Address top = heap()->new_space()->top(); | |
| 3449 for (int i = 0; i < wanted_num_tasks; i++) { | |
| 3450 evacuators[i]->Finalize(); | |
| 3451 // Try to find the last LAB that was used for new space allocation in | |
| 3452 // evacuation tasks. If it was adjacent to the current top, move top back. | |
| 3453 const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB(); | |
| 3454 if (info.limit() != nullptr && info.limit() == top) { | |
| 3455 DCHECK_NOT_NULL(info.top()); | |
| 3456 *heap()->new_space()->allocation_top_address() = info.top(); | |
| 3457 } | |
| 3458 delete evacuators[i]; | |
| 3459 } | |
| 3460 delete[] evacuators; | |
| 3461 | |
| 3462 if (FLAG_trace_evacuation) { | |
| 3463 PrintIsolate(isolate(), | |
| 3464 "%8.0f ms: evacuation-summary: parallel=%s pages=%d " | |
| 3465 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS | |
| 3466 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n", | |
| 3467 isolate()->time_millis_since_init(), | |
| 3468 FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(), | |
| 3469 abandoned_pages, wanted_num_tasks, job.NumberOfTasks(), | |
| 3470 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), | |
| 3471 live_bytes, compaction_speed); | |
| 3472 } | |
| 3473 } | 3848 } |
| 3474 | 3849 |
| 3475 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 3850 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
| 3476 public: | 3851 public: |
| 3477 virtual Object* RetainAs(Object* object) { | 3852 virtual Object* RetainAs(Object* object) { |
| 3478 if (object->IsHeapObject()) { | 3853 if (object->IsHeapObject()) { |
| 3479 HeapObject* heap_object = HeapObject::cast(object); | 3854 HeapObject* heap_object = HeapObject::cast(object); |
| 3480 MapWord map_word = heap_object->map_word(); | 3855 MapWord map_word = heap_object->map_word(); |
| 3481 if (map_word.IsForwardingAddress()) { | 3856 if (map_word.IsForwardingAddress()) { |
| 3482 return map_word.ToForwardingAddress(); | 3857 return map_word.ToForwardingAddress(); |
| (...skipping 304 matching lines...) | |
| 3787 UpdateUntypedPointers(heap, chunk); | 4162 UpdateUntypedPointers(heap, chunk); |
| 3788 UpdateTypedPointers(heap, chunk); | 4163 UpdateTypedPointers(heap, chunk); |
| 3789 return true; | 4164 return true; |
| 3790 } | 4165 } |
| 3791 static const bool NeedSequentialFinalization = false; | 4166 static const bool NeedSequentialFinalization = false; |
| 3792 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 4167 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
| 3793 } | 4168 } |
| 3794 | 4169 |
| 3795 private: | 4170 private: |
| 3796 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { | 4171 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { |
| 4172 base::LockGuard<base::RecursiveMutex> guard(chunk->mutex()); | |
| 3797 if (type == OLD_TO_NEW) { | 4173 if (type == OLD_TO_NEW) { |
| 3798 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { | 4174 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { |
| 3799 return CheckAndUpdateOldToNewSlot(heap, slot); | 4175 return CheckAndUpdateOldToNewSlot(heap, slot); |
| 3800 }); | 4176 }); |
| 3801 } else { | 4177 } else { |
| 3802 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { | 4178 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { |
| 3803 return UpdateSlot(reinterpret_cast<Object**>(slot)); | 4179 return UpdateSlot(reinterpret_cast<Object**>(slot)); |
| 3804 }); | 4180 }); |
| 3805 } | 4181 } |
| 3806 } | 4182 } |
| (...skipping 86 matching lines...) | |
| 3893 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 4269 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
| 3894 RememberedSet<type>::IterateMemoryChunks( | 4270 RememberedSet<type>::IterateMemoryChunks( |
| 3895 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); | 4271 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); |
| 3896 int num_pages = job.NumberOfPages(); | 4272 int num_pages = job.NumberOfPages(); |
| 3897 int num_tasks = NumberOfPointerUpdateTasks(num_pages); | 4273 int num_tasks = NumberOfPointerUpdateTasks(num_pages); |
| 3898 job.Run(num_tasks, [](int i) { return 0; }); | 4274 job.Run(num_tasks, [](int i) { return 0; }); |
| 3899 } | 4275 } |
| 3900 | 4276 |
| 3901 class ToSpacePointerUpdateJobTraits { | 4277 class ToSpacePointerUpdateJobTraits { |
| 3902 public: | 4278 public: |
| 3903 typedef std::pair<Address, Address> PerPageData; | 4279 struct PageData { |
| 4280 Address start; | |
| 4281 Address end; | |
| 4282 MarkingState marking_state; | |
| 4283 }; | |
| 4284 | |
| 4285 typedef PageData PerPageData; | |
| 3904 typedef PointersUpdatingVisitor* PerTaskData; | 4286 typedef PointersUpdatingVisitor* PerTaskData; |
| 3905 | 4287 |
| 3906 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, | 4288 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, |
| 3907 MemoryChunk* chunk, PerPageData limits) { | 4289 MemoryChunk* chunk, PerPageData page_data) { |
| 3908 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { | 4290 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { |
| 3909 // New->new promoted pages contain garbage so they require iteration | 4291 // New->new promoted pages contain garbage so they require iteration |
| 3910 // using markbits. | 4292 // using markbits. |
| 3911 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits); | 4293 ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data); |
| 3912 } else { | 4294 } else { |
| 3913 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits); | 4295 ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data); |
| 3914 } | 4296 } |
| 3915 return true; | 4297 return true; |
| 3916 } | 4298 } |
| 3917 | 4299 |
| 3918 static const bool NeedSequentialFinalization = false; | 4300 static const bool NeedSequentialFinalization = false; |
| 3919 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 4301 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
| 3920 } | 4302 } |
| 3921 | 4303 |
| 3922 private: | 4304 private: |
| 3923 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, | 4305 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, |
| 3924 MemoryChunk* chunk, | 4306 MemoryChunk* chunk, |
| 3925 PerPageData limits) { | 4307 PerPageData page_data) { |
| 3926 for (Address cur = limits.first; cur < limits.second;) { | 4308 for (Address cur = page_data.start; cur < page_data.end;) { |
| 3927 HeapObject* object = HeapObject::FromAddress(cur); | 4309 HeapObject* object = HeapObject::FromAddress(cur); |
| 3928 Map* map = object->map(); | 4310 Map* map = object->map(); |
| 3929 int size = object->SizeFromMap(map); | 4311 int size = object->SizeFromMap(map); |
| 3930 object->IterateBody(map->instance_type(), size, visitor); | 4312 object->IterateBody(map->instance_type(), size, visitor); |
| 3931 cur += size; | 4313 cur += size; |
| 3932 } | 4314 } |
| 3933 } | 4315 } |
| 3934 | 4316 |
| 3935 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, | 4317 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, |
| 3936 MemoryChunk* chunk, | 4318 MemoryChunk* chunk, |
| 3937 PerPageData limits) { | 4319 PerPageData page_data) { |
| 3938 LiveObjectIterator<kBlackObjects> it(chunk, MarkingState::Internal(chunk)); | 4320 LiveObjectIterator<kBlackObjects> it(chunk, page_data.marking_state); |
| 3939 HeapObject* object = NULL; | 4321 HeapObject* object = NULL; |
| 3940 while ((object = it.Next()) != NULL) { | 4322 while ((object = it.Next()) != NULL) { |
| 3941 Map* map = object->map(); | 4323 Map* map = object->map(); |
| 3942 int size = object->SizeFromMap(map); | 4324 int size = object->SizeFromMap(map); |
| 3943 object->IterateBody(map->instance_type(), size, visitor); | 4325 object->IterateBody(map->instance_type(), size, visitor); |
| 3944 } | 4326 } |
| 3945 } | 4327 } |
| 3946 }; | 4328 }; |
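The split in the traits class above matters because a linear walk assumes every address in [start, end) belongs to a live object, which only holds for pages filled by evacuation; pages promoted new->new in place keep their dead objects and must be walked through the mark bits instead. A minimal sketch of the two strategies, with hypothetical simplified types (SimpleObject and SimplePage are illustration only, not the real V8 object or page layout):

#include <cstddef>
#include <vector>

struct SimpleObject {
  std::size_t size;  // size of this object
  bool black;        // true if the object is marked live
};

struct SimplePage {
  std::vector<SimpleObject> objects;  // objects laid out back to back
};

// "Visit all": valid only when the whole range is densely packed with live
// objects, as on a page freshly filled by evacuation.
template <typename Visitor>
void VisitAllObjects(const SimplePage& page, Visitor&& visit) {
  for (const SimpleObject& object : page.objects) visit(object);
}

// "Visit live": skips dead objects via the mark bits, as required for pages
// promoted in place (new->new) that still contain garbage between the live
// objects.
template <typename Visitor>
void VisitLiveObjects(const SimplePage& page, Visitor&& visit) {
  for (const SimpleObject& object : page.objects) {
    if (object.black) visit(object);
  }
}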
| 3947 | 4329 |
| 3948 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { | 4330 template <class MarkingStateProvider> |
| 4331 void UpdateToSpacePointersInParallel( | |
| 4332 Heap* heap, base::Semaphore* semaphore, | |
| 4333 const MarkingStateProvider& marking_state_provider) { | |
| 3949 PageParallelJob<ToSpacePointerUpdateJobTraits> job( | 4334 PageParallelJob<ToSpacePointerUpdateJobTraits> job( |
| 3950 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 4335 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
| 3951 Address space_start = heap->new_space()->bottom(); | 4336 Address space_start = heap->new_space()->bottom(); |
| 3952 Address space_end = heap->new_space()->top(); | 4337 Address space_end = heap->new_space()->top(); |
| 3953 for (Page* page : PageRange(space_start, space_end)) { | 4338 for (Page* page : PageRange(space_start, space_end)) { |
| 3954 Address start = | 4339 Address start = |
| 3955 page->Contains(space_start) ? space_start : page->area_start(); | 4340 page->Contains(space_start) ? space_start : page->area_start(); |
| 3956 Address end = page->Contains(space_end) ? space_end : page->area_end(); | 4341 Address end = page->Contains(space_end) ? space_end : page->area_end(); |
| 3957 job.AddPage(page, std::make_pair(start, end)); | 4342 job.AddPage(page, {start, end, marking_state_provider.marking_state(page)}); |
| 3958 } | 4343 } |
| 3959 PointersUpdatingVisitor visitor; | 4344 PointersUpdatingVisitor visitor; |
| 3960 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; | 4345 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; |
| 3961 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); | 4346 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); |
| 3962 } | 4347 } |
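Since UpdateToSpacePointersInParallel only ever calls marking_state_provider.marking_state(page), any object exposing that accessor can serve as the provider, which is why the collectors can simply pass *this below. A hedged sketch of the expected shape, using hypothetical names and stand-in types (in this patch the real providers are the collector classes themselves):

// Illustration-only stand-ins for the V8 types involved.
struct FakePage {};
struct FakeMarkingState {};

// Hypothetical provider: the only requirement the template places on
// MarkingStateProvider is a marking_state(page) accessor returning the
// marking state to use for that page.
class ExampleMarkingStateProvider {
 public:
  FakeMarkingState marking_state(FakePage* /*page*/) const {
    return FakeMarkingState();
  }
};

// Hypothetical consumer mirroring the template above: it never names a
// concrete collector, it just asks the provider for each page's state.
template <class MarkingStateProvider>
FakeMarkingState StateForPage(const MarkingStateProvider& provider,
                              FakePage* page) {
  return provider.marking_state(page);
}

Passing the collector itself lets the full mark-compact and the minor mark-compact paths share one job while each supplies its own per-page marking state.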
| 3963 | 4348 |
| 3964 void MarkCompactCollector::UpdatePointersAfterEvacuation() { | 4349 void MarkCompactCollector::UpdatePointersAfterEvacuation() { |
| 3965 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); | 4350 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); |
| 3966 | 4351 |
| 3967 | 4352 |
| 3968 { | 4353 { |
| 3969 TRACE_GC(heap()->tracer(), | 4354 TRACE_GC(heap()->tracer(), |
| 3970 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); | 4355 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); |
| 3971 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_); | 4356 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, |
| 4357 *this); | |
| 3972 // Update roots. | 4358 // Update roots. |
| 3973 PointersUpdatingVisitor updating_visitor; | 4359 PointersUpdatingVisitor updating_visitor; |
| 3974 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 4360 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
| 3975 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); | 4361 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); |
| 3976 } | 4362 } |
| 3977 | 4363 |
| 3978 { | 4364 { |
| 3979 Heap* heap = this->heap(); | 4365 Heap* heap = this->heap(); |
| 3980 TRACE_GC(heap->tracer(), | 4366 TRACE_GC(heap->tracer(), |
| 3981 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); | 4367 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); |
| 3982 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); | 4368 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); |
| 3983 } | 4369 } |
| 3984 | 4370 |
| 3985 { | 4371 { |
| 3986 TRACE_GC(heap()->tracer(), | 4372 TRACE_GC(heap()->tracer(), |
| 3987 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); | 4373 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); |
| 3988 // Update pointers from external string table. | 4374 // Update pointers from external string table. |
| 3989 heap_->UpdateReferencesInExternalStringTable( | 4375 heap_->UpdateReferencesInExternalStringTable( |
| 3990 &UpdateReferenceInExternalStringTableEntry); | 4376 &UpdateReferenceInExternalStringTableEntry); |
| 3991 | 4377 |
| 3992 EvacuationWeakObjectRetainer evacuation_object_retainer; | 4378 EvacuationWeakObjectRetainer evacuation_object_retainer; |
| 3993 heap()->ProcessWeakListRoots(&evacuation_object_retainer); | 4379 heap()->ProcessWeakListRoots(&evacuation_object_retainer); |
| 3994 } | 4380 } |
| 3995 } | 4381 } |
| 3996 | 4382 |
| 4383 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() { | |
| 4384 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); | |
| 4385 | |
| 4386 PointersUpdatingVisitor updating_visitor; | |
| 4387 | |
| 4388 { | |
| 4389 TRACE_GC(heap()->tracer(), | |
| 4390 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); | |
| 4391 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, | |
| 4392 *this); | |
| 4393 // TODO(mlippautz): Iteration mode is not optimal as we process all | |
| 4394 // global handles. Find a way to only process the ones related to new | |
| 4395 // space. | |
| 4396 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | |
| 4397 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); | |
| 4398 } | |
| 4399 | |
| 4400 { | |
| 4401 TRACE_GC(heap()->tracer(), | |
| 4402 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); | |
| 4403 | |
| 4404 EvacuationWeakObjectRetainer evacuation_object_retainer; | |
| 4405 heap()->ProcessWeakListRoots(&evacuation_object_retainer); | |
| 4406 | |
| 4407 // Update pointers from external string table. | |
| 4408 heap()->UpdateNewSpaceReferencesInExternalStringTable( | |
| 4409 &UpdateReferenceInExternalStringTableEntry); | |
| 4410 heap()->IterateEncounteredWeakCollections(&updating_visitor); | |
| 4411 heap()->set_encountered_weak_collections(Smi::kZero); | |
| 4412 } | |
| 4413 } | |
| 3997 | 4414 |
| 3998 void MarkCompactCollector::ReleaseEvacuationCandidates() { | 4415 void MarkCompactCollector::ReleaseEvacuationCandidates() { |
| 3999 for (Page* p : old_space_evacuation_pages_) { | 4416 for (Page* p : old_space_evacuation_pages_) { |
| 4000 if (!p->IsEvacuationCandidate()) continue; | 4417 if (!p->IsEvacuationCandidate()) continue; |
| 4001 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 4418 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 4002 MarkingState::Internal(p).SetLiveBytes(0); | 4419 MarkingState::Internal(p).SetLiveBytes(0); |
| 4003 CHECK(p->SweepingDone()); | 4420 CHECK(p->SweepingDone()); |
| 4004 space->ReleasePage(p); | 4421 space->ReleasePage(p); |
| 4005 } | 4422 } |
| 4006 old_space_evacuation_pages_.Rewind(0); | 4423 old_space_evacuation_pages_.Rewind(0); |
| (...skipping 202 matching lines...) | |
| 4209 // The target is always in old space, we don't have to record the slot in | 4626 // The target is always in old space, we don't have to record the slot in |
| 4210 // the old-to-new remembered set. | 4627 // the old-to-new remembered set. |
| 4211 DCHECK(!heap()->InNewSpace(target)); | 4628 DCHECK(!heap()->InNewSpace(target)); |
| 4212 RecordRelocSlot(host, &rinfo, target); | 4629 RecordRelocSlot(host, &rinfo, target); |
| 4213 } | 4630 } |
| 4214 } | 4631 } |
| 4215 } | 4632 } |
| 4216 | 4633 |
| 4217 } // namespace internal | 4634 } // namespace internal |
| 4218 } // namespace v8 | 4635 } // namespace v8 |