Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 192 matching lines...) | |
| 203 CHECK(ObjectMarking::IsBlackOrGrey(object, marking_state(object))); | 203 CHECK(ObjectMarking::IsBlackOrGrey(object, marking_state(object))); |
| 204 } | 204 } |
| 205 } | 205 } |
| 206 } | 206 } |
| 207 }; | 207 }; |
| 208 | 208 |
| 209 class EvacuationVerifier : public ObjectVisitor { | 209 class EvacuationVerifier : public ObjectVisitor { |
| 210 public: | 210 public: |
| 211 virtual void Run() = 0; | 211 virtual void Run() = 0; |
| 212 | 212 |
| 213 void VisitPointers(Object** start, Object** end) override { | |
| 214 for (Object** current = start; current < end; current++) { | |
| 215 if ((*current)->IsHeapObject()) { | |
| 216 HeapObject* object = HeapObject::cast(*current); | |
| 217 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); | |
| 218 } | |
| 219 } | |
| 220 } | |
| 221 | |
| 222 protected: | 213 protected: |
| 223 explicit EvacuationVerifier(Heap* heap) : heap_(heap) {} | 214 explicit EvacuationVerifier(Heap* heap) : heap_(heap) {} |
| 224 | 215 |
| 225 void VerifyRoots(VisitMode mode); | 216 void VerifyRoots(VisitMode mode); |
| 226 void VerifyEvacuationOnPage(Address start, Address end); | 217 void VerifyEvacuationOnPage(Address start, Address end); |
| 227 void VerifyEvacuation(NewSpace* new_space); | 218 void VerifyEvacuation(NewSpace* new_space); |
| 228 void VerifyEvacuation(PagedSpace* paged_space); | 219 void VerifyEvacuation(PagedSpace* paged_space); |
| 229 | 220 |
| 230 Heap* heap_; | 221 Heap* heap_; |
| 231 }; | 222 }; |
| (...skipping 38 matching lines...) | |
| 270 public: | 261 public: |
| 271 explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {} | 262 explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {} |
| 272 | 263 |
| 273 void Run() override { | 264 void Run() override { |
| 274 VerifyRoots(VISIT_ALL); | 265 VerifyRoots(VISIT_ALL); |
| 275 VerifyEvacuation(heap_->new_space()); | 266 VerifyEvacuation(heap_->new_space()); |
| 276 VerifyEvacuation(heap_->old_space()); | 267 VerifyEvacuation(heap_->old_space()); |
| 277 VerifyEvacuation(heap_->code_space()); | 268 VerifyEvacuation(heap_->code_space()); |
| 278 VerifyEvacuation(heap_->map_space()); | 269 VerifyEvacuation(heap_->map_space()); |
| 279 } | 270 } |
| 271 | |
| 272 void VisitPointers(Object** start, Object** end) override { | |
| 273 for (Object** current = start; current < end; current++) { | |
| 274 if ((*current)->IsHeapObject()) { | |
| 275 HeapObject* object = HeapObject::cast(*current); | |
| 276 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); | |
| 277 } | |
| 278 } | |
| 279 } | |
| 280 }; | |
| 281 | |
| 282 class YoungGenerationEvacuationVerifier : public EvacuationVerifier { | |
| 283 public: | |
| 284 explicit YoungGenerationEvacuationVerifier(Heap* heap) | |
| 285 : EvacuationVerifier(heap) {} | |
| 286 | |
| 287 void Run() override { | |
| 288 VerifyRoots(VISIT_ALL_IN_SCAVENGE); | |
| 289 VerifyEvacuation(heap_->new_space()); | |
| 290 } | |
| 291 | |
| 292 void VisitPointers(Object** start, Object** end) override { | |
| 293 for (Object** current = start; current < end; current++) { | |
| 294 if ((*current)->IsHeapObject()) { | |
| 295 HeapObject* object = HeapObject::cast(*current); | |
| 296 if (!heap_->InNewSpace(object)) return; | |
| 297 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); | |
| 298 } | |
| 299 } | |
| 300 } | |
| 280 }; | 301 }; |
| 281 | 302 |
| 282 } // namespace | 303 } // namespace |
| 283 #endif // VERIFY_HEAP | 304 #endif // VERIFY_HEAP |
| 284 | 305 |
| 285 // ============================================================================= | 306 // ============================================================================= |
| 286 // MarkCompactCollector | 307 // MarkCompactCollector |
| 287 // ============================================================================= | 308 // ============================================================================= |
| 288 | 309 |
| 289 MarkCompactCollector::MarkCompactCollector(Heap* heap) | 310 MarkCompactCollector::MarkCompactCollector(Heap* heap) |
| (...skipping 1251 matching lines...) | |
| 1541 | 1562 |
| 1542 private: | 1563 private: |
| 1543 Heap* heap_; | 1564 Heap* heap_; |
| 1544 int pointers_removed_; | 1565 int pointers_removed_; |
| 1545 HeapObject* table_; | 1566 HeapObject* table_; |
| 1546 }; | 1567 }; |
| 1547 | 1568 |
| 1548 typedef StringTableCleaner<false, true> InternalizedStringTableCleaner; | 1569 typedef StringTableCleaner<false, true> InternalizedStringTableCleaner; |
| 1549 typedef StringTableCleaner<true, false> ExternalStringTableCleaner; | 1570 typedef StringTableCleaner<true, false> ExternalStringTableCleaner; |
| 1550 | 1571 |
| 1572 // Helper class for pruning the string table. | |
| 1573 class YoungGenerationExternalStringTableCleaner : public ObjectVisitor { | |
| 1574 public: | |
| 1575 YoungGenerationExternalStringTableCleaner( | |
| 1576 const MinorMarkCompactCollector& collector) | |
| 1577 : heap_(collector.heap()), collector_(collector) {} | |
| 1578 | |
| 1579 void VisitPointers(Object** start, Object** end) override { | |
| 1580 // Visit all HeapObject pointers in [start, end). | |
| 1581 for (Object** p = start; p < end; p++) { | |
| 1582 Object* o = *p; | |
| 1583 if (o->IsHeapObject()) { | |
| 1584 HeapObject* heap_object = HeapObject::cast(o); | |
| 1585 if (ObjectMarking::IsWhite(heap_object, | |
| 1586 collector_.marking_state(heap_object))) { | |
| 1587 if (o->IsExternalString()) { | |
| 1588 heap_->FinalizeExternalString(String::cast(*p)); | |
| 1589 } else { | |
| 1590 // The original external string may have been internalized. | |
| 1591 DCHECK(o->IsThinString()); | |
| 1592 } | |
| 1593 // Set the entry to the_hole_value (as deleted). | |
| 1594 *p = heap_->the_hole_value(); | |
| 1595 } | |
| 1596 } | |
| 1597 } | |
| 1598 } | |
| 1599 | |
| 1600 private: | |
| 1601 Heap* heap_; | |
| 1602 const MinorMarkCompactCollector& collector_; | |
| 1603 }; | |
| 1604 | |
| 1605 // Marked young generation objects and all old generation objects will be | |
| 1606 // retained. | |
| 1607 class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer { | |
| 1608 public: | |
| 1609 explicit MinorMarkCompactWeakObjectRetainer( | |
| 1610 const MinorMarkCompactCollector& collector) | |
| 1611 : collector_(collector) {} | |
| 1612 | |
| 1613 virtual Object* RetainAs(Object* object) { | |
| 1614 HeapObject* heap_object = HeapObject::cast(object); | |
| 1615 if (!collector_.heap()->InNewSpace(heap_object)) return object; | |
| 1616 | |
| 1617 DCHECK(!ObjectMarking::IsGrey(heap_object, | |
| 1618 MarkingState::External(heap_object))); | |
| 1619 if (ObjectMarking::IsBlack(heap_object, | |
| 1620 collector_.marking_state(heap_object))) { | |
| 1621 return object; | |
| 1622 } | |
| 1623 return nullptr; | |
| 1624 } | |
| 1625 | |
| 1626 private: | |
| 1627 const MinorMarkCompactCollector& collector_; | |
| 1628 }; | |
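For orientation: the retainer above follows the WeakObjectRetainer contract, where RetainAs() returns the (possibly updated) object to keep a weak-list entry and nullptr to drop it; later in this CL it is handed to heap()->ProcessYoungWeakReferences(). The sketch below only illustrates that contract. The types and the ProcessWeakList() helper are hypothetical stand-ins, not V8 code.

```cpp
// Illustrative sketch of the WeakObjectRetainer contract (not V8 code).
#include <cstdio>
#include <vector>

struct HeapObject {
  bool is_marked;
};

class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() {}
  // Return the object (possibly a forwarded copy) to retain it, or nullptr to
  // have the entry dropped from the weak list.
  virtual HeapObject* RetainAs(HeapObject* object) = 0;
};

class MarkedObjectRetainer : public WeakObjectRetainer {
 public:
  HeapObject* RetainAs(HeapObject* object) override {
    return object->is_marked ? object : nullptr;  // Drop unmarked entries.
  }
};

// Stand-in for the heap-side loop (e.g. ProcessYoungWeakReferences): keep the
// entries the retainer accepts, compacting the list in place.
void ProcessWeakList(std::vector<HeapObject*>* list,
                     WeakObjectRetainer* retainer) {
  size_t kept = 0;
  for (HeapObject* entry : *list) {
    if (HeapObject* retained = retainer->RetainAs(entry)) {
      (*list)[kept++] = retained;
    }
  }
  list->resize(kept);
}

int main() {
  HeapObject live{true}, dead{false};
  std::vector<HeapObject*> weak_list = {&live, &dead};
  MarkedObjectRetainer retainer;
  ProcessWeakList(&weak_list, &retainer);
  std::printf("%zu of 2 entries kept\n", weak_list.size());  // 1 of 2
  return 0;
}
```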
| 1629 | |
| 1551 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects | 1630 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects |
| 1552 // are retained. | 1631 // are retained. |
| 1553 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { | 1632 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { |
| 1554 public: | 1633 public: |
| 1555 virtual Object* RetainAs(Object* object) { | 1634 virtual Object* RetainAs(Object* object) { |
| 1556 HeapObject* heap_object = HeapObject::cast(object); | 1635 HeapObject* heap_object = HeapObject::cast(object); |
| 1557 DCHECK(!ObjectMarking::IsGrey(heap_object, | 1636 DCHECK(!ObjectMarking::IsGrey(heap_object, |
| 1558 MarkingState::Internal(heap_object))); | 1637 MarkingState::Internal(heap_object))); |
| 1559 if (ObjectMarking::IsBlack(heap_object, | 1638 if (ObjectMarking::IsBlack(heap_object, |
| 1560 MarkingState::Internal(heap_object))) { | 1639 MarkingState::Internal(heap_object))) { |
| (...skipping 38 matching lines...) | |
| 1599 LiveObjectIterator<kGreyObjects> it(p, MarkingState::Internal(p)); | 1678 LiveObjectIterator<kGreyObjects> it(p, MarkingState::Internal(p)); |
| 1600 HeapObject* object = NULL; | 1679 HeapObject* object = NULL; |
| 1601 while ((object = it.Next()) != NULL) { | 1680 while ((object = it.Next()) != NULL) { |
| 1602 DCHECK(ObjectMarking::IsGrey(object, MarkingState::Internal(object))); | 1681 DCHECK(ObjectMarking::IsGrey(object, MarkingState::Internal(object))); |
| 1603 ObjectMarking::GreyToBlack(object, MarkingState::Internal(object)); | 1682 ObjectMarking::GreyToBlack(object, MarkingState::Internal(object)); |
| 1604 PushBlack(object); | 1683 PushBlack(object); |
| 1605 if (marking_deque()->IsFull()) return; | 1684 if (marking_deque()->IsFull()) return; |
| 1606 } | 1685 } |
| 1607 } | 1686 } |
| 1608 | 1687 |
| 1609 class RecordMigratedSlotVisitor final : public ObjectVisitor { | 1688 class RecordMigratedSlotVisitor : public ObjectVisitor { |
| 1610 public: | 1689 public: |
| 1690 class HostScope { | |
| 1691 public: | |
| 1692 HostScope(RecordMigratedSlotVisitor* visitor, HeapObject* object) | |
| 1693 : visitor_(visitor) { | |
| 1694 DCHECK_NOT_NULL(object); | |
| 1695 visitor_->set_host(object); | |
| 1696 } | |
| 1697 ~HostScope() { visitor_->set_host(nullptr); } | |
| 1698 | |
| 1699 private: | |
| 1700 RecordMigratedSlotVisitor* visitor_; | |
| 1701 }; | |
| 1702 | |
| 1611 explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector) | 1703 explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector) |
| 1612 : collector_(collector) {} | 1704 : collector_(collector), host_(nullptr) {} |
| 1613 | 1705 |
| 1614 inline void VisitPointer(Object** p) final { | 1706 inline void VisitPointer(Object** p) final { |
| 1615 RecordMigratedSlot(*p, reinterpret_cast<Address>(p)); | 1707 RecordMigratedSlot(*p, reinterpret_cast<Address>(p)); |
| 1616 } | 1708 } |
| 1617 | 1709 |
| 1618 inline void VisitPointers(Object** start, Object** end) final { | 1710 inline void VisitPointers(Object** start, Object** end) final { |
| 1619 while (start < end) { | 1711 while (start < end) { |
| 1620 RecordMigratedSlot(*start, reinterpret_cast<Address>(start)); | 1712 RecordMigratedSlot(*start, reinterpret_cast<Address>(start)); |
| 1621 ++start; | 1713 ++start; |
| 1622 } | 1714 } |
| 1623 } | 1715 } |
| 1624 | 1716 |
| 1625 inline void VisitCodeEntry(Address code_entry_slot) final { | 1717 inline void VisitCodeEntry(Address code_entry_slot) override { |
| 1626 Address code_entry = Memory::Address_at(code_entry_slot); | 1718 Address code_entry = Memory::Address_at(code_entry_slot); |
| 1627 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { | 1719 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { |
| 1628 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot), | 1720 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot), |
| 1629 nullptr, CODE_ENTRY_SLOT, | 1721 nullptr, CODE_ENTRY_SLOT, |
| 1630 code_entry_slot); | 1722 code_entry_slot); |
| 1631 } | 1723 } |
| 1632 } | 1724 } |
| 1633 | 1725 |
| 1634 inline void VisitCodeTarget(RelocInfo* rinfo) final { | 1726 inline void VisitCodeTarget(RelocInfo* rinfo) override { |
| 1635 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode())); | 1727 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode())); |
| 1636 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); | 1728 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
| 1637 Code* host = rinfo->host(); | 1729 Code* host = rinfo->host(); |
| 1638 // The target is always in old space, we don't have to record the slot in | 1730 // The target is always in old space, we don't have to record the slot in |
| 1639 // the old-to-new remembered set. | 1731 // the old-to-new remembered set. |
| 1640 DCHECK(!collector_->heap()->InNewSpace(target)); | 1732 DCHECK(!collector_->heap()->InNewSpace(target)); |
| 1641 collector_->RecordRelocSlot(host, rinfo, target); | 1733 collector_->RecordRelocSlot(host, rinfo, target); |
| 1642 } | 1734 } |
| 1643 | 1735 |
| 1644 inline void VisitDebugTarget(RelocInfo* rinfo) final { | 1736 inline void VisitDebugTarget(RelocInfo* rinfo) override { |
| 1645 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && | 1737 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && |
| 1646 rinfo->IsPatchedDebugBreakSlotSequence()); | 1738 rinfo->IsPatchedDebugBreakSlotSequence()); |
| 1647 Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address()); | 1739 Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address()); |
| 1648 Code* host = rinfo->host(); | 1740 Code* host = rinfo->host(); |
| 1649 // The target is always in old space, we don't have to record the slot in | 1741 // The target is always in old space, we don't have to record the slot in |
| 1650 // the old-to-new remembered set. | 1742 // the old-to-new remembered set. |
| 1651 DCHECK(!collector_->heap()->InNewSpace(target)); | 1743 DCHECK(!collector_->heap()->InNewSpace(target)); |
| 1652 collector_->RecordRelocSlot(host, rinfo, target); | 1744 collector_->RecordRelocSlot(host, rinfo, target); |
| 1653 } | 1745 } |
| 1654 | 1746 |
| 1655 inline void VisitEmbeddedPointer(RelocInfo* rinfo) final { | 1747 inline void VisitEmbeddedPointer(RelocInfo* rinfo) override { |
| 1656 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); | 1748 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); |
| 1657 HeapObject* object = HeapObject::cast(rinfo->target_object()); | 1749 HeapObject* object = HeapObject::cast(rinfo->target_object()); |
| 1658 Code* host = rinfo->host(); | 1750 Code* host = rinfo->host(); |
| 1659 collector_->heap()->RecordWriteIntoCode(host, rinfo, object); | 1751 collector_->heap()->RecordWriteIntoCode(host, rinfo, object); |
| 1660 collector_->RecordRelocSlot(host, rinfo, object); | 1752 collector_->RecordRelocSlot(host, rinfo, object); |
| 1661 } | 1753 } |
| 1662 | 1754 |
| 1663 inline void VisitCell(RelocInfo* rinfo) final { | 1755 inline void VisitCell(RelocInfo* rinfo) override { |
| 1664 DCHECK(rinfo->rmode() == RelocInfo::CELL); | 1756 DCHECK(rinfo->rmode() == RelocInfo::CELL); |
| 1665 Cell* cell = rinfo->target_cell(); | 1757 Cell* cell = rinfo->target_cell(); |
| 1666 Code* host = rinfo->host(); | 1758 Code* host = rinfo->host(); |
| 1667 // The cell is always in old space, we don't have to record the slot in | 1759 // The cell is always in old space, we don't have to record the slot in |
| 1668 // the old-to-new remembered set. | 1760 // the old-to-new remembered set. |
| 1669 DCHECK(!collector_->heap()->InNewSpace(cell)); | 1761 DCHECK(!collector_->heap()->InNewSpace(cell)); |
| 1670 collector_->RecordRelocSlot(host, rinfo, cell); | 1762 collector_->RecordRelocSlot(host, rinfo, cell); |
| 1671 } | 1763 } |
| 1672 | 1764 |
| 1673 // Entries that will never move. | 1765 // Entries that will never move. |
| 1674 inline void VisitCodeAgeSequence(RelocInfo* rinfo) final { | 1766 inline void VisitCodeAgeSequence(RelocInfo* rinfo) override { |
| 1675 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); | 1767 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); |
| 1676 Code* stub = rinfo->code_age_stub(); | 1768 Code* stub = rinfo->code_age_stub(); |
| 1677 USE(stub); | 1769 USE(stub); |
| 1678 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate()); | 1770 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate()); |
| 1679 } | 1771 } |
| 1680 | 1772 |
| 1681 // Entries that are skipped for recording. | 1773 // Entries that are skipped for recording. |
| 1682 inline void VisitExternalReference(RelocInfo* rinfo) final {} | 1774 inline void VisitExternalReference(RelocInfo* rinfo) final {} |
| 1683 inline void VisitExternalReference(Address* p) final {} | 1775 inline void VisitExternalReference(Address* p) final {} |
| 1684 inline void VisitRuntimeEntry(RelocInfo* rinfo) final {} | 1776 inline void VisitRuntimeEntry(RelocInfo* rinfo) final {} |
| 1685 inline void VisitInternalReference(RelocInfo* rinfo) final {} | 1777 inline void VisitInternalReference(RelocInfo* rinfo) final {} |
| 1686 | 1778 |
| 1687 private: | 1779 protected: |
| 1688 inline void RecordMigratedSlot(Object* value, Address slot) { | 1780 void set_host(HeapObject* host) { host_ = host; } |
| 1781 | |
| 1782 inline virtual void RecordMigratedSlot(Object* value, Address slot) { | |
| 1689 if (value->IsHeapObject()) { | 1783 if (value->IsHeapObject()) { |
| 1690 Page* p = Page::FromAddress(reinterpret_cast<Address>(value)); | 1784 Page* p = Page::FromAddress(reinterpret_cast<Address>(value)); |
| 1691 if (p->InNewSpace()) { | 1785 if (p->InNewSpace()) { |
| 1692 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot); | 1786 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot); |
| 1693 } else if (p->IsEvacuationCandidate()) { | 1787 } else if (p->IsEvacuationCandidate()) { |
| 1694 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot); | 1788 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot); |
| 1695 } | 1789 } |
| 1696 } | 1790 } |
| 1697 } | 1791 } |
| 1698 | 1792 |
| 1699 MarkCompactCollector* collector_; | 1793 MarkCompactCollector* collector_; |
| 1794 HeapObject* host_; | |
| 1795 }; | |
| 1796 | |
| 1797 class YoungGenerationRecordMigratedSlotVisitor final | |
| 1798 : public RecordMigratedSlotVisitor { | |
| 1799 public: | |
| 1800 explicit YoungGenerationRecordMigratedSlotVisitor( | |
| 1801 MarkCompactCollector* collector) | |
| 1802 : RecordMigratedSlotVisitor(collector) {} | |
| 1803 | |
| 1804 inline void VisitCodeEntry(Address code_entry_slot) final { | |
| 1805 Address code_entry = Memory::Address_at(code_entry_slot); | |
| 1806 if (Page::FromAddress(code_entry)->IsEvacuationCandidate() && | |
| 1807 IsHostObjectLive()) { | |
| 1808 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot), | |
| 1809 nullptr, CODE_ENTRY_SLOT, | |
| 1810 code_entry_slot); | |
| 1811 } | |
| 1812 } | |
| 1813 | |
| 1814 inline void VisitCodeTarget(RelocInfo* rinfo) final { UNREACHABLE(); } | |
| 1815 inline void VisitDebugTarget(RelocInfo* rinfo) final { UNREACHABLE(); } | |
| 1816 inline void VisitEmbeddedPointer(RelocInfo* rinfo) final { UNREACHABLE(); } | |
| 1817 inline void VisitCell(RelocInfo* rinfo) final { UNREACHABLE(); } | |
| 1818 inline void VisitCodeAgeSequence(RelocInfo* rinfo) final { UNREACHABLE(); } | |
| 1819 | |
| 1820 private: | |
| 1821 // Only record slots for host objects that are considered as live by the full | |
| 1822 // collector. | |
| 1823 inline bool IsHostObjectLive() { | |
| 1824 DCHECK_NOT_NULL(host_); | |
| 1825 return ObjectMarking::IsBlack(host_, collector_->marking_state(host_)); | |
| 1826 } | |
| 1827 | |
| 1828 inline void RecordMigratedSlot(Object* value, Address slot) final { | |
| 1829 if (value->IsHeapObject()) { | |
| 1830 Page* p = Page::FromAddress(reinterpret_cast<Address>(value)); | |
| 1831 if (p->InNewSpace()) { | |
| 1832 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot); | |
| 1833 } else if (p->IsEvacuationCandidate() && IsHostObjectLive()) { | |
| 1834 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot); | |
| 1835 } | |
| 1836 } | |
| 1837 } | |
| 1700 }; | 1838 }; |
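The HostScope helper added above makes the slot-recording visitor temporarily stateful: a host object is installed for the duration of one body iteration so that YoungGenerationRecordMigratedSlotVisitor can restrict old-to-old slot recording to hosts the full collector still considers live. A minimal standalone sketch of that RAII pattern follows; the names are hypothetical and none of this is V8 API.

```cpp
// Minimal RAII "host scope" sketch (hypothetical names, not V8 API).
#include <cassert>
#include <cstdio>

struct Object {};

class SlotVisitor {
 public:
  class HostScope {
   public:
    HostScope(SlotVisitor* visitor, Object* host) : visitor_(visitor) {
      assert(host != nullptr);
      visitor_->host_ = host;  // Install the host for this scope's lifetime.
    }
    ~HostScope() { visitor_->host_ = nullptr; }  // Always reset on exit.

   private:
    SlotVisitor* visitor_;
  };

  void VisitSlot() {
    // Per-slot decisions (e.g. "is the host live?") can consult host_ here.
    std::printf("visiting slot, host=%p\n", static_cast<void*>(host_));
  }

 private:
  Object* host_ = nullptr;
};

int main() {
  SlotVisitor visitor;
  Object host;
  {
    SlotVisitor::HostScope scope(&visitor, &host);
    visitor.VisitSlot();  // host_ is set while the object's body is iterated.
  }
  // host_ is back to nullptr here; the state only exists inside the scope.
  return 0;
}
```

The reviewer comment further down in this CL flags exactly this statefulness as a temporary hack.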
| 1701 | 1839 |
| 1702 class HeapObjectVisitor { | 1840 class HeapObjectVisitor { |
| 1703 public: | 1841 public: |
| 1704 virtual ~HeapObjectVisitor() {} | 1842 virtual ~HeapObjectVisitor() {} |
| 1705 virtual bool Visit(HeapObject* object) = 0; | 1843 virtual bool Visit(HeapObject* object) = 0; |
| 1706 }; | 1844 }; |
| 1707 | 1845 |
| 1846 class MigrationObserver { | |
| 1847 public: | |
| 1848 explicit MigrationObserver(Heap* heap) : heap_(heap) {} | |
| 1849 | |
| 1850 virtual ~MigrationObserver() {} | |
| 1851 virtual inline void Move(HeapObject* src, HeapObject* dst) {} | |
| 1852 | |
| 1853 protected: | |
| 1854 Heap* heap_; | |
| 1855 }; | |
| 1856 | |
| 1857 class YoungGenerationMigrationObserver : public MigrationObserver { | |
| 1858 public: | |
| 1859 YoungGenerationMigrationObserver( | |
| 1860 Heap* heap, MarkCompactCollector* mark_compact_collector, | |
| 1861 std::vector<HeapObject*>* black_allocation_objects) | |
| 1862 : MigrationObserver(heap), | |
| 1863 mark_compact_collector_(mark_compact_collector), | |
| 1864 black_allocation_objects_(black_allocation_objects) {} | |
| 1865 | |
| 1866 inline void Move(HeapObject* src, HeapObject* dst) final { | |
| 1867 // Migrate color to old generation marking in case the object survived young | |
| 1868 // generation garbage collection. | |
| 1869 if (heap_->incremental_marking()->IsMarking()) { | |
| 1870 const MarkingState state = mark_compact_collector_->marking_state(dst); | |
| 1871 if (heap_->incremental_marking()->black_allocation() && | |
| 1872 ObjectMarking::IsBlack(dst, state)) { | |
| 1873 base::LockGuard<base::Mutex> guard(&mutex_); | |
| 1874 black_allocation_objects_->push_back(dst); | |
| 1875 } | |
| 1876 | |
| 1877 // Transfer old generation marking state. | |
| 1878 if (!ObjectMarking::IsBlack(dst, state)) { | |
| 1879 IncrementalMarking::TransferColor<MarkBit::ATOMIC>(src, dst); | |
| 1880 } | |
| 1881 } | |
| 1882 } | |
| 1883 | |
| 1884 protected: | |
| 1885 base::Mutex mutex_; | |
| 1886 MarkCompactCollector* mark_compact_collector_; | |
| 1887 std::vector<HeapObject*>* black_allocation_objects_; | |
| 1888 }; | |
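MigrationObserver adds a per-object hook to evacuation: Move(src, dst) is invoked after an object's bytes have been copied, which is how YoungGenerationMigrationObserver above transfers incremental-marking state to the promoted copy. As a hedged illustration of the extension point only, here is a trivial observer that counts migrations; it is hypothetical and not part of this CL.

```cpp
// Hypothetical observer in the style of MigrationObserver (not V8 code).
#include <cstddef>
#include <cstdio>

struct HeapObject {
  size_t size;
};

class MigrationObserver {
 public:
  virtual ~MigrationObserver() {}
  // Called once per migrated object, after its bytes have been copied.
  virtual void Move(HeapObject* src, HeapObject* dst) {}
};

class CountingMigrationObserver : public MigrationObserver {
 public:
  void Move(HeapObject* /*src*/, HeapObject* dst) override {
    ++moved_objects_;
    moved_bytes_ += dst->size;
  }
  void Report() const {
    std::printf("moved %zu objects (%zu bytes)\n", moved_objects_,
                moved_bytes_);
  }

 private:
  size_t moved_objects_ = 0;
  size_t moved_bytes_ = 0;
};

int main() {
  // In the CL the observer is handed to EvacuateVisitorBase and invoked from
  // MigrateObject(); here it is simply driven by hand.
  CountingMigrationObserver observer;
  HeapObject src{64}, dst{64};
  observer.Move(&src, &dst);
  observer.Report();
  return 0;
}
```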
| 1889 | |
| 1708 class EvacuateVisitorBase : public HeapObjectVisitor { | 1890 class EvacuateVisitorBase : public HeapObjectVisitor { |
| 1709 protected: | 1891 protected: |
| 1710 enum MigrationMode { kFast, kProfiled }; | 1892 enum MigrationMode { kFast, kProfiled }; |
| 1711 | 1893 |
| 1712 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces, | 1894 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces, |
| 1713 RecordMigratedSlotVisitor* record_visitor) | 1895 RecordMigratedSlotVisitor* record_visitor, |
| 1896 MigrationObserver* migration_observer) | |
| 1714 : heap_(heap), | 1897 : heap_(heap), |
| 1715 compaction_spaces_(compaction_spaces), | 1898 compaction_spaces_(compaction_spaces), |
| 1716 record_visitor_(record_visitor), | 1899 record_visitor_(record_visitor), |
| 1900 migration_observer_(migration_observer), | |
| 1717 profiling_( | 1901 profiling_( |
| 1718 heap->isolate()->is_profiling() || | 1902 heap->isolate()->is_profiling() || |
| 1719 heap->isolate()->logger()->is_logging_code_events() || | 1903 heap->isolate()->logger()->is_logging_code_events() || |
| 1720 heap->isolate()->heap_profiler()->is_tracking_object_moves()) {} | 1904 heap->isolate()->heap_profiler()->is_tracking_object_moves()) {} |
| 1721 | 1905 |
| 1722 inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object, | 1906 inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object, |
| 1723 HeapObject** target_object) { | 1907 HeapObject** target_object) { |
| 1724 #ifdef VERIFY_HEAP | 1908 #ifdef VERIFY_HEAP |
| 1725 if (AbortCompactionForTesting(object)) return false; | 1909 if (AbortCompactionForTesting(object)) return false; |
| 1726 #endif // VERIFY_HEAP | 1910 #endif // VERIFY_HEAP |
| (...skipping 24 matching lines...) | |
| 1751 DCHECK(heap_->AllowedToBeMigrated(src, dest)); | 1935 DCHECK(heap_->AllowedToBeMigrated(src, dest)); |
| 1752 DCHECK(dest != LO_SPACE); | 1936 DCHECK(dest != LO_SPACE); |
| 1753 if (dest == OLD_SPACE) { | 1937 if (dest == OLD_SPACE) { |
| 1754 DCHECK_OBJECT_SIZE(size); | 1938 DCHECK_OBJECT_SIZE(size); |
| 1755 DCHECK(IsAligned(size, kPointerSize)); | 1939 DCHECK(IsAligned(size, kPointerSize)); |
| 1756 heap_->CopyBlock(dst_addr, src_addr, size); | 1940 heap_->CopyBlock(dst_addr, src_addr, size); |
| 1757 if ((mode == kProfiled) && dst->IsBytecodeArray()) { | 1941 if ((mode == kProfiled) && dst->IsBytecodeArray()) { |
| 1758 PROFILE(heap_->isolate(), | 1942 PROFILE(heap_->isolate(), |
| 1759 CodeMoveEvent(AbstractCode::cast(src), dst_addr)); | 1943 CodeMoveEvent(AbstractCode::cast(src), dst_addr)); |
| 1760 } | 1944 } |
| 1761 dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_); | 1945 migration_observer_->Move(src, dst); |
| 1946 { | |
| 1947 RecordMigratedSlotVisitor::HostScope host_scope(record_visitor_, dst); | |
Michael Lippautz (2017/04/21 07:05:52): This hack and all its consequences (stateful visit…
Hannes Payer (out of office) (2017/04/21 14:46:26): Nice!
| 1948 dst->IterateBodyFast(dst->map()->instance_type(), size, | |
| 1949 record_visitor_); | |
| 1950 } | |
| 1762 } else if (dest == CODE_SPACE) { | 1951 } else if (dest == CODE_SPACE) { |
| 1763 DCHECK_CODEOBJECT_SIZE(size, heap_->code_space()); | 1952 DCHECK_CODEOBJECT_SIZE(size, heap_->code_space()); |
| 1764 if (mode == kProfiled) { | 1953 if (mode == kProfiled) { |
| 1765 PROFILE(heap_->isolate(), | 1954 PROFILE(heap_->isolate(), |
| 1766 CodeMoveEvent(AbstractCode::cast(src), dst_addr)); | 1955 CodeMoveEvent(AbstractCode::cast(src), dst_addr)); |
| 1767 } | 1956 } |
| 1768 heap_->CopyBlock(dst_addr, src_addr, size); | 1957 heap_->CopyBlock(dst_addr, src_addr, size); |
| 1769 Code::cast(dst)->Relocate(dst_addr - src_addr); | 1958 Code::cast(dst)->Relocate(dst_addr - src_addr); |
| 1770 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); | 1959 migration_observer_->Move(src, dst); |
Michael Lippautz (2017/04/21 07:05:52): I plan to make the use of MigrationObserver templa…
Hannes Payer (out of office) (2017/04/21 14:46:26): During evacuation you mean, right?
Michael Lippautz (2017/04/24 13:15:08): Yes.
| 1771 dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_); | 1960 { |
| 1961 RecordMigratedSlotVisitor::HostScope host_scope(record_visitor_, dst); | |
| 1962 dst->IterateBodyFast(dst->map()->instance_type(), size, | |
| 1963 record_visitor_); | |
| 1964 } | |
| 1772 } else { | 1965 } else { |
| 1773 DCHECK_OBJECT_SIZE(size); | 1966 DCHECK_OBJECT_SIZE(size); |
| 1774 DCHECK(dest == NEW_SPACE); | 1967 DCHECK(dest == NEW_SPACE); |
| 1775 heap_->CopyBlock(dst_addr, src_addr, size); | 1968 heap_->CopyBlock(dst_addr, src_addr, size); |
| 1969 migration_observer_->Move(src, dst); | |
| 1776 } | 1970 } |
| 1777 if (mode == kProfiled) { | 1971 if (mode == kProfiled) { |
| 1778 heap_->OnMoveEvent(dst, src, size); | 1972 heap_->OnMoveEvent(dst, src, size); |
| 1779 } | 1973 } |
| 1780 base::NoBarrier_Store(reinterpret_cast<base::AtomicWord*>(src_addr), | 1974 base::NoBarrier_Store(reinterpret_cast<base::AtomicWord*>(src_addr), |
| 1781 reinterpret_cast<base::AtomicWord>(dst_addr)); | 1975 reinterpret_cast<base::AtomicWord>(dst_addr)); |
| 1782 } | 1976 } |
| 1783 | 1977 |
| 1784 #ifdef VERIFY_HEAP | 1978 #ifdef VERIFY_HEAP |
| 1785 bool AbortCompactionForTesting(HeapObject* object) { | 1979 bool AbortCompactionForTesting(HeapObject* object) { |
| (...skipping 11 matching lines...) | |
| 1797 } | 1991 } |
| 1798 } | 1992 } |
| 1799 } | 1993 } |
| 1800 return false; | 1994 return false; |
| 1801 } | 1995 } |
| 1802 #endif // VERIFY_HEAP | 1996 #endif // VERIFY_HEAP |
| 1803 | 1997 |
| 1804 Heap* heap_; | 1998 Heap* heap_; |
| 1805 CompactionSpaceCollection* compaction_spaces_; | 1999 CompactionSpaceCollection* compaction_spaces_; |
| 1806 RecordMigratedSlotVisitor* record_visitor_; | 2000 RecordMigratedSlotVisitor* record_visitor_; |
| 2001 MigrationObserver* migration_observer_; | |
| 1807 bool profiling_; | 2002 bool profiling_; |
| 1808 }; | 2003 }; |
| 1809 | 2004 |
| 1810 class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase { | 2005 class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase { |
| 1811 public: | 2006 public: |
| 1812 static const intptr_t kLabSize = 4 * KB; | 2007 static const intptr_t kLabSize = 4 * KB; |
| 1813 static const intptr_t kMaxLabObjectSize = 256; | 2008 static const intptr_t kMaxLabObjectSize = 256; |
| 1814 | 2009 |
| 1815 explicit EvacuateNewSpaceVisitor(Heap* heap, | 2010 explicit EvacuateNewSpaceVisitor(Heap* heap, |
| 1816 CompactionSpaceCollection* compaction_spaces, | 2011 CompactionSpaceCollection* compaction_spaces, |
| 1817 RecordMigratedSlotVisitor* record_visitor, | 2012 RecordMigratedSlotVisitor* record_visitor, |
| 2013 MigrationObserver* migration_observer, | |
| 1818 base::HashMap* local_pretenuring_feedback) | 2014 base::HashMap* local_pretenuring_feedback) |
| 1819 : EvacuateVisitorBase(heap, compaction_spaces, record_visitor), | 2015 : EvacuateVisitorBase(heap, compaction_spaces, record_visitor, |
| 2016 migration_observer), | |
| 1820 buffer_(LocalAllocationBuffer::InvalidBuffer()), | 2017 buffer_(LocalAllocationBuffer::InvalidBuffer()), |
| 1821 space_to_allocate_(NEW_SPACE), | 2018 space_to_allocate_(NEW_SPACE), |
| 1822 promoted_size_(0), | 2019 promoted_size_(0), |
| 1823 semispace_copied_size_(0), | 2020 semispace_copied_size_(0), |
| 1824 local_pretenuring_feedback_(local_pretenuring_feedback) {} | 2021 local_pretenuring_feedback_(local_pretenuring_feedback) {} |
| 1825 | 2022 |
| 1826 inline bool Visit(HeapObject* object) override { | 2023 inline bool Visit(HeapObject* object) override { |
| 1827 heap_->UpdateAllocationSite<Heap::kCached>(object, | 2024 heap_->UpdateAllocationSite<Heap::kCached>(object, |
| 1828 local_pretenuring_feedback_); | 2025 local_pretenuring_feedback_); |
| 1829 int size = object->Size(); | 2026 int size = object->Size(); |
| (...skipping 162 matching lines...) | |
| 1992 Heap* heap_; | 2189 Heap* heap_; |
| 1993 RecordMigratedSlotVisitor* record_visitor_; | 2190 RecordMigratedSlotVisitor* record_visitor_; |
| 1994 intptr_t moved_bytes_; | 2191 intptr_t moved_bytes_; |
| 1995 base::HashMap* local_pretenuring_feedback_; | 2192 base::HashMap* local_pretenuring_feedback_; |
| 1996 }; | 2193 }; |
| 1997 | 2194 |
| 1998 class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase { | 2195 class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase { |
| 1999 public: | 2196 public: |
| 2000 EvacuateOldSpaceVisitor(Heap* heap, | 2197 EvacuateOldSpaceVisitor(Heap* heap, |
| 2001 CompactionSpaceCollection* compaction_spaces, | 2198 CompactionSpaceCollection* compaction_spaces, |
| 2002 RecordMigratedSlotVisitor* record_visitor) | 2199 RecordMigratedSlotVisitor* record_visitor, |
| 2003 : EvacuateVisitorBase(heap, compaction_spaces, record_visitor) {} | 2200 MigrationObserver* migration_observer) |
| 2201 : EvacuateVisitorBase(heap, compaction_spaces, record_visitor, | |
| 2202 migration_observer) {} | |
| 2004 | 2203 |
| 2005 inline bool Visit(HeapObject* object) override { | 2204 inline bool Visit(HeapObject* object) override { |
| 2006 CompactionSpace* target_space = compaction_spaces_->Get( | 2205 CompactionSpace* target_space = compaction_spaces_->Get( |
| 2007 Page::FromAddress(object->address())->owner()->identity()); | 2206 Page::FromAddress(object->address())->owner()->identity()); |
| 2008 HeapObject* target_object = nullptr; | 2207 HeapObject* target_object = nullptr; |
| 2009 if (TryEvacuateObject(target_space, object, &target_object)) { | 2208 if (TryEvacuateObject(target_space, object, &target_object)) { |
| 2010 DCHECK(object->map_word().IsForwardingAddress()); | 2209 DCHECK(object->map_word().IsForwardingAddress()); |
| 2011 return true; | 2210 return true; |
| 2012 } | 2211 } |
| 2013 return false; | 2212 return false; |
| (...skipping 338 matching lines...) | |
| 2352 heap_object); | 2551 heap_object); |
| 2353 return KEEP_SLOT; | 2552 return KEEP_SLOT; |
| 2354 } | 2553 } |
| 2355 return REMOVE_SLOT; | 2554 return REMOVE_SLOT; |
| 2356 } | 2555 } |
| 2357 | 2556 |
| 2358 static bool IsUnmarkedObject(Heap* heap, Object** p) { | 2557 static bool IsUnmarkedObject(Heap* heap, Object** p) { |
| 2359 DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p)); | 2558 DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p)); |
| 2360 return heap->InNewSpace(*p) && | 2559 return heap->InNewSpace(*p) && |
| 2361 !ObjectMarking::IsBlack(HeapObject::cast(*p), | 2560 !ObjectMarking::IsBlack(HeapObject::cast(*p), |
| 2362 MarkingState::Internal(HeapObject::cast(*p))); | 2561 MarkingState::External(HeapObject::cast(*p))); |
| 2363 } | 2562 } |
| 2364 | 2563 |
| 2365 void MinorMarkCompactCollector::MarkLiveObjects() { | 2564 void MinorMarkCompactCollector::MarkLiveObjects() { |
| 2366 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK); | 2565 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK); |
| 2367 | 2566 |
| 2368 PostponeInterruptsScope postpone(isolate()); | 2567 PostponeInterruptsScope postpone(isolate()); |
| 2369 | 2568 |
| 2370 StaticYoungGenerationMarkingVisitor::Initialize(heap()); | 2569 StaticYoungGenerationMarkingVisitor::Initialize(heap()); |
| 2371 RootMarkingVisitor root_visitor(this); | 2570 RootMarkingVisitor root_visitor(this); |
| 2372 | 2571 |
| (...skipping 31 matching lines...) | |
| 2404 heap()->VisitEncounteredWeakCollections(&root_visitor); | 2603 heap()->VisitEncounteredWeakCollections(&root_visitor); |
| 2405 ProcessMarkingDeque(); | 2604 ProcessMarkingDeque(); |
| 2406 } | 2605 } |
| 2407 | 2606 |
| 2408 { | 2607 { |
| 2409 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES); | 2608 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES); |
| 2410 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( | 2609 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( |
| 2411 &IsUnmarkedObject); | 2610 &IsUnmarkedObject); |
| 2412 isolate() | 2611 isolate() |
| 2413 ->global_handles() | 2612 ->global_handles() |
| 2414 ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>( | 2613 ->IterateNewSpaceWeakUnmodifiedRoots< |
| 2415 &root_visitor); | 2614 GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&root_visitor); |
| 2416 ProcessMarkingDeque(); | 2615 ProcessMarkingDeque(); |
| 2417 } | 2616 } |
| 2418 | 2617 |
| 2419 marking_deque()->StopUsing(); | 2618 marking_deque()->StopUsing(); |
| 2420 } | 2619 } |
| 2421 | 2620 |
| 2422 void MinorMarkCompactCollector::ProcessMarkingDeque() { | 2621 void MinorMarkCompactCollector::ProcessMarkingDeque() { |
| 2423 EmptyMarkingDeque(); | 2622 EmptyMarkingDeque(); |
| 2424 DCHECK(!marking_deque()->overflowed()); | 2623 DCHECK(!marking_deque()->overflowed()); |
| 2425 DCHECK(marking_deque()->IsEmpty()); | 2624 DCHECK(marking_deque()->IsEmpty()); |
| (...skipping 11 matching lines...) | |
| 2437 object, MarkingState::External(object)))); | 2636 object, MarkingState::External(object)))); |
| 2438 | 2637 |
| 2439 Map* map = object->map(); | 2638 Map* map = object->map(); |
| 2440 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>( | 2639 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>( |
| 2441 object, MarkingState::External(object)))); | 2640 object, MarkingState::External(object)))); |
| 2442 StaticYoungGenerationMarkingVisitor::IterateBody(map, object); | 2641 StaticYoungGenerationMarkingVisitor::IterateBody(map, object); |
| 2443 } | 2642 } |
| 2444 } | 2643 } |
| 2445 | 2644 |
| 2446 void MinorMarkCompactCollector::CollectGarbage() { | 2645 void MinorMarkCompactCollector::CollectGarbage() { |
| 2646 heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted(); | |
| 2647 | |
| 2447 MarkLiveObjects(); | 2648 MarkLiveObjects(); |
| 2448 | 2649 ClearNonLiveReferences(); |
| 2449 #ifdef VERIFY_HEAP | 2650 #ifdef VERIFY_HEAP |
| 2450 if (FLAG_verify_heap) { | 2651 if (FLAG_verify_heap) { |
| 2451 YoungGenerationMarkingVerifier verifier(heap()); | 2652 YoungGenerationMarkingVerifier verifier(heap()); |
| 2452 verifier.Run(); | 2653 verifier.Run(); |
| 2453 } | 2654 } |
| 2454 #endif // VERIFY_HEAP | 2655 #endif // VERIFY_HEAP |
| 2656 | |
| 2657 std::vector<HeapObject*> black_allocation_objects; | |
| 2658 EvacuateNewSpace(&black_allocation_objects); | |
| 2659 #ifdef VERIFY_HEAP | |
| 2660 if (FLAG_verify_heap) { | |
| 2661 YoungGenerationEvacuationVerifier verifier(heap()); | |
| 2662 verifier.Run(); | |
| 2663 } | |
| 2664 #endif // VERIFY_HEAP | |
| 2665 | |
| 2666 heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge(); | |
| 2667 | |
| 2668 // Process black allocation objects after updating pointers as we otherwise | |
| 2669 // would end up with objects on the marking deque that potentially forward | |
| 2670 // to white objects. | |
| 2671 // TODO(mlippautz): Instead of processing them explicitly, we should just add | |
| 2672 // them to the marking deque for further processing. | |
| 2673 { | |
| 2674 TRACE_GC(heap()->tracer(), | |
| 2675 GCTracer::Scope::MINOR_MC_EVACUATE_PROCESS_BLACK_ALLOCATION); | |
| 2676 for (HeapObject* object : black_allocation_objects) { | |
| 2677 CHECK(ObjectMarking::IsBlack(object, MarkingState::Internal(object))); | |
| 2678 heap()->incremental_marking()->IterateBlackObject(object); | |
| 2679 } | |
| 2680 heap()->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer(); | |
| 2681 } | |
| 2682 | |
| 2683 { | |
| 2684 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS); | |
| 2685 for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(), | |
| 2686 heap()->new_space()->FromSpaceEnd())) { | |
| 2687 marking_state(p).ClearLiveness(); | |
| 2688 } | |
| 2689 } | |
| 2690 } | |
| 2691 | |
| 2692 void MinorMarkCompactCollector::ClearNonLiveReferences() { | |
| 2693 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR); | |
| 2694 | |
| 2695 { | |
| 2696 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE); | |
| 2697 // Internalized strings are always stored in old space, so there is no need | |
| 2698 // to clean them here. | |
| 2699 YoungGenerationExternalStringTableCleaner external_visitor(*this); | |
| 2700 heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor); | |
| 2701 heap()->external_string_table_.CleanUpNewSpaceStrings(); | |
| 2702 } | |
| 2703 | |
| 2704 { | |
| 2705 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS); | |
| 2706 // Process the weak references. | |
| 2707 MinorMarkCompactWeakObjectRetainer retainer(*this); | |
| 2708 heap()->ProcessYoungWeakReferences(&retainer); | |
| 2709 } | |
| 2710 } | |
| 2711 | |
| 2712 void MinorMarkCompactCollector::EvacuatePrologue() { | |
| 2713 NewSpace* new_space = heap()->new_space(); | |
| 2714 // Append the list of new space pages to be processed. | |
| 2715 for (Page* p : PageRange(new_space->bottom(), new_space->top())) { | |
| 2716 new_space_evacuation_pages_.Add(p); | |
| 2717 } | |
| 2718 new_space->Flip(); | |
| 2719 new_space->ResetAllocationInfo(); | |
| 2720 } | |
| 2721 | |
| 2722 void MinorMarkCompactCollector::EvacuateEpilogue() { | |
| 2723 heap()->new_space()->set_age_mark(heap()->new_space()->top()); | |
| 2724 } | |
| 2725 | |
| 2726 void MinorMarkCompactCollector::EvacuateNewSpace( | |
| 2727 std::vector<HeapObject*>* black_allocation_objects) { | |
| 2728 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | |
| 2729 Heap::RelocationLock relocation_lock(heap()); | |
| 2730 | |
| 2731 { | |
| 2732 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE); | |
| 2733 EvacuatePrologue(); | |
| 2734 } | |
| 2735 | |
| 2736 { | |
| 2737 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); | |
| 2738 EvacuatePagesInParallel(black_allocation_objects); | |
| 2739 } | |
| 2740 | |
| 2741 UpdatePointersAfterEvacuation(); | |
| 2742 | |
| 2743 { | |
| 2744 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE); | |
| 2745 if (!heap()->new_space()->Rebalance()) { | |
| 2746 FatalProcessOutOfMemory("NewSpace::Rebalance"); | |
| 2747 } | |
| 2748 } | |
| 2749 | |
| 2750 // Give pages that are queued to be freed back to the OS. | |
| 2751 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | |
| 2752 | |
| 2753 { | |
| 2754 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | |
| 2755 // TODO(mlippautz): Implement page promotion. | |
| 2756 new_space_evacuation_pages_.Rewind(0); | |
| 2757 } | |
| 2758 | |
| 2759 { | |
| 2760 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE); | |
| 2761 EvacuateEpilogue(); | |
| 2762 } | |
| 2455 } | 2763 } |
| 2456 | 2764 |
| 2457 void MarkCompactCollector::MarkLiveObjects() { | 2765 void MarkCompactCollector::MarkLiveObjects() { |
| 2458 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK); | 2766 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK); |
| 2459 // The recursive GC marker detects when it is nearing stack overflow, | 2767 // The recursive GC marker detects when it is nearing stack overflow, |
| 2460 // and switches to a different marking system. JS interrupts interfere | 2768 // and switches to a different marking system. JS interrupts interfere |
| 2461 // with the C stack limit check. | 2769 // with the C stack limit check. |
| 2462 PostponeInterruptsScope postpone(isolate()); | 2770 PostponeInterruptsScope postpone(isolate()); |
| 2463 | 2771 |
| 2464 { | 2772 { |
| (...skipping 651 matching lines...) | |
| 3116 } | 3424 } |
| 3117 | 3425 |
| 3118 // NewSpacePages with more live bytes than this threshold qualify for fast | 3426 // NewSpacePages with more live bytes than this threshold qualify for fast |
| 3119 // evacuation. | 3427 // evacuation. |
| 3120 static int PageEvacuationThreshold() { | 3428 static int PageEvacuationThreshold() { |
| 3121 if (FLAG_page_promotion) | 3429 if (FLAG_page_promotion) |
| 3122 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; | 3430 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; |
| 3123 return Page::kAllocatableMemory + kPointerSize; | 3431 return Page::kAllocatableMemory + kPointerSize; |
| 3124 } | 3432 } |
| 3125 | 3433 |
| 3126 Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor) | 3434 Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor, |
| 3435 MigrationObserver* migration_observer) | |
| 3127 : heap_(heap), | 3436 : heap_(heap), |
| 3128 compaction_spaces_(heap_), | 3437 compaction_spaces_(heap_), |
| 3129 local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity), | 3438 local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity), |
| 3130 new_space_visitor_(heap_, &compaction_spaces_, record_visitor, | 3439 new_space_visitor_(heap_, &compaction_spaces_, record_visitor, |
| 3131 &local_pretenuring_feedback_), | 3440 migration_observer, &local_pretenuring_feedback_), |
| 3132 new_to_new_page_visitor_(heap_, record_visitor, | 3441 new_to_new_page_visitor_(heap_, record_visitor, |
| 3133 &local_pretenuring_feedback_), | 3442 &local_pretenuring_feedback_), |
| 3134 new_to_old_page_visitor_(heap_, record_visitor, | 3443 new_to_old_page_visitor_(heap_, record_visitor, |
| 3135 &local_pretenuring_feedback_), | 3444 &local_pretenuring_feedback_), |
| 3136 | 3445 |
| 3137 old_space_visitor_(heap_, &compaction_spaces_, record_visitor), | 3446 old_space_visitor_(heap_, &compaction_spaces_, record_visitor, |
| 3447 migration_observer), | |
| 3138 duration_(0.0), | 3448 duration_(0.0), |
| 3139 bytes_compacted_(0) {} | 3449 bytes_compacted_(0) {} |
| 3140 | 3450 |
| 3141 virtual ~Evacuator() {} | 3451 virtual ~Evacuator() {} |
| 3142 | 3452 |
| 3143 virtual bool EvacuatePage(Page* page, const MarkingState& state) = 0; | 3453 bool EvacuatePage(Page* page, const MarkingState& state); |
| 3454 virtual bool EvacuatePageImpl(Page* page, const MarkingState& state) = 0; | |
| 3144 | 3455 |
| 3145 // Merge back locally cached info sequentially. Note that this method needs | 3456 // Merge back locally cached info sequentially. Note that this method needs |
| 3146 // to be called from the main thread. | 3457 // to be called from the main thread. |
| 3147 inline void Finalize(); | 3458 inline void Finalize(); |
| 3148 | 3459 |
| 3149 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } | 3460 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } |
| 3150 AllocationInfo CloseNewSpaceLAB() { return new_space_visitor_.CloseLAB(); } | 3461 AllocationInfo CloseNewSpaceLAB() { return new_space_visitor_.CloseLAB(); } |
| 3151 | 3462 |
| 3152 protected: | 3463 protected: |
| 3153 static const int kInitialLocalPretenuringFeedbackCapacity = 256; | 3464 static const int kInitialLocalPretenuringFeedbackCapacity = 256; |
| (...skipping 17 matching lines...) | |
| 3171 new_to_new_page_visitor_; | 3482 new_to_new_page_visitor_; |
| 3172 EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD> | 3483 EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD> |
| 3173 new_to_old_page_visitor_; | 3484 new_to_old_page_visitor_; |
| 3174 EvacuateOldSpaceVisitor old_space_visitor_; | 3485 EvacuateOldSpaceVisitor old_space_visitor_; |
| 3175 | 3486 |
| 3176 // Book keeping info. | 3487 // Book keeping info. |
| 3177 double duration_; | 3488 double duration_; |
| 3178 intptr_t bytes_compacted_; | 3489 intptr_t bytes_compacted_; |
| 3179 }; | 3490 }; |
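PageEvacuationThreshold() near the top of the Evacuator class gates whole-page promotion on a page's live bytes. The arithmetic below is a worked example only: the allocatable page size and the flag value are assumed illustrative numbers, not the actual V8 constants.

```cpp
// Worked example for the page-promotion threshold (assumed numbers).
#include <cstdio>

int main() {
  const int kAllocatableMemory = 500 * 1024;  // assumed ~500 KB usable per page
  const int page_promotion_threshold = 70;    // assumed flag value, in percent

  // With page promotion enabled, a new-space page qualifies for fast
  // (whole-page) evacuation once its live bytes exceed this threshold.
  const int threshold = page_promotion_threshold * kAllocatableMemory / 100;
  std::printf("promote pages with more than %d live bytes\n", threshold);

  // With the flag disabled the code returns kAllocatableMemory + kPointerSize,
  // a value no page can reach, so whole-page promotion never triggers.
  return 0;
}
```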
| 3180 | 3491 |
| 3492 bool Evacuator::EvacuatePage(Page* page, const MarkingState& state) { | |
| 3493 bool success = false; | |
| 3494 DCHECK(page->SweepingDone()); | |
| 3495 intptr_t saved_live_bytes = state.live_bytes(); | |
| 3496 double evacuation_time = 0.0; | |
| 3497 { | |
| 3498 AlwaysAllocateScope always_allocate(heap()->isolate()); | |
| 3499 TimedScope timed_scope(&evacuation_time); | |
| 3500 success = EvacuatePageImpl(page, state); | |
| 3501 } | |
| 3502 ReportCompactionProgress(evacuation_time, saved_live_bytes); | |
| 3503 if (FLAG_trace_evacuation) { | |
| 3504 PrintIsolate( | |
| 3505 heap()->isolate(), | |
| 3506 "evacuation[%p]: page=%p new_space=%d " | |
| 3507 "page_evacuation=%d executable=%d contains_age_mark=%d " | |
| 3508 "live_bytes=%" V8PRIdPTR " time=%f page_promotion_qualifies=%d\n", | |
| 3509 static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(), | |
| 3510 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) || | |
| 3511 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION), | |
| 3512 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE), | |
| 3513 page->Contains(heap()->new_space()->age_mark()), saved_live_bytes, | |
| 3514 evacuation_time, | |
| 3515 saved_live_bytes > Evacuator::PageEvacuationThreshold()); | |
| 3516 } | |
| 3517 return success; | |
| 3518 } | |
| 3519 | |
| 3181 void Evacuator::Finalize() { | 3520 void Evacuator::Finalize() { |
| 3182 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); | 3521 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); |
| 3183 heap()->code_space()->MergeCompactionSpace( | 3522 heap()->code_space()->MergeCompactionSpace( |
| 3184 compaction_spaces_.Get(CODE_SPACE)); | 3523 compaction_spaces_.Get(CODE_SPACE)); |
| 3185 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); | 3524 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); |
| 3186 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + | 3525 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + |
| 3187 new_to_old_page_visitor_.moved_bytes()); | 3526 new_to_old_page_visitor_.moved_bytes()); |
| 3188 heap()->IncrementSemiSpaceCopiedObjectSize( | 3527 heap()->IncrementSemiSpaceCopiedObjectSize( |
| 3189 new_space_visitor_.semispace_copied_size() + | 3528 new_space_visitor_.semispace_copied_size() + |
| 3190 new_to_new_page_visitor_.moved_bytes()); | 3529 new_to_new_page_visitor_.moved_bytes()); |
| 3191 heap()->IncrementYoungSurvivorsCounter( | 3530 heap()->IncrementYoungSurvivorsCounter( |
| 3192 new_space_visitor_.promoted_size() + | 3531 new_space_visitor_.promoted_size() + |
| 3193 new_space_visitor_.semispace_copied_size() + | 3532 new_space_visitor_.semispace_copied_size() + |
| 3194 new_to_old_page_visitor_.moved_bytes() + | 3533 new_to_old_page_visitor_.moved_bytes() + |
| 3195 new_to_new_page_visitor_.moved_bytes()); | 3534 new_to_new_page_visitor_.moved_bytes()); |
| 3196 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); | 3535 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); |
| 3197 } | 3536 } |
| 3198 | 3537 |
| 3199 class FullEvacuator : public Evacuator { | 3538 class FullEvacuator : public Evacuator { |
| 3200 public: | 3539 public: |
| 3201 FullEvacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor) | 3540 FullEvacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor, |
| 3202 : Evacuator(heap, record_visitor) {} | 3541 MigrationObserver* migration_observer) |
| 3542 : Evacuator(heap, record_visitor, migration_observer) {} | |
| 3203 | 3543 |
| 3204 bool EvacuatePage(Page* page, const MarkingState& state) override; | 3544 bool EvacuatePageImpl(Page* page, const MarkingState& state) override; |
| 3205 }; | 3545 }; |
| 3206 | 3546 |
| 3207 bool FullEvacuator::EvacuatePage(Page* page, const MarkingState& state) { | 3547 bool FullEvacuator::EvacuatePageImpl(Page* page, const MarkingState& state) { |
| 3208 bool success = false; | 3548 bool success = false; |
| 3209 DCHECK(page->SweepingDone()); | 3549 LiveObjectVisitor object_visitor; |
| 3210 intptr_t saved_live_bytes = state.live_bytes(); | 3550 switch (ComputeEvacuationMode(page)) { |
| 3211 double evacuation_time = 0.0; | 3551 case kObjectsNewToOld: |
| 3212 { | 3552 success = object_visitor.VisitBlackObjects( |
| 3213 AlwaysAllocateScope always_allocate(heap()->isolate()); | 3553 page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits); |
| 3214 TimedScope timed_scope(&evacuation_time); | 3554 DCHECK(success); |
| 3215 LiveObjectVisitor object_visitor; | 3555 ArrayBufferTracker::ProcessBuffers( |
| 3216 switch (ComputeEvacuationMode(page)) { | 3556 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
| 3217 case kObjectsNewToOld: | 3557 break; |
| 3218 success = | 3558 case kPageNewToOld: |
| 3219 object_visitor.VisitBlackObjects(page, state, &new_space_visitor_, | 3559 success = object_visitor.VisitBlackObjects( |
| 3220 LiveObjectVisitor::kClearMarkbits); | 3560 page, state, &new_to_old_page_visitor_, |
| 3561 LiveObjectVisitor::kKeepMarking); | |
| 3562 DCHECK(success); | |
| 3563 new_to_old_page_visitor_.account_moved_bytes( | |
| 3564 MarkingState::Internal(page).live_bytes()); | |
| 3565 // ArrayBufferTracker will be updated during sweeping. | |
| 3566 break; | |
| 3567 case kPageNewToNew: | |
| 3568 success = object_visitor.VisitBlackObjects( | |
| 3569 page, state, &new_to_new_page_visitor_, | |
| 3570 LiveObjectVisitor::kKeepMarking); | |
| 3571 DCHECK(success); | |
| 3572 new_to_new_page_visitor_.account_moved_bytes( | |
| 3573 MarkingState::Internal(page).live_bytes()); | |
| 3574 // ArrayBufferTracker will be updated during sweeping. | |
| 3575 break; | |
| 3576 case kObjectsOldToOld: | |
| 3577 success = object_visitor.VisitBlackObjects( | |
| 3578 page, state, &old_space_visitor_, LiveObjectVisitor::kClearMarkbits); | |
| 3579 if (!success) { | |
| 3580 // Aborted compaction page. We have to record slots here, since we | |
| 3581 // might not have recorded them in first place. | |
| 3582 // Note: We mark the page as aborted here to be able to record slots | |
| 3583 // for code objects in |RecordMigratedSlotVisitor|. | |
| 3584 page->SetFlag(Page::COMPACTION_WAS_ABORTED); | |
| 3585 EvacuateRecordOnlyVisitor record_visitor(heap()); | |
| 3586 success = object_visitor.VisitBlackObjects( | |
| 3587 page, state, &record_visitor, LiveObjectVisitor::kKeepMarking); | |
| 3588 ArrayBufferTracker::ProcessBuffers( | |
| 3589 page, ArrayBufferTracker::kUpdateForwardedKeepOthers); | |
| 3221 DCHECK(success); | 3590 DCHECK(success); |
| 3591 // We need to return failure here to indicate that we want this page | |
| 3592 // added to the sweeper. | |
| 3593 success = false; | |
| 3594 } else { | |
| 3222 ArrayBufferTracker::ProcessBuffers( | 3595 ArrayBufferTracker::ProcessBuffers( |
| 3223 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | 3596 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
| 3224 break; | 3597 } |
| 3225 case kPageNewToOld: | 3598 break; |
| 3226 success = object_visitor.VisitBlackObjects( | |
| 3227 page, state, &new_to_old_page_visitor_, | |
| 3228 LiveObjectVisitor::kKeepMarking); | |
| 3229 DCHECK(success); | |
| 3230 new_to_old_page_visitor_.account_moved_bytes( | |
| 3231 MarkingState::Internal(page).live_bytes()); | |
| 3232 // ArrayBufferTracker will be updated during sweeping. | |
| 3233 break; | |
| 3234 case kPageNewToNew: | |
| 3235 success = object_visitor.VisitBlackObjects( | |
| 3236 page, state, &new_to_new_page_visitor_, | |
| 3237 LiveObjectVisitor::kKeepMarking); | |
| 3238 DCHECK(success); | |
| 3239 new_to_new_page_visitor_.account_moved_bytes( | |
| 3240 MarkingState::Internal(page).live_bytes()); | |
| 3241 // ArrayBufferTracker will be updated during sweeping. | |
| 3242 break; | |
| 3243 case kObjectsOldToOld: | |
| 3244 success = | |
| 3245 object_visitor.VisitBlackObjects(page, state, &old_space_visitor_, | |
| 3246 LiveObjectVisitor::kClearMarkbits); | |
| 3247 if (!success) { | |
| 3248 // Aborted compaction page. We have to record slots here, since we | |
| 3249 // might not have recorded them in first place. | |
| 3250 // Note: We mark the page as aborted here to be able to record slots | |
| 3251 // for code objects in |RecordMigratedSlotVisitor|. | |
| 3252 page->SetFlag(Page::COMPACTION_WAS_ABORTED); | |
| 3253 EvacuateRecordOnlyVisitor record_visitor(heap()); | |
| 3254 success = object_visitor.VisitBlackObjects( | |
| 3255 page, state, &record_visitor, LiveObjectVisitor::kKeepMarking); | |
| 3256 ArrayBufferTracker::ProcessBuffers( | |
| 3257 page, ArrayBufferTracker::kUpdateForwardedKeepOthers); | |
| 3258 DCHECK(success); | |
| 3259 // We need to return failure here to indicate that we want this page | |
| 3260 // added to the sweeper. | |
| 3261 success = false; | |
| 3262 } else { | |
| 3263 ArrayBufferTracker::ProcessBuffers( | |
| 3264 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
| 3265 } | |
| 3266 break; | |
| 3267 } | |
| 3268 } | |
| 3269 ReportCompactionProgress(evacuation_time, saved_live_bytes); | |
| 3270 if (FLAG_trace_evacuation) { | |
| 3271 PrintIsolate(heap()->isolate(), | |
| 3272 "evacuation[%p]: page=%p new_space=%d " | |
| 3273 "page_evacuation=%d executable=%d contains_age_mark=%d " | |
| 3274 "live_bytes=%" V8PRIdPTR " time=%f\n", | |
| 3275 static_cast<void*>(this), static_cast<void*>(page), | |
| 3276 page->InNewSpace(), | |
| 3277 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) || | |
| 3278 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION), | |
| 3279 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE), | |
| 3280 page->Contains(heap()->new_space()->age_mark()), | |
| 3281 saved_live_bytes, evacuation_time); | |
| 3282 } | 3599 } |
| 3283 return success; | 3600 return success; |
| 3284 } | 3601 } |
| 3285 | 3602 |
| 3286 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, | 3603 class YoungGenerationEvacuator : public Evacuator { |
| 3287 intptr_t live_bytes) { | 3604 public: |
| 3288 if (!FLAG_parallel_compaction) return 1; | 3605 YoungGenerationEvacuator(Heap* heap, |
| 3289 // Compute the number of needed tasks based on a target compaction time, the | 3606 RecordMigratedSlotVisitor* record_visitor, |
| 3290 // profiled compaction speed and marked live memory. | 3607 MigrationObserver* migration_observer) |
| 3291 // | 3608 : Evacuator(heap, record_visitor, migration_observer) {} |
| 3292 // The number of parallel compaction tasks is limited by: | |
| 3293 // - #evacuation pages | |
| 3294 // - #cores | |
| 3295 const double kTargetCompactionTimeInMs = .5; | |
| 3296 | 3609 |
| 3297 double compaction_speed = | 3610 bool EvacuatePageImpl(Page* page, const MarkingState& state) override; |
| 3298 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3611 }; |
| 3299 | 3612 |
| 3300 const int available_cores = Max( | 3613 bool YoungGenerationEvacuator::EvacuatePageImpl(Page* page, |
| 3301 1, static_cast<int>( | 3614 const MarkingState& state) { |
| 3302 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())); | 3615 bool success = false; |
| 3303 int tasks; | 3616 LiveObjectVisitor object_visitor; |
| 3304 if (compaction_speed > 0) { | 3617 switch (ComputeEvacuationMode(page)) { |
| 3305 tasks = 1 + static_cast<int>(live_bytes / compaction_speed / | 3618 case kObjectsNewToOld: |
| 3306 kTargetCompactionTimeInMs); | 3619 success = object_visitor.VisitBlackObjects( |
| 3307 } else { | 3620 page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits); |
| 3308 tasks = pages; | 3621 DCHECK(success); |
| 3622 ArrayBufferTracker::ProcessBuffers( | |
| 3623 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
| 3624 break; | |
| 3625 case kPageNewToOld: | |
| 3626 // TODO(mlippautz): Implement page promotion. | |
| 3627 UNREACHABLE(); | |
| 3628 break; | |
| 3629 case kPageNewToNew: | |
| 3630 // TODO(mlippautz): Implement page promotion. | |
| 3631 UNREACHABLE(); | |
| 3632 break; | |
| 3633 case kObjectsOldToOld: | |
| 3634 UNREACHABLE(); | |
| 3635 break; | |
| 3309 } | 3636 } |
| 3310 const int tasks_capped_pages = Min(pages, tasks); | 3637 return success; |
| 3311 return Min(available_cores, tasks_capped_pages); | |
| 3312 } | 3638 } |
| 3313 | 3639 |
| 3314 class EvacuationJobTraits { | 3640 class EvacuationJobTraits { |
| 3315 public: | 3641 public: |
| 3316 typedef int* PerPageData; // Pointer to number of aborted pages. | 3642 struct PageData { |
| 3643 int* abandoned_pages; // Pointer to number of aborted pages. | |
| 3644 MarkingState marking_state; | |
| 3645 }; | |
| 3646 | |
| 3647 typedef PageData PerPageData; | |
| 3317 typedef Evacuator* PerTaskData; | 3648 typedef Evacuator* PerTaskData; |
| 3318 | 3649 |
| 3319 static const bool NeedSequentialFinalization = true; | 3650 static const bool NeedSequentialFinalization = true; |
| 3320 | 3651 |
| 3321 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, | 3652 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, |
| 3322 MemoryChunk* chunk, PerPageData) { | 3653 MemoryChunk* chunk, PerPageData data) { |
| 3323 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk), | 3654 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk), |
| 3324 MarkingState::Internal(chunk)); | 3655 data.marking_state); |
| 3325 } | 3656 } |
| 3326 | 3657 |
| 3327 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, | 3658 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, |
| 3328 bool success, PerPageData data) { | 3659 bool success, PerPageData data) { |
| 3329 Page* p = static_cast<Page*>(chunk); | 3660 Page* p = static_cast<Page*>(chunk); |
| 3330 switch (Evacuator::ComputeEvacuationMode(p)) { | 3661 switch (Evacuator::ComputeEvacuationMode(p)) { |
| 3331 case Evacuator::kPageNewToOld: | 3662 case Evacuator::kPageNewToOld: |
| 3332 break; | 3663 break; |
| 3333 case Evacuator::kPageNewToNew: | 3664 case Evacuator::kPageNewToNew: |
| 3334 DCHECK(success); | 3665 DCHECK(success); |
| 3335 break; | 3666 break; |
| 3336 case Evacuator::kObjectsNewToOld: | 3667 case Evacuator::kObjectsNewToOld: |
| 3337 DCHECK(success); | 3668 DCHECK(success); |
| 3338 break; | 3669 break; |
| 3339 case Evacuator::kObjectsOldToOld: | 3670 case Evacuator::kObjectsOldToOld: |
| 3340 if (success) { | 3671 if (success) { |
| 3341 DCHECK(p->IsEvacuationCandidate()); | 3672 DCHECK(p->IsEvacuationCandidate()); |
| 3342 DCHECK(p->SweepingDone()); | 3673 DCHECK(p->SweepingDone()); |
| 3343 p->Unlink(); | 3674 p->Unlink(); |
| 3344 } else { | 3675 } else { |
| 3345 // We have partially compacted the page, i.e., some objects may have | 3676 // We have partially compacted the page, i.e., some objects may have |
| 3346 // moved, others are still in place. | 3677 // moved, others are still in place. |
| 3347 p->ClearEvacuationCandidate(); | 3678 p->ClearEvacuationCandidate(); |
| 3348 // Slots have already been recorded so we just need to add it to the | 3679 // Slots have already been recorded so we just need to add it to the |
| 3349 // sweeper, which will happen after updating pointers. | 3680 // sweeper, which will happen after updating pointers. |
| 3350 *data += 1; | 3681 *data.abandoned_pages += 1; |
| 3351 } | 3682 } |
| 3352 break; | 3683 break; |
| 3353 default: | 3684 default: |
| 3354 UNREACHABLE(); | 3685 UNREACHABLE(); |
| 3355 } | 3686 } |
| 3356 } | 3687 } |
| 3357 }; | 3688 }; |
| 3358 | 3689 |
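For readers unfamiliar with the PageParallelJob pattern used by EvacuationJobTraits: a traits class supplies a per-page payload, a per-task payload, a hook that runs on a worker thread once per page, and an optional finalization hook that runs afterwards, sequentially, on the main thread. The following is only a stripped-down sketch of that shape with stand-in types (FakeHeap, FakeChunk, ExampleJobTraits are invented for illustration, not V8 interfaces):

// Illustrative only: the per-page / per-task split of a traits-driven
// parallel page job. FakeHeap and FakeChunk are stand-ins, not V8 types.
struct FakeHeap {};
struct FakeChunk { bool evacuation_candidate = false; };

struct ExampleJobTraits {
  struct PageData {           // attached to each page when it is added
    int* aborted_pages;
  };
  typedef PageData PerPageData;
  typedef int* PerTaskData;   // e.g. a per-task work counter

  static const bool NeedSequentialFinalization = true;

  // Worker thread, once per page; the return value reports success.
  static bool ProcessPageInParallel(FakeHeap*, PerTaskData counter,
                                    FakeChunk*, PerPageData) {
    ++*counter;
    return true;
  }

  // Main thread, once per page, after all workers have finished.
  static void FinalizePageSequentially(FakeHeap*, FakeChunk* chunk,
                                       bool success, PerPageData data) {
    if (!success && chunk->evacuation_candidate) ++*data.aborted_pages;
  }
};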
| 3690 namespace { | |
| 3691 | |
| 3692 // The number of parallel compaction tasks, including the main thread. | |
| 3693 int NumberOfParallelCompactionTasks(Heap* heap, int pages, | |
| 3694 intptr_t live_bytes) { | |
| 3695 if (!FLAG_parallel_compaction) return 1; | |
| 3696 // Compute the number of needed tasks based on a target compaction time, the | |
| 3697 // profiled compaction speed and marked live memory. | |
| 3698 // | |
| 3699 // The number of parallel compaction tasks is limited by: | |
| 3700 // - #evacuation pages | |
| 3701 // - #cores | |
| 3702 const double kTargetCompactionTimeInMs = .5; | |
| 3703 | |
| 3704 const double compaction_speed = | |
| 3705 heap->tracer()->CompactionSpeedInBytesPerMillisecond(); | |
| 3706 | |
| 3707 const int available_cores = Max( | |
| 3708 1, static_cast<int>( | |
| 3709 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())); | |
| 3710 int tasks; | |
| 3711 if (compaction_speed > 0) { | |
| 3712 tasks = 1 + static_cast<int>(live_bytes / compaction_speed / | |
| 3713 kTargetCompactionTimeInMs); | |
| 3714 } else { | |
| 3715 tasks = pages; | |
| 3716 } | |
| 3717 const int tasks_capped_pages = Min(pages, tasks); | |
| 3718 return Min(available_cores, tasks_capped_pages); | |
| 3719 } | |
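As a rough illustration of the heuristic above (all numbers invented, purely to show the arithmetic): with 8 MB of live bytes, a profiled compaction speed of 2 MB/ms, and the 0.5 ms target, the estimate is 1 + 8 / 2 / 0.5 = 9 tasks, which is then capped by the number of evacuation pages and the number of available cores. A self-contained sketch of the same capping logic, standalone rather than the V8 function itself:

// Illustrative sketch of the task-count heuristic above; standalone, not V8.
#include <algorithm>
#include <cstdint>

int EstimateCompactionTasks(int pages, int64_t live_bytes,
                            double speed_bytes_per_ms, int cores) {
  const double kTargetCompactionTimeInMs = 0.5;
  // With an unknown compaction speed, fall back to one task per page.
  int tasks = (speed_bytes_per_ms > 0)
                  ? 1 + static_cast<int>(live_bytes / speed_bytes_per_ms /
                                         kTargetCompactionTimeInMs)
                  : pages;
  const int capped_by_pages = std::min(pages, tasks);
  return std::min(std::max(1, cores), capped_by_pages);
}
// Example: EstimateCompactionTasks(10, 8 << 20, 2.0 * (1 << 20), 4) == 4
// (estimate 9, capped to 9 by pages, then to 4 by cores).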
| 3720 | |
| 3721 template <class E, class SV> | |
| 3722 void CreateAndExecuteEvacuationTasks(Heap* heap, | |
| 3723 PageParallelJob<EvacuationJobTraits>* job, | |
| 3724 RecordMigratedSlotVisitor* record_visitor, | |
| 3725 MigrationObserver* observer, | |
| 3726 const intptr_t live_bytes, | |
| 3727 const int& abandoned_pages) { | |
| 3728 // Used for trace summary. | |
| 3729 double compaction_speed = 0; | |
| 3730 if (FLAG_trace_evacuation) { | |
| 3731 compaction_speed = heap->tracer()->CompactionSpeedInBytesPerMillisecond(); | |
| 3732 } | |
| 3733 | |
| 3734 const int wanted_num_tasks = | |
| 3735 NumberOfParallelCompactionTasks(heap, job->NumberOfPages(), live_bytes); | |
| 3736 E** evacuators = new E*[wanted_num_tasks]; | |
| 3737 SV** slots_recorders = new SV*[wanted_num_tasks]; | |
| 3738 for (int i = 0; i < wanted_num_tasks; i++) { | |
| 3739 slots_recorders[i] = new SV(heap->mark_compact_collector()); | |
| 3740 evacuators[i] = new E(heap, slots_recorders[i], observer); | |
| 3741 } | |
| 3742 job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); | |
| 3743 const Address top = heap->new_space()->top(); | |
| 3744 for (int i = 0; i < wanted_num_tasks; i++) { | |
| 3745 evacuators[i]->Finalize(); | |
| 3746 // Try to find the last LAB that was used for new space allocation in | |
| 3747 // evacuation tasks. If it was adjacent to the current top, move top back. | |
| 3748 const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB(); | |
| 3749 if (info.limit() != nullptr && info.limit() == top) { | |
| 3750 DCHECK_NOT_NULL(info.top()); | |
| 3751 *heap->new_space()->allocation_top_address() = info.top(); | |
| 3752 } | |
| 3753 delete evacuators[i]; | |
| 3754 delete slots_recorders[i]; | |
| 3755 } | |
| 3756 delete[] evacuators; | |
| 3757 delete[] slots_recorders; | |
| 3758 | |
| 3759 if (FLAG_trace_evacuation) { | |
| 3760 PrintIsolate(heap->isolate(), | |
| 3761 "%8.0f ms: evacuation-summary: parallel=%s pages=%d " | |
| 3762 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS | |
| 3763 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n", | |
| 3764 heap->isolate()->time_millis_since_init(), | |
| 3765 FLAG_parallel_compaction ? "yes" : "no", job->NumberOfPages(), | |
| 3766 abandoned_pages, wanted_num_tasks, job->NumberOfTasks(), | |
| 3767 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), | |
| 3768 live_bytes, compaction_speed); | |
| 3769 } | |
| 3770 } | |
| 3771 | |
| 3772 } // namespace | |
| 3773 | |
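One detail of CreateAndExecuteEvacuationTasks worth spelling out: each evacuator keeps a new-space linear allocation buffer (LAB), and when a task's last LAB ends exactly at the space's current allocation top, the unused tail of that LAB can be returned by simply moving top down. A minimal stand-alone sketch of that rewind check, using hypothetical stand-in types rather than the real AllocationInfo/NewSpace API:

// Hypothetical sketch of the "move top back" step; Lab and Space are
// stand-ins for illustration, not V8 types.
#include <cstdint>

struct Lab {
  uintptr_t top = 0;    // first unused byte inside the LAB
  uintptr_t limit = 0;  // one past the last byte of the LAB
};

struct Space {
  uintptr_t top = 0;  // current allocation top of the space
};

// If the LAB's limit coincides with the space's current top, the bytes in
// [lab.top, lab.limit) were never handed out to anyone else, so the space
// top can safely be rewound to lab.top, reclaiming that tail.
void MaybeRewindTop(Space* space, const Lab& lab) {
  if (lab.limit != 0 && lab.limit == space->top) {
    space->top = lab.top;
  }
}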
| 3359 void MarkCompactCollector::EvacuatePagesInParallel() { | 3774 void MarkCompactCollector::EvacuatePagesInParallel() { |
| 3360 PageParallelJob<EvacuationJobTraits> job( | 3775 PageParallelJob<EvacuationJobTraits> job( |
| 3361 heap_, heap_->isolate()->cancelable_task_manager(), | 3776 heap_, heap_->isolate()->cancelable_task_manager(), |
| 3362 &page_parallel_job_semaphore_); | 3777 &page_parallel_job_semaphore_); |
| 3363 | 3778 |
| 3364 int abandoned_pages = 0; | 3779 int abandoned_pages = 0; |
| 3365 intptr_t live_bytes = 0; | 3780 intptr_t live_bytes = 0; |
| 3366 for (Page* page : old_space_evacuation_pages_) { | 3781 for (Page* page : old_space_evacuation_pages_) { |
| 3367 live_bytes += MarkingState::Internal(page).live_bytes(); | 3782 live_bytes += MarkingState::Internal(page).live_bytes(); |
| 3368 job.AddPage(page, &abandoned_pages); | 3783 job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
| 3369 } | 3784 } |
| 3370 | 3785 |
| 3371 const bool reduce_memory = heap()->ShouldReduceMemory(); | 3786 const bool reduce_memory = heap()->ShouldReduceMemory(); |
| 3372 const Address age_mark = heap()->new_space()->age_mark(); | 3787 const Address age_mark = heap()->new_space()->age_mark(); |
| 3373 for (Page* page : new_space_evacuation_pages_) { | 3788 for (Page* page : new_space_evacuation_pages_) { |
| 3374 intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes(); | 3789 intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes(); |
| 3375 live_bytes += live_bytes_on_page; | 3790 live_bytes += live_bytes_on_page; |
| 3376 if (!reduce_memory && !page->NeverEvacuate() && | 3791 if (!reduce_memory && !page->NeverEvacuate() && |
| 3377 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) && | 3792 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) && |
| 3378 !page->Contains(age_mark) && | 3793 !page->Contains(age_mark) && |
| 3379 heap()->CanExpandOldGeneration(live_bytes_on_page)) { | 3794 heap()->CanExpandOldGeneration(live_bytes_on_page)) { |
| 3380 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { | 3795 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { |
| 3381 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); | 3796 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); |
| 3382 } else { | 3797 } else { |
| 3383 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); | 3798 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); |
| 3384 } | 3799 } |
| 3385 } | 3800 } |
| 3386 | 3801 |
| 3387 job.AddPage(page, &abandoned_pages); | 3802 job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
| 3388 } | 3803 } |
| 3389 DCHECK_GE(job.NumberOfPages(), 1); | 3804 DCHECK_GE(job.NumberOfPages(), 1); |
| 3390 | 3805 |
| 3391 // Used for trace summary. | 3806 MigrationObserver observer(heap()); |
| 3392 double compaction_speed = 0; | 3807 CreateAndExecuteEvacuationTasks<FullEvacuator, RecordMigratedSlotVisitor>( |
| 3393 if (FLAG_trace_evacuation) { | 3808 heap(), &job, nullptr, &observer, live_bytes, abandoned_pages); |
| 3394 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3809 } |
| 3810 | |
| 3811 void MinorMarkCompactCollector::EvacuatePagesInParallel( | |
| 3812 std::vector<HeapObject*>* black_allocation_objects) { | |
| 3813 PageParallelJob<EvacuationJobTraits> job( | |
| 3814 heap_, heap_->isolate()->cancelable_task_manager(), | |
| 3815 &page_parallel_job_semaphore_); | |
| 3816 | |
| 3817 int abandoned_pages = 0; | |
| 3818 intptr_t live_bytes = 0; | |
| 3819 | |
| 3820 for (Page* page : new_space_evacuation_pages_) { | |
| 3821 intptr_t live_bytes_on_page = marking_state(page).live_bytes(); | |
| 3822 live_bytes += live_bytes_on_page; | |
| 3823 // TODO(mlippautz): Implement page promotion. | |
| 3824 job.AddPage(page, {&abandoned_pages, marking_state(page)}); | |
| 3395 } | 3825 } |
| 3826 DCHECK_GE(job.NumberOfPages(), 1); | |
| 3396 | 3827 |
| 3397 const int wanted_num_tasks = | 3828 YoungGenerationMigrationObserver observer( |
| 3398 NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes); | 3829 heap(), heap()->mark_compact_collector(), black_allocation_objects); |
| 3399 FullEvacuator** evacuators = new FullEvacuator*[wanted_num_tasks]; | 3830 CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator, |
Michael Lippautz, 2017/04/21 07:05:52:
    The idea is to have a stateless YoungGenerationRec…
Hannes Payer (out of office), 2017/04/21 14:46:26:
    Acknowledged.
| 3400 RecordMigratedSlotVisitor record_visitor(this); | 3831 YoungGenerationRecordMigratedSlotVisitor>( |
| 3401 for (int i = 0; i < wanted_num_tasks; i++) { | 3832 heap(), &job, nullptr, &observer, live_bytes, abandoned_pages); |
| 3402 evacuators[i] = new FullEvacuator(heap(), &record_visitor); | |
| 3403 } | |
| 3404 job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); | |
| 3405 const Address top = heap()->new_space()->top(); | |
| 3406 for (int i = 0; i < wanted_num_tasks; i++) { | |
| 3407 evacuators[i]->Finalize(); | |
| 3408 // Try to find the last LAB that was used for new space allocation in | |
| 3409 // evacuation tasks. If it was adjacent to the current top, move top back. | |
| 3410 const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB(); | |
| 3411 if (info.limit() != nullptr && info.limit() == top) { | |
| 3412 DCHECK_NOT_NULL(info.top()); | |
| 3413 *heap()->new_space()->allocation_top_address() = info.top(); | |
| 3414 } | |
| 3415 delete evacuators[i]; | |
| 3416 } | |
| 3417 delete[] evacuators; | |
| 3418 | |
| 3419 if (FLAG_trace_evacuation) { | |
| 3420 PrintIsolate(isolate(), | |
| 3421 "%8.0f ms: evacuation-summary: parallel=%s pages=%d " | |
| 3422 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS | |
| 3423 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n", | |
| 3424 isolate()->time_millis_since_init(), | |
| 3425 FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(), | |
| 3426 abandoned_pages, wanted_num_tasks, job.NumberOfTasks(), | |
| 3427 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), | |
| 3428 live_bytes, compaction_speed); | |
| 3429 } | |
| 3430 } | 3833 } |
| 3431 | 3834 |
| 3432 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 3835 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
| 3433 public: | 3836 public: |
| 3434 virtual Object* RetainAs(Object* object) { | 3837 virtual Object* RetainAs(Object* object) { |
| 3435 if (object->IsHeapObject()) { | 3838 if (object->IsHeapObject()) { |
| 3436 HeapObject* heap_object = HeapObject::cast(object); | 3839 HeapObject* heap_object = HeapObject::cast(object); |
| 3437 MapWord map_word = heap_object->map_word(); | 3840 MapWord map_word = heap_object->map_word(); |
| 3438 if (map_word.IsForwardingAddress()) { | 3841 if (map_word.IsForwardingAddress()) { |
| 3439 return map_word.ToForwardingAddress(); | 3842 return map_word.ToForwardingAddress(); |
| (...skipping 304 matching lines...) | |
| 3744 UpdateUntypedPointers(heap, chunk); | 4147 UpdateUntypedPointers(heap, chunk); |
| 3745 UpdateTypedPointers(heap, chunk); | 4148 UpdateTypedPointers(heap, chunk); |
| 3746 return true; | 4149 return true; |
| 3747 } | 4150 } |
| 3748 static const bool NeedSequentialFinalization = false; | 4151 static const bool NeedSequentialFinalization = false; |
| 3749 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 4152 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
| 3750 } | 4153 } |
| 3751 | 4154 |
| 3752 private: | 4155 private: |
| 3753 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { | 4156 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { |
| 4157 base::LockGuard<base::RecursiveMutex> guard(chunk->mutex()); | |
| 3754 if (type == OLD_TO_NEW) { | 4158 if (type == OLD_TO_NEW) { |
| 3755 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { | 4159 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { |
| 3756 return CheckAndUpdateOldToNewSlot(heap, slot); | 4160 return CheckAndUpdateOldToNewSlot(heap, slot); |
| 3757 }); | 4161 }); |
| 3758 } else { | 4162 } else { |
| 3759 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { | 4163 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { |
| 3760 return UpdateSlot(reinterpret_cast<Object**>(slot)); | 4164 return UpdateSlot(reinterpret_cast<Object**>(slot)); |
| 3761 }); | 4165 }); |
| 3762 } | 4166 } |
| 3763 } | 4167 } |
| (...skipping 86 matching lines...) | |
| 3850 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 4254 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
| 3851 RememberedSet<type>::IterateMemoryChunks( | 4255 RememberedSet<type>::IterateMemoryChunks( |
| 3852 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); | 4256 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); |
| 3853 int num_pages = job.NumberOfPages(); | 4257 int num_pages = job.NumberOfPages(); |
| 3854 int num_tasks = NumberOfPointerUpdateTasks(num_pages); | 4258 int num_tasks = NumberOfPointerUpdateTasks(num_pages); |
| 3855 job.Run(num_tasks, [](int i) { return 0; }); | 4259 job.Run(num_tasks, [](int i) { return 0; }); |
| 3856 } | 4260 } |
| 3857 | 4261 |
| 3858 class ToSpacePointerUpdateJobTraits { | 4262 class ToSpacePointerUpdateJobTraits { |
| 3859 public: | 4263 public: |
| 3860 typedef std::pair<Address, Address> PerPageData; | 4264 struct PageData { |
| 4265 Address start; | |
| 4266 Address end; | |
| 4267 MarkingState marking_state; | |
| 4268 }; | |
| 4269 | |
| 4270 typedef PageData PerPageData; | |
| 3861 typedef PointersUpdatingVisitor* PerTaskData; | 4271 typedef PointersUpdatingVisitor* PerTaskData; |
| 3862 | 4272 |
| 3863 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, | 4273 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, |
| 3864 MemoryChunk* chunk, PerPageData limits) { | 4274 MemoryChunk* chunk, PerPageData page_data) { |
| 3865 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { | 4275 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { |
| 3866 // New->new promoted pages contain garbage so they require iteration | 4276 // New->new promoted pages contain garbage so they require iteration |
| 3867 // using markbits. | 4277 // using markbits. |
| 3868 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits); | 4278 ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data); |
| 3869 } else { | 4279 } else { |
| 3870 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits); | 4280 ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data); |
| 3871 } | 4281 } |
| 3872 return true; | 4282 return true; |
| 3873 } | 4283 } |
| 3874 | 4284 |
| 3875 static const bool NeedSequentialFinalization = false; | 4285 static const bool NeedSequentialFinalization = false; |
| 3876 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 4286 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
| 3877 } | 4287 } |
| 3878 | 4288 |
| 3879 private: | 4289 private: |
| 3880 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, | 4290 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, |
| 3881 MemoryChunk* chunk, | 4291 MemoryChunk* chunk, |
| 3882 PerPageData limits) { | 4292 PerPageData page_data) { |
| 3883 for (Address cur = limits.first; cur < limits.second;) { | 4293 for (Address cur = page_data.start; cur < page_data.end;) { |
| 3884 HeapObject* object = HeapObject::FromAddress(cur); | 4294 HeapObject* object = HeapObject::FromAddress(cur); |
| 3885 Map* map = object->map(); | 4295 Map* map = object->map(); |
| 3886 int size = object->SizeFromMap(map); | 4296 int size = object->SizeFromMap(map); |
| 3887 object->IterateBody(map->instance_type(), size, visitor); | 4297 object->IterateBody(map->instance_type(), size, visitor); |
| 3888 cur += size; | 4298 cur += size; |
| 3889 } | 4299 } |
| 3890 } | 4300 } |
| 3891 | 4301 |
| 3892 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, | 4302 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, |
| 3893 MemoryChunk* chunk, | 4303 MemoryChunk* chunk, |
| 3894 PerPageData limits) { | 4304 PerPageData page_data) { |
| 3895 LiveObjectIterator<kBlackObjects> it(chunk, MarkingState::Internal(chunk)); | 4305 LiveObjectIterator<kBlackObjects> it(chunk, page_data.marking_state); |
| 3896 HeapObject* object = NULL; | 4306 HeapObject* object = NULL; |
| 3897 while ((object = it.Next()) != NULL) { | 4307 while ((object = it.Next()) != NULL) { |
| 3898 Map* map = object->map(); | 4308 Map* map = object->map(); |
| 3899 int size = object->SizeFromMap(map); | 4309 int size = object->SizeFromMap(map); |
| 3900 object->IterateBody(map->instance_type(), size, visitor); | 4310 object->IterateBody(map->instance_type(), size, visitor); |
| 3901 } | 4311 } |
| 3902 } | 4312 } |
| 3903 }; | 4313 }; |
| 3904 | 4314 |
| 3905 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { | 4315 template <class MarkingStateProvider> |
| 4316 void UpdateToSpacePointersInParallel( | |
| 4317 Heap* heap, base::Semaphore* semaphore, | |
| 4318 const MarkingStateProvider& marking_state_provider) { | |
| 3906 PageParallelJob<ToSpacePointerUpdateJobTraits> job( | 4319 PageParallelJob<ToSpacePointerUpdateJobTraits> job( |
| 3907 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 4320 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
| 3908 Address space_start = heap->new_space()->bottom(); | 4321 Address space_start = heap->new_space()->bottom(); |
| 3909 Address space_end = heap->new_space()->top(); | 4322 Address space_end = heap->new_space()->top(); |
| 3910 for (Page* page : PageRange(space_start, space_end)) { | 4323 for (Page* page : PageRange(space_start, space_end)) { |
| 3911 Address start = | 4324 Address start = |
| 3912 page->Contains(space_start) ? space_start : page->area_start(); | 4325 page->Contains(space_start) ? space_start : page->area_start(); |
| 3913 Address end = page->Contains(space_end) ? space_end : page->area_end(); | 4326 Address end = page->Contains(space_end) ? space_end : page->area_end(); |
| 3914 job.AddPage(page, std::make_pair(start, end)); | 4327 job.AddPage(page, {start, end, marking_state_provider.marking_state(page)}); |
| 3915 } | 4328 } |
| 3916 PointersUpdatingVisitor visitor; | 4329 PointersUpdatingVisitor visitor; |
| 3917 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; | 4330 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; |
| 3918 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); | 4331 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); |
| 3919 } | 4332 } |
| 3920 | 4333 |
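UpdateToSpacePointersInParallel walks new space from bottom() to top() and hands each page only the part of that range it actually covers: the first and last pages are clamped to the space boundaries, and every page in between gets its full area. A small stand-alone sketch of that clamping, with a hypothetical PageLike stand-in rather than the V8 Page class:

// Hypothetical illustration of clamping a [space_start, space_end) range to
// a single page; PageLike is a stand-in, not the V8 Page class.
#include <cstdint>
#include <utility>

struct PageLike {
  uintptr_t area_start;
  uintptr_t area_end;
  bool Contains(uintptr_t addr) const {
    return addr >= area_start && addr < area_end;
  }
};

// Returns the sub-range of [space_start, space_end) that lies on this page.
std::pair<uintptr_t, uintptr_t> ClampToPage(const PageLike& page,
                                            uintptr_t space_start,
                                            uintptr_t space_end) {
  const uintptr_t start =
      page.Contains(space_start) ? space_start : page.area_start;
  const uintptr_t end = page.Contains(space_end) ? space_end : page.area_end;
  return {start, end};
}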
| 3921 void MarkCompactCollector::UpdatePointersAfterEvacuation() { | 4334 void MarkCompactCollector::UpdatePointersAfterEvacuation() { |
| 3922 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); | 4335 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); |
| 3923 | 4336 |
| 3924 PointersUpdatingVisitor updating_visitor; | 4337 PointersUpdatingVisitor updating_visitor; |
| 3925 | 4338 |
| 3926 { | 4339 { |
| 3927 TRACE_GC(heap()->tracer(), | 4340 TRACE_GC(heap()->tracer(), |
| 3928 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); | 4341 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); |
| 3929 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_); | 4342 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, |
| 4343 *this); | |
| 3930 // Update roots. | 4344 // Update roots. |
| 3931 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 4345 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
| 3932 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); | 4346 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); |
| 3933 } | 4347 } |
| 3934 | 4348 |
| 3935 { | 4349 { |
| 3936 Heap* heap = this->heap(); | 4350 Heap* heap = this->heap(); |
| 3937 TRACE_GC(heap->tracer(), | 4351 TRACE_GC(heap->tracer(), |
| 3938 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); | 4352 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); |
| 3939 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); | 4353 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); |
| 3940 } | 4354 } |
| 3941 | 4355 |
| 3942 { | 4356 { |
| 3943 TRACE_GC(heap()->tracer(), | 4357 TRACE_GC(heap()->tracer(), |
| 3944 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); | 4358 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); |
| 3945 // Update pointers from external string table. | 4359 // Update pointers from external string table. |
| 3946 heap_->UpdateReferencesInExternalStringTable( | 4360 heap_->UpdateReferencesInExternalStringTable( |
| 3947 &UpdateReferenceInExternalStringTableEntry); | 4361 &UpdateReferenceInExternalStringTableEntry); |
| 3948 | 4362 |
| 3949 EvacuationWeakObjectRetainer evacuation_object_retainer; | 4363 EvacuationWeakObjectRetainer evacuation_object_retainer; |
| 3950 heap()->ProcessWeakListRoots(&evacuation_object_retainer); | 4364 heap()->ProcessWeakListRoots(&evacuation_object_retainer); |
| 3951 } | 4365 } |
| 3952 } | 4366 } |
| 3953 | 4367 |
| 4368 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() { | |
| 4369 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); | |
| 4370 | |
| 4371 PointersUpdatingVisitor updating_visitor; | |
| 4372 | |
| 4373 { | |
| 4374 TRACE_GC(heap()->tracer(), | |
| 4375 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); | |
| 4376 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, | |
| 4377 *this); | |
| 4378 // TODO(mlippautz): Iteration mode is not optimal as we process all | |
| 4379 // global handles. Find a way to only process the ones related to new | |
| 4380 // space. | |
| 4381 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | |
| 4382 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); | |
| 4383 } | |
| 4384 | |
| 4385 { | |
| 4386 TRACE_GC(heap()->tracer(), | |
| 4387 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); | |
| 4388 | |
| 4389 EvacuationWeakObjectRetainer evacuation_object_retainer; | |
| 4390 heap()->ProcessWeakListRoots(&evacuation_object_retainer); | |
| 4391 | |
| 4392 // Update pointers from external string table. | |
| 4393 heap()->UpdateNewSpaceReferencesInExternalStringTable( | |
| 4394 &UpdateReferenceInExternalStringTableEntry); | |
| 4395 heap()->VisitEncounteredWeakCollections(&updating_visitor); | |
| 4396 heap()->set_encountered_weak_collections(Smi::kZero); | |
| 4397 } | |
| 4398 } | |
| 3954 | 4399 |
| 3955 void MarkCompactCollector::ReleaseEvacuationCandidates() { | 4400 void MarkCompactCollector::ReleaseEvacuationCandidates() { |
| 3956 for (Page* p : old_space_evacuation_pages_) { | 4401 for (Page* p : old_space_evacuation_pages_) { |
| 3957 if (!p->IsEvacuationCandidate()) continue; | 4402 if (!p->IsEvacuationCandidate()) continue; |
| 3958 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 4403 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3959 MarkingState::Internal(p).SetLiveBytes(0); | 4404 MarkingState::Internal(p).SetLiveBytes(0); |
| 3960 CHECK(p->SweepingDone()); | 4405 CHECK(p->SweepingDone()); |
| 3961 space->ReleasePage(p); | 4406 space->ReleasePage(p); |
| 3962 } | 4407 } |
| 3963 old_space_evacuation_pages_.Rewind(0); | 4408 old_space_evacuation_pages_.Rewind(0); |
| (...skipping 205 matching lines...) | |
| 4169 // The target is always in old space, we don't have to record the slot in | 4614 // The target is always in old space, we don't have to record the slot in |
| 4170 // the old-to-new remembered set. | 4615 // the old-to-new remembered set. |
| 4171 DCHECK(!heap()->InNewSpace(target)); | 4616 DCHECK(!heap()->InNewSpace(target)); |
| 4172 RecordRelocSlot(host, &rinfo, target); | 4617 RecordRelocSlot(host, &rinfo, target); |
| 4173 } | 4618 } |
| 4174 } | 4619 } |
| 4175 } | 4620 } |
| 4176 | 4621 |
| 4177 } // namespace internal | 4622 } // namespace internal |
| 4178 } // namespace v8 | 4623 } // namespace v8 |