OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 286 matching lines...) | |
297 HeapObject* object = HeapObject::cast(*current); | 297 HeapObject* object = HeapObject::cast(*current); |
298 if (heap()->InNewSpace(object)) { | 298 if (heap()->InNewSpace(object)) { |
299 CHECK(heap()->InToSpace(object)); | 299 CHECK(heap()->InToSpace(object)); |
300 } | 300 } |
301 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); | 301 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); |
302 } | 302 } |
303 } | 303 } |
304 } | 304 } |
305 }; | 305 }; |
306 | 306 |
307 class YoungGenerationEvacuationVerifier : public EvacuationVerifier { | |
308 public: | |
309 explicit YoungGenerationEvacuationVerifier(Heap* heap) | |
310 : EvacuationVerifier(heap) {} | |
311 | |
312 void Run() override { | |
313 VerifyRoots(VISIT_ALL_IN_SCAVENGE); | |
314 VerifyEvacuation(heap_->new_space()); | |
315 VerifyEvacuation(heap_->old_space()); | |
316 VerifyEvacuation(heap_->code_space()); | |
317 VerifyEvacuation(heap_->map_space()); | |
318 } | |
319 | |
320 protected: | |
321 void VerifyPointers(Object** start, Object** end) override { | |
322 for (Object** current = start; current < end; current++) { | |
323 if ((*current)->IsHeapObject()) { | |
324 HeapObject* object = HeapObject::cast(*current); | |
325 if (heap()->InNewSpace(object)) { | |
326 CHECK(heap()->InToSpace(object)); | |
327 } | |
328 } | |
329 } | |
330 } | |
331 }; | |
332 | |
307 } // namespace | 333 } // namespace |
308 #endif // VERIFY_HEAP | 334 #endif // VERIFY_HEAP |
309 | 335 |
310 // ============================================================================= | 336 // ============================================================================= |
311 // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector | 337 // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector |
312 // ============================================================================= | 338 // ============================================================================= |
313 | 339 |
314 int MarkCompactCollectorBase::NumberOfParallelCompactionTasks( | 340 int MarkCompactCollectorBase::NumberOfParallelCompactionTasks( |
315 int pages, intptr_t live_bytes) { | 341 int pages, intptr_t live_bytes) { |
316 if (!FLAG_parallel_compaction) return 1; | 342 if (!FLAG_parallel_compaction) return 1; |
(...skipping 1276 matching lines...) | |
1593 *p = the_hole; | 1619 *p = the_hole; |
1594 } | 1620 } |
1595 } | 1621 } |
1596 } | 1622 } |
1597 } | 1623 } |
1598 | 1624 |
1599 private: | 1625 private: |
1600 Heap* heap_; | 1626 Heap* heap_; |
1601 }; | 1627 }; |
1602 | 1628 |
1629 // Helper class for pruning the external string table. | |
1630 class YoungGenerationExternalStringTableCleaner : public RootVisitor { | |
1631 public: | |
1632 YoungGenerationExternalStringTableCleaner( | |
1633 const MinorMarkCompactCollector& collector) | |
1634 : heap_(collector.heap()), collector_(collector) {} | |
1635 | |
1636 void VisitRootPointers(Root root, Object** start, Object** end) override { | |
1637 DCHECK_EQ(static_cast<int>(root), | |
1638 static_cast<int>(Root::kExternalStringsTable)); | |
1639 // Visit all HeapObject pointers in [start, end). | |
1640 for (Object** p = start; p < end; p++) { | |
1641 Object* o = *p; | |
1642 if (o->IsHeapObject()) { | |
1643 HeapObject* heap_object = HeapObject::cast(o); | |
1644 if (ObjectMarking::IsWhite(heap_object, | |
1645 collector_.marking_state(heap_object))) { | |
1646 if (o->IsExternalString()) { | |
1647 heap_->FinalizeExternalString(String::cast(*p)); | |
1648 } else { | |
1649 // The original external string may have been internalized. | |
1650 DCHECK(o->IsThinString()); | |
1651 } | |
1652 // Set the entry to the_hole_value (as deleted). | |
1653 *p = heap_->the_hole_value(); | |
1654 } | |
1655 } | |
1656 } | |
1657 } | |
1658 | |
1659 private: | |
1660 Heap* heap_; | |
1661 const MinorMarkCompactCollector& collector_; | |
1662 }; | |
1663 | |
1664 // Marked young generation objects and all old generation objects will be | |
1665 // retained. | |
1666 class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer { | |
1667 public: | |
1668 explicit MinorMarkCompactWeakObjectRetainer( | |
1669 const MinorMarkCompactCollector& collector) | |
1670 : collector_(collector) {} | |
1671 | |
1672 virtual Object* RetainAs(Object* object) { | |
1673 HeapObject* heap_object = HeapObject::cast(object); | |
1674 if (!collector_.heap()->InNewSpace(heap_object)) return object; | |
1675 | |
1676 DCHECK(!ObjectMarking::IsGrey(heap_object, | |
1677 collector_.marking_state(heap_object))); | |
1678 if (ObjectMarking::IsBlack(heap_object, | |
1679 collector_.marking_state(heap_object))) { | |
1680 return object; | |
1681 } | |
1682 return nullptr; | |
1683 } | |
1684 | |
1685 private: | |
1686 const MinorMarkCompactCollector& collector_; | |
1687 }; | |
1688 | |
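For context on the retainer classes above and below: a WeakObjectRetainer is handed to the heap's weak-list processing (see ProcessYoungWeakReferences/ProcessWeakListRoots later in this CL), which calls RetainAs for each weak entry and keeps whatever it returns, dropping the entry when nullptr comes back. The following stand-alone sketch only illustrates that contract; HeapObject, MinorRetainerSketch, and ProcessWeakList are simplified stand-ins, not the V8 classes.

#include <cstddef>
#include <vector>

// Simplified stand-in for a heap object; not the V8 type.
struct HeapObject {
  bool in_new_space;
  bool marked_black;
};

class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() = default;
  // Return the object (possibly a forwarded copy) to keep the weak reference,
  // or nullptr to have the list processor clear it.
  virtual HeapObject* RetainAs(HeapObject* object) = 0;
};

// Mirrors the minor MC policy above: everything outside the young generation
// survives; young objects survive only if they are marked.
class MinorRetainerSketch : public WeakObjectRetainer {
 public:
  HeapObject* RetainAs(HeapObject* object) override {
    if (!object->in_new_space) return object;
    return object->marked_black ? object : nullptr;
  }
};

// A weak-list processor keeps exactly the entries the retainer returns.
void ProcessWeakList(std::vector<HeapObject*>* list,
                     WeakObjectRetainer* retainer) {
  size_t kept = 0;
  for (HeapObject* entry : *list) {
    if (HeapObject* retained = retainer->RetainAs(entry)) {
      (*list)[kept++] = retained;
    }
  }
  list->resize(kept);
}
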
1603 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects | 1689 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects |
1604 // are retained. | 1690 // are retained. |
1605 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { | 1691 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { |
1606 public: | 1692 public: |
1607 virtual Object* RetainAs(Object* object) { | 1693 virtual Object* RetainAs(Object* object) { |
1608 HeapObject* heap_object = HeapObject::cast(object); | 1694 HeapObject* heap_object = HeapObject::cast(object); |
1609 DCHECK(!ObjectMarking::IsGrey(heap_object, | 1695 DCHECK(!ObjectMarking::IsGrey(heap_object, |
1610 MarkingState::Internal(heap_object))); | 1696 MarkingState::Internal(heap_object))); |
1611 if (ObjectMarking::IsBlack(heap_object, | 1697 if (ObjectMarking::IsBlack(heap_object, |
1612 MarkingState::Internal(heap_object))) { | 1698 MarkingState::Internal(heap_object))) { |
(...skipping 104 matching lines...) | |
1717 inline void VisitCellPointer(Code* host, RelocInfo* rinfo) override { | 1803 inline void VisitCellPointer(Code* host, RelocInfo* rinfo) override { |
1718 DCHECK_EQ(host, rinfo->host()); | 1804 DCHECK_EQ(host, rinfo->host()); |
1719 DCHECK(rinfo->rmode() == RelocInfo::CELL); | 1805 DCHECK(rinfo->rmode() == RelocInfo::CELL); |
1720 Cell* cell = rinfo->target_cell(); | 1806 Cell* cell = rinfo->target_cell(); |
1721 // The cell is always in old space, we don't have to record the slot in | 1807 // The cell is always in old space, we don't have to record the slot in |
1722 // the old-to-new remembered set. | 1808 // the old-to-new remembered set. |
1723 DCHECK(!collector_->heap()->InNewSpace(cell)); | 1809 DCHECK(!collector_->heap()->InNewSpace(cell)); |
1724 collector_->RecordRelocSlot(host, rinfo, cell); | 1810 collector_->RecordRelocSlot(host, rinfo, cell); |
1725 } | 1811 } |
1726 | 1812 |
1727 // Entries that will never move. | |
1728 inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override { | 1813 inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override { |
1729 DCHECK_EQ(host, rinfo->host()); | 1814 DCHECK_EQ(host, rinfo->host()); |
1730 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); | 1815 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); |
1731 Code* stub = rinfo->code_age_stub(); | 1816 Code* stub = rinfo->code_age_stub(); |
1732 USE(stub); | 1817 USE(stub); |
1733 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate()); | 1818 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate()); |
1734 } | 1819 } |
1735 | 1820 |
1736 // Entries that are skipped for recording. | 1821 // Entries that are skipped for recording. |
1737 inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {} | 1822 inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {} |
(...skipping 36 matching lines...) | |
1774 inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst, | 1859 inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst, |
1775 int size) final { | 1860 int size) final { |
1776 if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) { | 1861 if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) { |
1777 PROFILE(heap_->isolate(), | 1862 PROFILE(heap_->isolate(), |
1778 CodeMoveEvent(AbstractCode::cast(src), dst->address())); | 1863 CodeMoveEvent(AbstractCode::cast(src), dst->address())); |
1779 } | 1864 } |
1780 heap_->OnMoveEvent(dst, src, size); | 1865 heap_->OnMoveEvent(dst, src, size); |
1781 } | 1866 } |
1782 }; | 1867 }; |
1783 | 1868 |
1869 class YoungGenerationMigrationObserver final : public MigrationObserver { | |
1870 public: | |
1871 YoungGenerationMigrationObserver( | |
1872 Heap* heap, MarkCompactCollector* mark_compact_collector, | |
1873 std::vector<HeapObject*>* black_allocation_objects) | |
1874 : MigrationObserver(heap), | |
1875 mark_compact_collector_(mark_compact_collector), | |
1876 black_allocation_objects_(black_allocation_objects) {} | |
1877 | |
1878 inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst, | |
1879 int size) final { | |
1880 // Migrate color to old generation marking in case the object survived young | |
1881 // generation garbage collection. | |
1882 if (heap_->incremental_marking()->IsMarking()) { | |
1883 const MarkingState state = mark_compact_collector_->marking_state(dst); | |
1884 if (ObjectMarking::IsBlack(dst, state)) { | |
1885 DCHECK(heap_->incremental_marking()->black_allocation()); | |
1886 base::LockGuard<base::Mutex> guard(&mutex_); | |
1887 black_allocation_objects_->push_back(dst); | |
1888 } else { | |
1889 heap_->incremental_marking()->TransferColor<MarkBit::ATOMIC>(src, dst); | |
1890 } | |
1891 } | |
1892 } | |
1893 | |
1894 protected: | |
1895 base::Mutex mutex_; | |
1896 MarkCompactCollector* mark_compact_collector_; | |
1897 std::vector<HeapObject*>* black_allocation_objects_; | |
1898 }; | |
1899 | |
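MigrationObserver::Move is the hook an evacuator fires after copying an object; the young-generation observer above uses it to carry mark bits over to old-generation marking, or to queue black-allocated objects for later processing. Below is a minimal sketch of the observer/evacuator wiring with placeholder types, not the V8 classes; note that the real EvacuateVisitorBase only switches to the observed migration path once AddObserver has been called, whereas this sketch notifies unconditionally.

#include <vector>

struct HeapObject {};
enum AllocationSpace { NEW_SPACE, OLD_SPACE, CODE_SPACE };

class MigrationObserver {
 public:
  virtual ~MigrationObserver() = default;
  // Called once per object after it has been copied to its new location.
  virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                    int size) = 0;
};

// Simplified evacuator that notifies all registered observers on each move.
class EvacuatorSketch {
 public:
  void AddObserver(MigrationObserver* observer) {
    observers_.push_back(observer);
  }

  void MigrateObject(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                     int size) {
    // ... copy the object payload from src to dst ...
    for (MigrationObserver* observer : observers_) {
      observer->Move(dest, src, dst, size);
    }
  }

 private:
  std::vector<MigrationObserver*> observers_;
};
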
1900 class YoungGenerationRecordMigratedSlotVisitor final | |
1901 : public RecordMigratedSlotVisitor { | |
1902 public: | |
1903 explicit YoungGenerationRecordMigratedSlotVisitor( | |
1904 MarkCompactCollector* collector) | |
1905 : RecordMigratedSlotVisitor(collector) {} | |
1906 | |
1907 inline void VisitCodeEntry(JSFunction* host, Address code_entry_slot) final { | |
1908 Address code_entry = Memory::Address_at(code_entry_slot); | |
1909 if (Page::FromAddress(code_entry)->IsEvacuationCandidate() && | |
1910 IsLive(host)) { | |
1911 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot), | |
1912 nullptr, CODE_ENTRY_SLOT, | |
1913 code_entry_slot); | |
1914 } | |
1915 } | |
1916 | |
1917 void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } | |
1918 void VisitDebugTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } | |
1919 void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final { | |
1920 UNREACHABLE(); | |
1921 } | |
1922 void VisitCellPointer(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } | |
1923 void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) final { | |
1924 UNREACHABLE(); | |
1925 } | |
1926 | |
1927 private: | |
1928 // Only record slots for host objects that are considered as live by the full | |
1929 // collector. | |
1930 inline bool IsLive(HeapObject* object) { | |
1931 return ObjectMarking::IsBlack(object, collector_->marking_state(object)); | |
1932 } | |
1933 | |
1934 inline void RecordMigratedSlot(HeapObject* host, Object* value, | |
1935 Address slot) final { | |
1936 if (value->IsHeapObject()) { | |
1937 Page* p = Page::FromAddress(reinterpret_cast<Address>(value)); | |
1938 if (p->InNewSpace()) { | |
1939 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot); | |
1940 } else if (p->IsEvacuationCandidate() && IsLive(host)) { | |
1941 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot); | |
1942 } | |
1943 } | |
1944 } | |
1945 }; | |
1946 | |
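The private RecordMigratedSlot above encodes the remembered-set policy: slots pointing into new space go to OLD_TO_NEW, and slots pointing at evacuation candidates go to OLD_TO_OLD, but only when the host is live for the full collector. A small illustrative sketch of that decision, using stand-in types rather than V8's RememberedSet API:

// Illustrative only; the enum and struct are stand-ins, not V8 types.
enum class SlotAction { kNone, kRecordOldToNew, kRecordOldToOld };

struct PageInfo {
  bool in_new_space;
  bool is_evacuation_candidate;
};

// host_is_live corresponds to IsLive(host) above: the host has to be black on
// the full collector's marking state for an old-to-old slot to be worth keeping.
SlotAction ClassifyMigratedSlot(const PageInfo& value_page, bool host_is_live) {
  if (value_page.in_new_space) return SlotAction::kRecordOldToNew;
  if (value_page.is_evacuation_candidate && host_is_live) {
    return SlotAction::kRecordOldToOld;
  }
  return SlotAction::kNone;
}
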
1784 class HeapObjectVisitor { | 1947 class HeapObjectVisitor { |
1785 public: | 1948 public: |
1786 virtual ~HeapObjectVisitor() {} | 1949 virtual ~HeapObjectVisitor() {} |
1787 virtual bool Visit(HeapObject* object) = 0; | 1950 virtual bool Visit(HeapObject* object) = 0; |
1788 }; | 1951 }; |
1789 | 1952 |
1790 class EvacuateVisitorBase : public HeapObjectVisitor { | 1953 class EvacuateVisitorBase : public HeapObjectVisitor { |
1791 public: | 1954 public: |
1792 void AddObserver(MigrationObserver* observer) { | 1955 void AddObserver(MigrationObserver* observer) { |
1793 migration_function_ = RawMigrateObject<MigrationMode::kObserved>; | 1956 migration_function_ = RawMigrateObject<MigrationMode::kObserved>; |
(...skipping 650 matching lines...) | |
2444 heap_object); | 2607 heap_object); |
2445 return KEEP_SLOT; | 2608 return KEEP_SLOT; |
2446 } | 2609 } |
2447 return REMOVE_SLOT; | 2610 return REMOVE_SLOT; |
2448 } | 2611 } |
2449 | 2612 |
2450 static bool IsUnmarkedObject(Heap* heap, Object** p) { | 2613 static bool IsUnmarkedObject(Heap* heap, Object** p) { |
2451 DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p)); | 2614 DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p)); |
2452 return heap->InNewSpace(*p) && | 2615 return heap->InNewSpace(*p) && |
2453 !ObjectMarking::IsBlack(HeapObject::cast(*p), | 2616 !ObjectMarking::IsBlack(HeapObject::cast(*p), |
2454 MarkingState::Internal(HeapObject::cast(*p))); | 2617 MarkingState::External(HeapObject::cast(*p))); |
2455 } | 2618 } |
2456 | 2619 |
2457 void MinorMarkCompactCollector::MarkLiveObjects() { | 2620 void MinorMarkCompactCollector::MarkLiveObjects() { |
2458 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK); | 2621 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK); |
2459 | 2622 |
2460 PostponeInterruptsScope postpone(isolate()); | 2623 PostponeInterruptsScope postpone(isolate()); |
2461 | 2624 |
2462 StaticYoungGenerationMarkingVisitor::Initialize(heap()); | 2625 StaticYoungGenerationMarkingVisitor::Initialize(heap()); |
2463 RootMarkingVisitor root_visitor(this); | 2626 RootMarkingVisitor root_visitor(this); |
2464 | 2627 |
(...skipping 31 matching lines...) | |
2496 heap()->IterateEncounteredWeakCollections(&root_visitor); | 2659 heap()->IterateEncounteredWeakCollections(&root_visitor); |
2497 ProcessMarkingDeque(); | 2660 ProcessMarkingDeque(); |
2498 } | 2661 } |
2499 | 2662 |
2500 { | 2663 { |
2501 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES); | 2664 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES); |
2502 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( | 2665 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( |
2503 &IsUnmarkedObject); | 2666 &IsUnmarkedObject); |
2504 isolate() | 2667 isolate() |
2505 ->global_handles() | 2668 ->global_handles() |
2506 ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>( | 2669 ->IterateNewSpaceWeakUnmodifiedRoots< |
2507 &root_visitor); | 2670 GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&root_visitor); |
2508 ProcessMarkingDeque(); | 2671 ProcessMarkingDeque(); |
2509 } | 2672 } |
2510 | 2673 |
2511 marking_deque()->StopUsing(); | 2674 marking_deque()->StopUsing(); |
2512 } | 2675 } |
2513 | 2676 |
2514 void MinorMarkCompactCollector::ProcessMarkingDeque() { | 2677 void MinorMarkCompactCollector::ProcessMarkingDeque() { |
2515 EmptyMarkingDeque(); | 2678 EmptyMarkingDeque(); |
2516 DCHECK(!marking_deque()->overflowed()); | 2679 DCHECK(!marking_deque()->overflowed()); |
2517 DCHECK(marking_deque()->IsEmpty()); | 2680 DCHECK(marking_deque()->IsEmpty()); |
(...skipping 11 matching lines...) |
2529 object, MarkingState::External(object)))); | 2692 object, MarkingState::External(object)))); |
2530 | 2693 |
2531 Map* map = object->map(); | 2694 Map* map = object->map(); |
2532 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>( | 2695 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>( |
2533 object, MarkingState::External(object)))); | 2696 object, MarkingState::External(object)))); |
2534 StaticYoungGenerationMarkingVisitor::IterateBody(map, object); | 2697 StaticYoungGenerationMarkingVisitor::IterateBody(map, object); |
2535 } | 2698 } |
2536 } | 2699 } |
2537 | 2700 |
2538 void MinorMarkCompactCollector::CollectGarbage() { | 2701 void MinorMarkCompactCollector::CollectGarbage() { |
2702 heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted(); | |
2703 | |
2539 MarkLiveObjects(); | 2704 MarkLiveObjects(); |
2540 | 2705 ClearNonLiveReferences(); |
2541 #ifdef VERIFY_HEAP | 2706 #ifdef VERIFY_HEAP |
2542 if (FLAG_verify_heap) { | 2707 if (FLAG_verify_heap) { |
2543 YoungGenerationMarkingVerifier verifier(heap()); | 2708 YoungGenerationMarkingVerifier verifier(heap()); |
2544 verifier.Run(); | 2709 verifier.Run(); |
2545 } | 2710 } |
2546 #endif // VERIFY_HEAP | 2711 #endif // VERIFY_HEAP |
2712 | |
2713 std::vector<HeapObject*> black_allocation_objects; | |
2714 EvacuateNewSpace(&black_allocation_objects); | |
2715 #ifdef VERIFY_HEAP | |
2716 if (FLAG_verify_heap) { | |
2717 YoungGenerationEvacuationVerifier verifier(heap()); | |
2718 verifier.Run(); | |
2719 } | |
2720 #endif // VERIFY_HEAP | |
2721 | |
2722 heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge(); | |
2723 | |
2724 // Process black allocation objects after updating pointers as we otherwise | |
2725 // would end up with objects on the marking deque that potentially forward | |
2726 // to white objects. | |
Inline review thread on the TODO below:
Hannes Payer (out of office), 2017/05/02 16:17:26: If you add them to the main marking deque while ev
Michael Lippautz, 2017/05/02 16:56:17: The overhead is not noticeable. Also, it looks lik

2727 // TODO(mlippautz): Instead of processing them explicitly, we should just add | |
2728 // them to the marking deque for further processing. | |
2729 { | |
2730 TRACE_GC(heap()->tracer(), | |
2731 GCTracer::Scope::MINOR_MC_EVACUATE_PROCESS_BLACK_ALLOCATION); | |
2732 for (HeapObject* object : black_allocation_objects) { | |
2733 CHECK(ObjectMarking::IsBlack(object, MarkingState::Internal(object))); | |
2734 heap()->incremental_marking()->IterateBlackObject(object); | |
2735 } | |
2736 heap()->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer(); | |
2737 } | |
2738 | |
2739 { | |
2740 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS); | |
2741 for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(), | |
2742 heap()->new_space()->FromSpaceEnd())) { | |
2743 marking_state(p).ClearLiveness(); | |
2744 } | |
2745 } | |
2746 } | |
2747 | |
2748 void MinorMarkCompactCollector::ClearNonLiveReferences() { | |
2749 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR); | |
2750 | |
2751 { | |
2752 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE); | |
2753 // Internalized strings are always stored in old space, so there is no need | |
2754 // to clean them here. | |
2755 YoungGenerationExternalStringTableCleaner external_visitor(*this); | |
2756 heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor); | |
2757 heap()->external_string_table_.CleanUpNewSpaceStrings(); | |
2758 } | |
2759 | |
2760 { | |
2761 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS); | |
2762 // Process the weak references. | |
2763 MinorMarkCompactWeakObjectRetainer retainer(*this); | |
2764 heap()->ProcessYoungWeakReferences(&retainer); | |
2765 } | |
2766 } | |
2767 | |
2768 void MinorMarkCompactCollector::EvacuatePrologue() { | |
2769 NewSpace* new_space = heap()->new_space(); | |
2770 // Append the list of new space pages to be processed. | |
2771 for (Page* p : PageRange(new_space->bottom(), new_space->top())) { | |
2772 new_space_evacuation_pages_.Add(p); | |
2773 } | |
2774 new_space->Flip(); | |
2775 new_space->ResetAllocationInfo(); | |
2776 } | |
2777 | |
2778 void MinorMarkCompactCollector::EvacuateEpilogue() { | |
2779 heap()->new_space()->set_age_mark(heap()->new_space()->top()); | |
2780 } | |
2781 | |
2782 void MinorMarkCompactCollector::EvacuateNewSpace( | |
2783 std::vector<HeapObject*>* black_allocation_objects) { | |
2784 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | |
2785 Heap::RelocationLock relocation_lock(heap()); | |
2786 | |
2787 { | |
2788 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE); | |
2789 EvacuatePrologue(); | |
2790 } | |
2791 | |
2792 { | |
2793 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); | |
2794 EvacuatePagesInParallel(black_allocation_objects); | |
2795 } | |
2796 | |
2797 UpdatePointersAfterEvacuation(); | |
2798 | |
2799 { | |
2800 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE); | |
2801 if (!heap()->new_space()->Rebalance()) { | |
2802 FatalProcessOutOfMemory("NewSpace::Rebalance"); | |
2803 } | |
2804 } | |
2805 | |
2806 // Give pages that are queued to be freed back to the OS. | |
2807 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | |
2808 | |
2809 { | |
2810 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | |
2811 // TODO(mlippautz): Implement page promotion. | |
2812 new_space_evacuation_pages_.Rewind(0); | |
2813 } | |
2814 | |
2815 { | |
2816 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE); | |
2817 EvacuateEpilogue(); | |
2818 } | |
2547 } | 2819 } |
2548 | 2820 |
2549 void MarkCompactCollector::MarkLiveObjects() { | 2821 void MarkCompactCollector::MarkLiveObjects() { |
2550 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK); | 2822 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK); |
2551 // The recursive GC marker detects when it is nearing stack overflow, | 2823 // The recursive GC marker detects when it is nearing stack overflow, |
2552 // and switches to a different marking system. JS interrupts interfere | 2824 // and switches to a different marking system. JS interrupts interfere |
2553 // with the C stack limit check. | 2825 // with the C stack limit check. |
2554 PostponeInterruptsScope postpone(isolate()); | 2826 PostponeInterruptsScope postpone(isolate()); |
2555 | 2827 |
2556 { | 2828 { |
(...skipping 836 matching lines...) | |
3393 success = false; | 3665 success = false; |
3394 } else { | 3666 } else { |
3395 ArrayBufferTracker::ProcessBuffers( | 3667 ArrayBufferTracker::ProcessBuffers( |
3396 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | 3668 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
3397 } | 3669 } |
3398 break; | 3670 break; |
3399 } | 3671 } |
3400 return success; | 3672 return success; |
3401 } | 3673 } |
3402 | 3674 |
3675 class YoungGenerationEvacuator : public Evacuator { | |
3676 public: | |
3677 YoungGenerationEvacuator(MinorMarkCompactCollector* collector, | |
3678 RecordMigratedSlotVisitor* record_visitor) | |
3679 : Evacuator(collector->heap(), record_visitor), collector_(collector) {} | |
3680 | |
3681 protected: | |
3682 bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override; | |
3683 | |
3684 MinorMarkCompactCollector* collector_; | |
3685 }; | |
3686 | |
3687 bool YoungGenerationEvacuator::RawEvacuatePage(Page* page, | |
3688 intptr_t* live_bytes) { | |
3689 bool success = false; | |
3690 LiveObjectVisitor object_visitor; | |
3691 const MarkingState state = collector_->marking_state(page); | |
3692 *live_bytes = state.live_bytes(); | |
3693 switch (ComputeEvacuationMode(page)) { | |
3694 case kObjectsNewToOld: | |
3695 success = object_visitor.VisitBlackObjects( | |
3696 page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits); | |
3697 DCHECK(success); | |
3698 ArrayBufferTracker::ProcessBuffers( | |
3699 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
3700 break; | |
3701 case kPageNewToOld: | |
3702 // TODO(mlippautz): Implement page promotion. | |
3703 UNREACHABLE(); | |
3704 break; | |
3705 case kPageNewToNew: | |
3706 // TODO(mlippautz): Implement page promotion. | |
3707 UNREACHABLE(); | |
3708 break; | |
3709 case kObjectsOldToOld: | |
3710 UNREACHABLE(); | |
3711 break; | |
3712 } | |
3713 return success; | |
3714 } | |
3715 | |
3403 class EvacuationJobTraits { | 3716 class EvacuationJobTraits { |
3404 public: | 3717 public: |
3405 typedef int* PerPageData; // Pointer to number of aborted pages. | 3718 struct PageData { |
3719 int* abandoned_pages; // Pointer to number of aborted pages. | |
3720 MarkingState marking_state; | |
3721 }; | |
3722 | |
3723 typedef PageData PerPageData; | |
3406 typedef Evacuator* PerTaskData; | 3724 typedef Evacuator* PerTaskData; |
3407 | 3725 |
3408 static const bool NeedSequentialFinalization = true; | 3726 static const bool NeedSequentialFinalization = true; |
3409 | 3727 |
3410 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, | 3728 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, |
3411 MemoryChunk* chunk, PerPageData) { | 3729 MemoryChunk* chunk, PerPageData) { |
3412 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); | 3730 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); |
3413 } | 3731 } |
3414 | 3732 |
3415 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, | 3733 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, |
(...skipping 12 matching lines...) |
3428 if (success) { | 3746 if (success) { |
3429 DCHECK(p->IsEvacuationCandidate()); | 3747 DCHECK(p->IsEvacuationCandidate()); |
3430 DCHECK(p->SweepingDone()); | 3748 DCHECK(p->SweepingDone()); |
3431 p->Unlink(); | 3749 p->Unlink(); |
3432 } else { | 3750 } else { |
3433 // We have partially compacted the page, i.e., some objects may have | 3751 // We have partially compacted the page, i.e., some objects may have |
3434 // moved, others are still in place. | 3752 // moved, others are still in place. |
3435 p->ClearEvacuationCandidate(); | 3753 p->ClearEvacuationCandidate(); |
3436 // Slots have already been recorded so we just need to add it to the | 3754 // Slots have already been recorded so we just need to add it to the |
3437 // sweeper, which will happen after updating pointers. | 3755 // sweeper, which will happen after updating pointers. |
3438 *data += 1; | 3756 *data.abandoned_pages += 1; |
3439 } | 3757 } |
3440 break; | 3758 break; |
3441 default: | 3759 default: |
3442 UNREACHABLE(); | 3760 UNREACHABLE(); |
3443 } | 3761 } |
3444 } | 3762 } |
3445 }; | 3763 }; |
3446 | 3764 |
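EvacuationJobTraits now carries a small PageData aggregate per page instead of a bare int*, which is what the brace-initialized AddPage call sites below construct. A tiny self-contained sketch of that pattern under simplified, assumed types; only the aggregate initialization and the shared abandoned-pages counter are the point.

// Simplified types for illustration; not the V8 definitions.
struct MarkingState {
  long live_bytes;
};

struct PageData {
  int* abandoned_pages;        // shared counter, bumped for aborted pages
  MarkingState marking_state;  // per-page marking state snapshot
};

int main() {
  int abandoned_pages = 0;
  MarkingState state{4096};
  // Mirrors job.AddPage(page, {&abandoned_pages, marking_state(page)}).
  PageData data{&abandoned_pages, state};
  // Mirrors FinalizePageSequentially bumping the counter for an aborted page.
  *data.abandoned_pages += 1;
  return abandoned_pages == 1 ? 0 : 1;
}
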
3447 template <class Evacuator, class Collector> | 3765 template <class Evacuator, class Collector> |
3448 void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks( | 3766 void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks( |
3449 Collector* collector, PageParallelJob<EvacuationJobTraits>* job, | 3767 Collector* collector, PageParallelJob<EvacuationJobTraits>* job, |
3450 RecordMigratedSlotVisitor* record_visitor, const intptr_t live_bytes, | 3768 RecordMigratedSlotVisitor* record_visitor, |
3769 MigrationObserver* migration_observer, const intptr_t live_bytes, | |
3451 const int& abandoned_pages) { | 3770 const int& abandoned_pages) { |
3452 // Used for trace summary. | 3771 // Used for trace summary. |
3453 double compaction_speed = 0; | 3772 double compaction_speed = 0; |
3454 if (FLAG_trace_evacuation) { | 3773 if (FLAG_trace_evacuation) { |
3455 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3774 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
3456 } | 3775 } |
3457 | 3776 |
3458 const bool profiling = | 3777 const bool profiling = |
3459 heap()->isolate()->is_profiling() || | 3778 heap()->isolate()->is_profiling() || |
3460 heap()->isolate()->logger()->is_logging_code_events() || | 3779 heap()->isolate()->logger()->is_logging_code_events() || |
3461 heap()->isolate()->heap_profiler()->is_tracking_object_moves(); | 3780 heap()->isolate()->heap_profiler()->is_tracking_object_moves(); |
3462 ProfilingMigrationObserver profiling_observer(heap()); | 3781 ProfilingMigrationObserver profiling_observer(heap()); |
3463 | 3782 |
3464 const int wanted_num_tasks = | 3783 const int wanted_num_tasks = |
3465 NumberOfParallelCompactionTasks(job->NumberOfPages(), live_bytes); | 3784 NumberOfParallelCompactionTasks(job->NumberOfPages(), live_bytes); |
3466 Evacuator** evacuators = new Evacuator*[wanted_num_tasks]; | 3785 Evacuator** evacuators = new Evacuator*[wanted_num_tasks]; |
3467 for (int i = 0; i < wanted_num_tasks; i++) { | 3786 for (int i = 0; i < wanted_num_tasks; i++) { |
3468 evacuators[i] = new Evacuator(collector, record_visitor); | 3787 evacuators[i] = new Evacuator(collector, record_visitor); |
3469 if (profiling) evacuators[i]->AddObserver(&profiling_observer); | 3788 if (profiling) evacuators[i]->AddObserver(&profiling_observer); |
3789 if (migration_observer != nullptr) | |
3790 evacuators[i]->AddObserver(migration_observer); | |
3470 } | 3791 } |
3471 job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); | 3792 job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); |
3472 const Address top = heap()->new_space()->top(); | 3793 const Address top = heap()->new_space()->top(); |
3473 for (int i = 0; i < wanted_num_tasks; i++) { | 3794 for (int i = 0; i < wanted_num_tasks; i++) { |
3474 evacuators[i]->Finalize(); | 3795 evacuators[i]->Finalize(); |
3475 // Try to find the last LAB that was used for new space allocation in | 3796 // Try to find the last LAB that was used for new space allocation in |
3476 // evacuation tasks. If it was adjacent to the current top, move top back. | 3797 // evacuation tasks. If it was adjacent to the current top, move top back. |
3477 const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB(); | 3798 const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB(); |
3478 if (info.limit() != nullptr && info.limit() == top) { | 3799 if (info.limit() != nullptr && info.limit() == top) { |
3479 DCHECK_NOT_NULL(info.top()); | 3800 DCHECK_NOT_NULL(info.top()); |
(...skipping 18 matching lines...) |
3498 | 3819 |
3499 void MarkCompactCollector::EvacuatePagesInParallel() { | 3820 void MarkCompactCollector::EvacuatePagesInParallel() { |
3500 PageParallelJob<EvacuationJobTraits> job( | 3821 PageParallelJob<EvacuationJobTraits> job( |
3501 heap_, heap_->isolate()->cancelable_task_manager(), | 3822 heap_, heap_->isolate()->cancelable_task_manager(), |
3502 &page_parallel_job_semaphore_); | 3823 &page_parallel_job_semaphore_); |
3503 | 3824 |
3504 int abandoned_pages = 0; | 3825 int abandoned_pages = 0; |
3505 intptr_t live_bytes = 0; | 3826 intptr_t live_bytes = 0; |
3506 for (Page* page : old_space_evacuation_pages_) { | 3827 for (Page* page : old_space_evacuation_pages_) { |
3507 live_bytes += MarkingState::Internal(page).live_bytes(); | 3828 live_bytes += MarkingState::Internal(page).live_bytes(); |
3508 job.AddPage(page, &abandoned_pages); | 3829 job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
3509 } | 3830 } |
3510 | 3831 |
3511 const bool reduce_memory = heap()->ShouldReduceMemory(); | 3832 const bool reduce_memory = heap()->ShouldReduceMemory(); |
3512 const Address age_mark = heap()->new_space()->age_mark(); | 3833 const Address age_mark = heap()->new_space()->age_mark(); |
3513 for (Page* page : new_space_evacuation_pages_) { | 3834 for (Page* page : new_space_evacuation_pages_) { |
3514 intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes(); | 3835 intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes(); |
3515 live_bytes += live_bytes_on_page; | 3836 live_bytes += live_bytes_on_page; |
3516 if (!reduce_memory && !page->NeverEvacuate() && | 3837 if (!reduce_memory && !page->NeverEvacuate() && |
3517 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) && | 3838 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) && |
3518 !page->Contains(age_mark) && | 3839 !page->Contains(age_mark) && |
3519 heap()->CanExpandOldGeneration(live_bytes_on_page)) { | 3840 heap()->CanExpandOldGeneration(live_bytes_on_page)) { |
3520 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { | 3841 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { |
3521 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); | 3842 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); |
3522 } else { | 3843 } else { |
3523 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); | 3844 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); |
3524 } | 3845 } |
3525 } | 3846 } |
3526 | 3847 |
3527 job.AddPage(page, &abandoned_pages); | 3848 job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
3528 } | 3849 } |
3529 DCHECK_GE(job.NumberOfPages(), 1); | 3850 DCHECK_GE(job.NumberOfPages(), 1); |
3530 | 3851 |
3531 RecordMigratedSlotVisitor record_visitor(this); | 3852 RecordMigratedSlotVisitor record_visitor(this); |
3532 CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &job, &record_visitor, | 3853 CreateAndExecuteEvacuationTasks<FullEvacuator>( |
3533 live_bytes, abandoned_pages); | 3854 this, &job, &record_visitor, nullptr, live_bytes, abandoned_pages); |
3855 } | |
3856 | |
3857 void MinorMarkCompactCollector::EvacuatePagesInParallel( | |
3858 std::vector<HeapObject*>* black_allocation_objects) { | |
3859 PageParallelJob<EvacuationJobTraits> job( | |
3860 heap_, heap_->isolate()->cancelable_task_manager(), | |
3861 &page_parallel_job_semaphore_); | |
3862 int abandoned_pages = 0; | |
3863 intptr_t live_bytes = 0; | |
3864 | |
3865 for (Page* page : new_space_evacuation_pages_) { | |
3866 intptr_t live_bytes_on_page = marking_state(page).live_bytes(); | |
3867 live_bytes += live_bytes_on_page; | |
3868 // TODO(mlippautz): Implement page promotion. | |
3869 job.AddPage(page, {&abandoned_pages, marking_state(page)}); | |
3870 } | |
3871 DCHECK_GE(job.NumberOfPages(), 1); | |
3872 | |
3873 YoungGenerationMigrationObserver observer( | |
3874 heap(), heap()->mark_compact_collector(), black_allocation_objects); | |
3875 YoungGenerationRecordMigratedSlotVisitor record_visitor( | |
3876 heap()->mark_compact_collector()); | |
3877 CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>( | |
3878 this, &job, &record_visitor, &observer, live_bytes, abandoned_pages); | |
3534 } | 3879 } |
3535 | 3880 |
3536 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 3881 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
3537 public: | 3882 public: |
3538 virtual Object* RetainAs(Object* object) { | 3883 virtual Object* RetainAs(Object* object) { |
3539 if (object->IsHeapObject()) { | 3884 if (object->IsHeapObject()) { |
3540 HeapObject* heap_object = HeapObject::cast(object); | 3885 HeapObject* heap_object = HeapObject::cast(object); |
3541 MapWord map_word = heap_object->map_word(); | 3886 MapWord map_word = heap_object->map_word(); |
3542 if (map_word.IsForwardingAddress()) { | 3887 if (map_word.IsForwardingAddress()) { |
3543 return map_word.ToForwardingAddress(); | 3888 return map_word.ToForwardingAddress(); |
(...skipping 304 matching lines...) | |
3848 UpdateUntypedPointers(heap, chunk); | 4193 UpdateUntypedPointers(heap, chunk); |
3849 UpdateTypedPointers(heap, chunk); | 4194 UpdateTypedPointers(heap, chunk); |
3850 return true; | 4195 return true; |
3851 } | 4196 } |
3852 static const bool NeedSequentialFinalization = false; | 4197 static const bool NeedSequentialFinalization = false; |
3853 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 4198 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
3854 } | 4199 } |
3855 | 4200 |
3856 private: | 4201 private: |
3857 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { | 4202 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { |
4203 base::LockGuard<base::RecursiveMutex> guard(chunk->mutex()); | |
3858 if (type == OLD_TO_NEW) { | 4204 if (type == OLD_TO_NEW) { |
3859 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { | 4205 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { |
3860 return CheckAndUpdateOldToNewSlot(heap, slot); | 4206 return CheckAndUpdateOldToNewSlot(heap, slot); |
3861 }); | 4207 }); |
3862 } else { | 4208 } else { |
3863 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { | 4209 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { |
3864 return UpdateSlot(reinterpret_cast<Object**>(slot)); | 4210 return UpdateSlot(reinterpret_cast<Object**>(slot)); |
3865 }); | 4211 }); |
3866 } | 4212 } |
3867 } | 4213 } |
(...skipping 86 matching lines...) | |
3954 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 4300 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
3955 RememberedSet<type>::IterateMemoryChunks( | 4301 RememberedSet<type>::IterateMemoryChunks( |
3956 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); | 4302 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); |
3957 int num_pages = job.NumberOfPages(); | 4303 int num_pages = job.NumberOfPages(); |
3958 int num_tasks = NumberOfPointerUpdateTasks(num_pages); | 4304 int num_tasks = NumberOfPointerUpdateTasks(num_pages); |
3959 job.Run(num_tasks, [](int i) { return 0; }); | 4305 job.Run(num_tasks, [](int i) { return 0; }); |
3960 } | 4306 } |
3961 | 4307 |
3962 class ToSpacePointerUpdateJobTraits { | 4308 class ToSpacePointerUpdateJobTraits { |
3963 public: | 4309 public: |
3964 typedef std::pair<Address, Address> PerPageData; | 4310 struct PageData { |
4311 Address start; | |
4312 Address end; | |
4313 MarkingState marking_state; | |
4314 }; | |
4315 | |
4316 typedef PageData PerPageData; | |
3965 typedef PointersUpdatingVisitor* PerTaskData; | 4317 typedef PointersUpdatingVisitor* PerTaskData; |
3966 | 4318 |
3967 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, | 4319 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, |
3968 MemoryChunk* chunk, PerPageData limits) { | 4320 MemoryChunk* chunk, PerPageData page_data) { |
3969 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { | 4321 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { |
3970 // New->new promoted pages contain garbage so they require iteration | 4322 // New->new promoted pages contain garbage so they require iteration |
3971 // using markbits. | 4323 // using markbits. |
3972 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits); | 4324 ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data); |
3973 } else { | 4325 } else { |
3974 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits); | 4326 ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data); |
3975 } | 4327 } |
3976 return true; | 4328 return true; |
3977 } | 4329 } |
3978 | 4330 |
3979 static const bool NeedSequentialFinalization = false; | 4331 static const bool NeedSequentialFinalization = false; |
3980 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 4332 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
3981 } | 4333 } |
3982 | 4334 |
3983 private: | 4335 private: |
3984 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, | 4336 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, |
3985 MemoryChunk* chunk, | 4337 MemoryChunk* chunk, |
3986 PerPageData limits) { | 4338 PerPageData page_data) { |
3987 for (Address cur = limits.first; cur < limits.second;) { | 4339 for (Address cur = page_data.start; cur < page_data.end;) { |
3988 HeapObject* object = HeapObject::FromAddress(cur); | 4340 HeapObject* object = HeapObject::FromAddress(cur); |
3989 Map* map = object->map(); | 4341 Map* map = object->map(); |
3990 int size = object->SizeFromMap(map); | 4342 int size = object->SizeFromMap(map); |
3991 object->IterateBody(map->instance_type(), size, visitor); | 4343 object->IterateBody(map->instance_type(), size, visitor); |
3992 cur += size; | 4344 cur += size; |
3993 } | 4345 } |
3994 } | 4346 } |
3995 | 4347 |
3996 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, | 4348 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, |
3997 MemoryChunk* chunk, | 4349 MemoryChunk* chunk, |
3998 PerPageData limits) { | 4350 PerPageData page_data) { |
3999 LiveObjectIterator<kBlackObjects> it(chunk, MarkingState::Internal(chunk)); | 4351 LiveObjectIterator<kBlackObjects> it(chunk, page_data.marking_state); |
4000 HeapObject* object = NULL; | 4352 HeapObject* object = NULL; |
4001 while ((object = it.Next()) != NULL) { | 4353 while ((object = it.Next()) != NULL) { |
4002 Map* map = object->map(); | 4354 Map* map = object->map(); |
4003 int size = object->SizeFromMap(map); | 4355 int size = object->SizeFromMap(map); |
4004 object->IterateBody(map->instance_type(), size, visitor); | 4356 object->IterateBody(map->instance_type(), size, visitor); |
4005 } | 4357 } |
4006 } | 4358 } |
4007 }; | 4359 }; |
4008 | 4360 |
4009 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { | 4361 template <class MarkingStateProvider> |
4362 void UpdateToSpacePointersInParallel( | |
4363 Heap* heap, base::Semaphore* semaphore, | |
4364 const MarkingStateProvider& marking_state_provider) { | |
4010 PageParallelJob<ToSpacePointerUpdateJobTraits> job( | 4365 PageParallelJob<ToSpacePointerUpdateJobTraits> job( |
4011 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 4366 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
4012 Address space_start = heap->new_space()->bottom(); | 4367 Address space_start = heap->new_space()->bottom(); |
4013 Address space_end = heap->new_space()->top(); | 4368 Address space_end = heap->new_space()->top(); |
4014 for (Page* page : PageRange(space_start, space_end)) { | 4369 for (Page* page : PageRange(space_start, space_end)) { |
4015 Address start = | 4370 Address start = |
4016 page->Contains(space_start) ? space_start : page->area_start(); | 4371 page->Contains(space_start) ? space_start : page->area_start(); |
4017 Address end = page->Contains(space_end) ? space_end : page->area_end(); | 4372 Address end = page->Contains(space_end) ? space_end : page->area_end(); |
4018 job.AddPage(page, std::make_pair(start, end)); | 4373 job.AddPage(page, {start, end, marking_state_provider.marking_state(page)}); |
4019 } | 4374 } |
4020 PointersUpdatingVisitor visitor; | 4375 PointersUpdatingVisitor visitor; |
4021 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; | 4376 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; |
4022 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); | 4377 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); |
4023 } | 4378 } |
4024 | 4379 |
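UpdateToSpacePointersInParallel is now templated over a marking-state provider, so each collector can pass *this and have its own marking_state(Page*) accessor consulted per page. A toy sketch of that duck-typed template parameter with placeholder types (not the V8 ones):

// Placeholder types; the point is the duck-typed template parameter.
struct Page {};
struct MarkingState {
  int id;
};

// Any provider exposing marking_state(Page*) const satisfies the template,
// mirroring how both collectors hand themselves to the pointer-update job.
template <class MarkingStateProvider>
MarkingState StateFor(Page* page, const MarkingStateProvider& provider) {
  return provider.marking_state(page);
}

struct MajorCollectorSketch {
  MarkingState marking_state(Page*) const { return MarkingState{1}; }
};

struct MinorCollectorSketch {
  MarkingState marking_state(Page*) const { return MarkingState{2}; }
};

int main() {
  Page page;
  MajorCollectorSketch major;
  MinorCollectorSketch minor;
  return StateFor(&page, major).id + StateFor(&page, minor).id == 3 ? 0 : 1;
}
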
4025 void MarkCompactCollector::UpdatePointersAfterEvacuation() { | 4380 void MarkCompactCollector::UpdatePointersAfterEvacuation() { |
4026 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); | 4381 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); |
4027 | 4382 |
4028 | 4383 |
4029 { | 4384 { |
4030 TRACE_GC(heap()->tracer(), | 4385 TRACE_GC(heap()->tracer(), |
4031 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); | 4386 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); |
4032 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_); | 4387 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, |
4388 *this); | |
4033 // Update roots. | 4389 // Update roots. |
4034 PointersUpdatingVisitor updating_visitor; | 4390 PointersUpdatingVisitor updating_visitor; |
4035 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 4391 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
4036 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); | 4392 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); |
4037 } | 4393 } |
4038 | 4394 |
4039 { | 4395 { |
4040 Heap* heap = this->heap(); | 4396 Heap* heap = this->heap(); |
4041 TRACE_GC(heap->tracer(), | 4397 TRACE_GC(heap->tracer(), |
4042 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); | 4398 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); |
4043 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); | 4399 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); |
4044 } | 4400 } |
4045 | 4401 |
4046 { | 4402 { |
4047 TRACE_GC(heap()->tracer(), | 4403 TRACE_GC(heap()->tracer(), |
4048 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); | 4404 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); |
4049 // Update pointers from external string table. | 4405 // Update pointers from external string table. |
4050 heap_->UpdateReferencesInExternalStringTable( | 4406 heap_->UpdateReferencesInExternalStringTable( |
4051 &UpdateReferenceInExternalStringTableEntry); | 4407 &UpdateReferenceInExternalStringTableEntry); |
4052 | 4408 |
4053 EvacuationWeakObjectRetainer evacuation_object_retainer; | 4409 EvacuationWeakObjectRetainer evacuation_object_retainer; |
4054 heap()->ProcessWeakListRoots(&evacuation_object_retainer); | 4410 heap()->ProcessWeakListRoots(&evacuation_object_retainer); |
4055 } | 4411 } |
4056 } | 4412 } |
4057 | 4413 |
4414 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() { | |
4415 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); | |
4416 | |
4417 PointersUpdatingVisitor updating_visitor; | |
4418 | |
4419 { | |
4420 TRACE_GC(heap()->tracer(), | |
4421 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); | |
4422 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, | |
4423 *this); | |
4424 // TODO(mlippautz): Iteration mode is not optimal as we process all | |
4425 // global handles. Find a way to only process the ones related to new | |
4426 // space. | |
4427 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | |
4428 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); | |
4429 } | |
4430 | |
4431 { | |
4432 TRACE_GC(heap()->tracer(), | |
4433 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); | |
4434 | |
4435 EvacuationWeakObjectRetainer evacuation_object_retainer; | |
4436 heap()->ProcessWeakListRoots(&evacuation_object_retainer); | |
4437 | |
4438 // Update pointers from external string table. | |
4439 heap()->UpdateNewSpaceReferencesInExternalStringTable( | |
4440 &UpdateReferenceInExternalStringTableEntry); | |
4441 heap()->IterateEncounteredWeakCollections(&updating_visitor); | |
4442 } | |
4443 } | |
4058 | 4444 |
4059 void MarkCompactCollector::ReleaseEvacuationCandidates() { | 4445 void MarkCompactCollector::ReleaseEvacuationCandidates() { |
4060 for (Page* p : old_space_evacuation_pages_) { | 4446 for (Page* p : old_space_evacuation_pages_) { |
4061 if (!p->IsEvacuationCandidate()) continue; | 4447 if (!p->IsEvacuationCandidate()) continue; |
4062 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 4448 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
4063 MarkingState::Internal(p).SetLiveBytes(0); | 4449 MarkingState::Internal(p).SetLiveBytes(0); |
4064 CHECK(p->SweepingDone()); | 4450 CHECK(p->SweepingDone()); |
4065 space->ReleasePage(p); | 4451 space->ReleasePage(p); |
4066 } | 4452 } |
4067 old_space_evacuation_pages_.Rewind(0); | 4453 old_space_evacuation_pages_.Rewind(0); |
(...skipping 202 matching lines...) | |
4270 // The target is always in old space, we don't have to record the slot in | 4656 // The target is always in old space, we don't have to record the slot in |
4271 // the old-to-new remembered set. | 4657 // the old-to-new remembered set. |
4272 DCHECK(!heap()->InNewSpace(target)); | 4658 DCHECK(!heap()->InNewSpace(target)); |
4273 RecordRelocSlot(host, &rinfo, target); | 4659 RecordRelocSlot(host, &rinfo, target); |
4274 } | 4660 } |
4275 } | 4661 } |
4276 } | 4662 } |
4277 | 4663 |
4278 } // namespace internal | 4664 } // namespace internal |
4279 } // namespace v8 | 4665 } // namespace v8 |