OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 286 matching lines...) | |
297 HeapObject* object = HeapObject::cast(*current); | 297 HeapObject* object = HeapObject::cast(*current); |
298 if (heap()->InNewSpace(object)) { | 298 if (heap()->InNewSpace(object)) { |
299 CHECK(heap()->InToSpace(object)); | 299 CHECK(heap()->InToSpace(object)); |
300 } | 300 } |
301 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); | 301 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); |
302 } | 302 } |
303 } | 303 } |
304 } | 304 } |
305 }; | 305 }; |
306 | 306 |
307 class YoungGenerationEvacuationVerifier : public EvacuationVerifier { | |
308 public: | |
309 explicit YoungGenerationEvacuationVerifier(Heap* heap) | |
310 : EvacuationVerifier(heap) {} | |
311 | |
312 void Run() override { | |
313 VerifyRoots(VISIT_ALL_IN_SCAVENGE); | |
314 VerifyEvacuation(heap_->new_space()); | |
315 VerifyEvacuation(heap_->old_space()); | |
316 VerifyEvacuation(heap_->code_space()); | |
317 VerifyEvacuation(heap_->map_space()); | |
318 } | |
319 | |
320 protected: | |
321 void VerifyPointers(Object** start, Object** end) override { | |
322 for (Object** current = start; current < end; current++) { | |
323 if ((*current)->IsHeapObject()) { | |
324 HeapObject* object = HeapObject::cast(*current); | |
325 if (heap()->InNewSpace(object)) { | |
ulan 2017/05/03 14:27:20: CHECK_IMPLIES would be more readable here.
Michael Lippautz 2017/05/03 15:08:03: Done.
326 CHECK(heap()->InToSpace(object)); | |
327 } | |
328 } | |
329 } | |
330 } | |
331 }; | |
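As a minimal sketch of ulan's CHECK_IMPLIES suggestion above (resolved with "Done"), the verifier loop in the new YoungGenerationEvacuationVerifier would read roughly as follows; this only illustrates the resolved comment and is not a quote of the landed code:

  void VerifyPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        // CHECK_IMPLIES folds the nested if/CHECK pair into one assertion:
        // an object in new space must be in to-space after evacuation.
        CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
      }
    }
  }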
332 | |
307 } // namespace | 333 } // namespace |
308 #endif // VERIFY_HEAP | 334 #endif // VERIFY_HEAP |
309 | 335 |
310 // ============================================================================= | 336 // ============================================================================= |
311 // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector | 337 // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector |
312 // ============================================================================= | 338 // ============================================================================= |
313 | 339 |
314 int MarkCompactCollectorBase::NumberOfParallelCompactionTasks( | 340 int MarkCompactCollectorBase::NumberOfParallelCompactionTasks( |
315 int pages, intptr_t live_bytes) { | 341 int pages, intptr_t live_bytes) { |
316 if (!FLAG_parallel_compaction) return 1; | 342 if (!FLAG_parallel_compaction) return 1; |
(...skipping 116 matching lines...) | |
433 | 459 |
434 #ifdef VERIFY_HEAP | 460 #ifdef VERIFY_HEAP |
435 if (FLAG_verify_heap) { | 461 if (FLAG_verify_heap) { |
436 FullMarkingVerifier verifier(heap()); | 462 FullMarkingVerifier verifier(heap()); |
437 verifier.Run(); | 463 verifier.Run(); |
438 } | 464 } |
439 #endif | 465 #endif |
440 | 466 |
441 StartSweepSpaces(); | 467 StartSweepSpaces(); |
442 | 468 |
443 EvacuateNewSpaceAndCandidates(); | 469 Evacuate(); |
444 | 470 |
445 Finish(); | 471 Finish(); |
446 } | 472 } |
447 | 473 |
448 #ifdef VERIFY_HEAP | 474 #ifdef VERIFY_HEAP |
449 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { | 475 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { |
450 for (Page* p : *space) { | 476 for (Page* p : *space) { |
451 const MarkingState state = MarkingState::Internal(p); | 477 const MarkingState state = MarkingState::Internal(p); |
452 CHECK(state.bitmap()->IsClean()); | 478 CHECK(state.bitmap()->IsClean()); |
453 CHECK_EQ(0, state.live_bytes()); | 479 CHECK_EQ(0, state.live_bytes()); |
(...skipping 1139 matching lines...) | |
1593 *p = the_hole; | 1619 *p = the_hole; |
1594 } | 1620 } |
1595 } | 1621 } |
1596 } | 1622 } |
1597 } | 1623 } |
1598 | 1624 |
1599 private: | 1625 private: |
1600 Heap* heap_; | 1626 Heap* heap_; |
1601 }; | 1627 }; |
1602 | 1628 |
1629 // Helper class for pruning the string table. | |
1630 class YoungGenerationExternalStringTableCleaner : public RootVisitor { | |
1631 public: | |
1632 YoungGenerationExternalStringTableCleaner( | |
1633 const MinorMarkCompactCollector& collector) | |
1634 : heap_(collector.heap()), collector_(collector) {} | |
1635 | |
1636 void VisitRootPointers(Root root, Object** start, Object** end) override { | |
1637 DCHECK_EQ(static_cast<int>(root), | |
1638 static_cast<int>(Root::kExternalStringsTable)); | |
1639 // Visit all HeapObject pointers in [start, end). | |
1640 for (Object** p = start; p < end; p++) { | |
1641 Object* o = *p; | |
1642 if (o->IsHeapObject()) { | |
1643 HeapObject* heap_object = HeapObject::cast(o); | |
1644 if (ObjectMarking::IsWhite(heap_object, | |
1645 collector_.marking_state(heap_object))) { | |
1646 if (o->IsExternalString()) { | |
1647 heap_->FinalizeExternalString(String::cast(*p)); | |
1648 } else { | |
1649 // The original external string may have been internalized. | |
1650 DCHECK(o->IsThinString()); | |
1651 } | |
1652 // Set the entry to the_hole_value (as deleted). | |
1653 *p = heap_->the_hole_value(); | |
1654 } | |
1655 } | |
1656 } | |
1657 } | |
1658 | |
1659 private: | |
1660 Heap* heap_; | |
1661 const MinorMarkCompactCollector& collector_; | |
1662 }; | |
1663 | |
1664 // Marked young generation objects and all old generation objects will be | |
1665 // retained. | |
1666 class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer { | |
1667 public: | |
1668 explicit MinorMarkCompactWeakObjectRetainer( | |
1669 const MinorMarkCompactCollector& collector) | |
1670 : collector_(collector) {} | |
1671 | |
1672 virtual Object* RetainAs(Object* object) { | |
1673 HeapObject* heap_object = HeapObject::cast(object); | |
1674 if (!collector_.heap()->InNewSpace(heap_object)) return object; | |
1675 | |
1676 DCHECK(!ObjectMarking::IsGrey(heap_object, | |
1677 collector_.marking_state(heap_object))); | |
1678 if (ObjectMarking::IsBlack(heap_object, | |
1679 collector_.marking_state(heap_object))) { | |
1680 return object; | |
1681 } | |
1682 return nullptr; | |
1683 } | |
1684 | |
1685 private: | |
1686 const MinorMarkCompactCollector& collector_; | |
1687 }; | |
1688 | |
1603 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects | 1689 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects |
1604 // are retained. | 1690 // are retained. |
1605 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { | 1691 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { |
1606 public: | 1692 public: |
1607 virtual Object* RetainAs(Object* object) { | 1693 virtual Object* RetainAs(Object* object) { |
1608 HeapObject* heap_object = HeapObject::cast(object); | 1694 HeapObject* heap_object = HeapObject::cast(object); |
1609 DCHECK(!ObjectMarking::IsGrey(heap_object, | 1695 DCHECK(!ObjectMarking::IsGrey(heap_object, |
1610 MarkingState::Internal(heap_object))); | 1696 MarkingState::Internal(heap_object))); |
1611 if (ObjectMarking::IsBlack(heap_object, | 1697 if (ObjectMarking::IsBlack(heap_object, |
1612 MarkingState::Internal(heap_object))) { | 1698 MarkingState::Internal(heap_object))) { |
(...skipping 104 matching lines...) | |
1717 inline void VisitCellPointer(Code* host, RelocInfo* rinfo) override { | 1803 inline void VisitCellPointer(Code* host, RelocInfo* rinfo) override { |
1718 DCHECK_EQ(host, rinfo->host()); | 1804 DCHECK_EQ(host, rinfo->host()); |
1719 DCHECK(rinfo->rmode() == RelocInfo::CELL); | 1805 DCHECK(rinfo->rmode() == RelocInfo::CELL); |
1720 Cell* cell = rinfo->target_cell(); | 1806 Cell* cell = rinfo->target_cell(); |
1721 // The cell is always in old space, we don't have to record the slot in | 1807 // The cell is always in old space, we don't have to record the slot in |
1722 // the old-to-new remembered set. | 1808 // the old-to-new remembered set. |
1723 DCHECK(!collector_->heap()->InNewSpace(cell)); | 1809 DCHECK(!collector_->heap()->InNewSpace(cell)); |
1724 collector_->RecordRelocSlot(host, rinfo, cell); | 1810 collector_->RecordRelocSlot(host, rinfo, cell); |
1725 } | 1811 } |
1726 | 1812 |
1727 // Entries that will never move. | |
1728 inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override { | 1813 inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override { |
1729 DCHECK_EQ(host, rinfo->host()); | 1814 DCHECK_EQ(host, rinfo->host()); |
1730 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); | 1815 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); |
1731 Code* stub = rinfo->code_age_stub(); | 1816 Code* stub = rinfo->code_age_stub(); |
1732 USE(stub); | 1817 USE(stub); |
1733 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate()); | 1818 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate()); |
1734 } | 1819 } |
1735 | 1820 |
1736 // Entries that are skipped for recording. | 1821 // Entries that are skipped for recording. |
1737 inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {} | 1822 inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {} |
(...skipping 36 matching lines...) | |
1774 inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst, | 1859 inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst, |
1775 int size) final { | 1860 int size) final { |
1776 if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) { | 1861 if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) { |
1777 PROFILE(heap_->isolate(), | 1862 PROFILE(heap_->isolate(), |
1778 CodeMoveEvent(AbstractCode::cast(src), dst->address())); | 1863 CodeMoveEvent(AbstractCode::cast(src), dst->address())); |
1779 } | 1864 } |
1780 heap_->OnMoveEvent(dst, src, size); | 1865 heap_->OnMoveEvent(dst, src, size); |
1781 } | 1866 } |
1782 }; | 1867 }; |
1783 | 1868 |
1869 class YoungGenerationMigrationObserver final : public MigrationObserver { | |
1870 public: | |
1871 YoungGenerationMigrationObserver(Heap* heap, | |
1872 MarkCompactCollector* mark_compact_collector) | |
1873 : MigrationObserver(heap), | |
1874 mark_compact_collector_(mark_compact_collector) {} | |
1875 | |
1876 inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst, | |
1877 int size) final { | |
1878 // Migrate color to old generation marking in case the object survived young | |
1879 // generation garbage collection. | |
1880 if (heap_->incremental_marking()->IsMarking()) { | |
1881 DCHECK(ObjectMarking::IsWhite( | |
1882 dst, mark_compact_collector_->marking_state(dst))); | |
1883 heap_->incremental_marking()->TransferColor<MarkBit::ATOMIC>(src, dst); | |
Hannes Payer (out of office) 2017/05/03 14:44:53: Note: Ideally, this one would be non-atomic.
Michael Lippautz 2017/05/03 15:08:03: Acknowledged.
1884 } | |
1885 } | |
1886 | |
1887 protected: | |
1888 base::Mutex mutex_; | |
1889 MarkCompactCollector* mark_compact_collector_; | |
1890 }; | |
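To illustrate Hannes Payer's note above: the patch transfers the mark color with MarkBit::ATOMIC, and a non-atomic transfer, using the NON_ATOMIC access mode the patch already uses elsewhere (e.g. WhiteToBlack<MarkBit::NON_ATOMIC>), would look roughly like the sketch below. Whether TransferColor accepts that mode in this context is an assumption; the change as reviewed keeps the atomic variant (see "Acknowledged").

  inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                   int size) final {
    if (heap_->incremental_marking()->IsMarking()) {
      DCHECK(ObjectMarking::IsWhite(
          dst, mark_compact_collector_->marking_state(dst)));
      // Hypothetical non-atomic color transfer; only safe if no concurrent
      // marker touches these mark bits during the young generation pause.
      heap_->incremental_marking()->TransferColor<MarkBit::NON_ATOMIC>(src,
                                                                       dst);
    }
  }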
1891 | |
1892 class YoungGenerationRecordMigratedSlotVisitor final | |
1893 : public RecordMigratedSlotVisitor { | |
1894 public: | |
1895 explicit YoungGenerationRecordMigratedSlotVisitor( | |
1896 MarkCompactCollector* collector) | |
1897 : RecordMigratedSlotVisitor(collector) {} | |
1898 | |
1899 inline void VisitCodeEntry(JSFunction* host, Address code_entry_slot) final { | |
1900 Address code_entry = Memory::Address_at(code_entry_slot); | |
1901 if (Page::FromAddress(code_entry)->IsEvacuationCandidate() && | |
1902 IsLive(host)) { | |
1903 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot), | |
1904 nullptr, CODE_ENTRY_SLOT, | |
1905 code_entry_slot); | |
1906 } | |
1907 } | |
1908 | |
1909 void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } | |
1910 void VisitDebugTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } | |
1911 void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final { | |
1912 UNREACHABLE(); | |
1913 } | |
1914 void VisitCellPointer(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } | |
1915 void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) final { | |
1916 UNREACHABLE(); | |
1917 } | |
1918 | |
1919 private: | |
1920 // Only record slots for host objects that are considered as live by the full | |
1921 // collector. | |
1922 inline bool IsLive(HeapObject* object) { | |
1923 return ObjectMarking::IsBlack(object, collector_->marking_state(object)); | |
ulan 2017/05/03 14:27:20: How do we know that the collector_ is the full collector?
Michael Lippautz 2017/05/03 15:08:03: collector_ is of type MarkCompactCollector, not the MinorMarkCompactCollector.
1924 } | |
1925 | |
1926 inline void RecordMigratedSlot(HeapObject* host, Object* value, | |
1927 Address slot) final { | |
1928 if (value->IsHeapObject()) { | |
1929 Page* p = Page::FromAddress(reinterpret_cast<Address>(value)); | |
1930 if (p->InNewSpace()) { | |
1931 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot); | |
1932 } else if (p->IsEvacuationCandidate() && IsLive(host)) { | |
1933 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot); | |
1934 } | |
1935 } | |
1936 } | |
1937 }; | |
1938 | |
1784 class HeapObjectVisitor { | 1939 class HeapObjectVisitor { |
1785 public: | 1940 public: |
1786 virtual ~HeapObjectVisitor() {} | 1941 virtual ~HeapObjectVisitor() {} |
1787 virtual bool Visit(HeapObject* object) = 0; | 1942 virtual bool Visit(HeapObject* object) = 0; |
1788 }; | 1943 }; |
1789 | 1944 |
1790 class EvacuateVisitorBase : public HeapObjectVisitor { | 1945 class EvacuateVisitorBase : public HeapObjectVisitor { |
1791 public: | 1946 public: |
1792 void AddObserver(MigrationObserver* observer) { | 1947 void AddObserver(MigrationObserver* observer) { |
1793 migration_function_ = RawMigrateObject<MigrationMode::kObserved>; | 1948 migration_function_ = RawMigrateObject<MigrationMode::kObserved>; |
(...skipping 562 matching lines...) | |
2356 return KEEP_SLOT; | 2511 return KEEP_SLOT; |
2357 } | 2512 } |
2358 ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(heap_object, state); | 2513 ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(heap_object, state); |
2359 StaticYoungGenerationMarkingVisitor::IterateBody(heap_object->map(), | 2514 StaticYoungGenerationMarkingVisitor::IterateBody(heap_object->map(), |
2360 heap_object); | 2515 heap_object); |
2361 return KEEP_SLOT; | 2516 return KEEP_SLOT; |
2362 } | 2517 } |
2363 return REMOVE_SLOT; | 2518 return REMOVE_SLOT; |
2364 } | 2519 } |
2365 | 2520 |
2366 static bool IsUnmarkedObject(Heap* heap, Object** p) { | 2521 static bool IsUnmarkedObject(Heap* heap, Object** p) { |
ulan 2017/05/03 14:27:20: Nit: change the name to mention young generation s…
Michael Lippautz 2017/05/03 15:08:03: Done.
2367 DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p)); | 2522 DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p)); |
2368 return heap->InNewSpace(*p) && | 2523 return heap->InNewSpace(*p) && |
2369 !ObjectMarking::IsBlack(HeapObject::cast(*p), | 2524 !ObjectMarking::IsBlack(HeapObject::cast(*p), |
2370 MarkingState::Internal(HeapObject::cast(*p))); | 2525 MarkingState::External(HeapObject::cast(*p))); |
2371 } | 2526 } |
2372 | 2527 |
2373 void MinorMarkCompactCollector::MarkLiveObjects() { | 2528 void MinorMarkCompactCollector::MarkLiveObjects() { |
2374 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK); | 2529 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK); |
2375 | 2530 |
2376 PostponeInterruptsScope postpone(isolate()); | 2531 PostponeInterruptsScope postpone(isolate()); |
2377 | 2532 |
2378 StaticYoungGenerationMarkingVisitor::Initialize(heap()); | 2533 StaticYoungGenerationMarkingVisitor::Initialize(heap()); |
2379 RootMarkingVisitor root_visitor(this); | 2534 RootMarkingVisitor root_visitor(this); |
2380 | 2535 |
(...skipping 31 matching lines...) | |
2412 heap()->IterateEncounteredWeakCollections(&root_visitor); | 2567 heap()->IterateEncounteredWeakCollections(&root_visitor); |
2413 ProcessMarkingDeque(); | 2568 ProcessMarkingDeque(); |
2414 } | 2569 } |
2415 | 2570 |
2416 { | 2571 { |
2417 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES); | 2572 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES); |
2418 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( | 2573 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( |
2419 &IsUnmarkedObject); | 2574 &IsUnmarkedObject); |
2420 isolate() | 2575 isolate() |
2421 ->global_handles() | 2576 ->global_handles() |
2422 ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>( | 2577 ->IterateNewSpaceWeakUnmodifiedRoots< |
2423 &root_visitor); | 2578 GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&root_visitor); |
2424 ProcessMarkingDeque(); | 2579 ProcessMarkingDeque(); |
2425 } | 2580 } |
2426 | 2581 |
2427 marking_deque()->StopUsing(); | 2582 marking_deque()->StopUsing(); |
2428 } | 2583 } |
2429 | 2584 |
2430 void MinorMarkCompactCollector::ProcessMarkingDeque() { | 2585 void MinorMarkCompactCollector::ProcessMarkingDeque() { |
2431 EmptyMarkingDeque(); | 2586 EmptyMarkingDeque(); |
2432 DCHECK(!marking_deque()->overflowed()); | 2587 DCHECK(!marking_deque()->overflowed()); |
2433 DCHECK(marking_deque()->IsEmpty()); | 2588 DCHECK(marking_deque()->IsEmpty()); |
(...skipping 11 matching lines...) | |
2445 object, MarkingState::External(object)))); | 2600 object, MarkingState::External(object)))); |
2446 | 2601 |
2447 Map* map = object->map(); | 2602 Map* map = object->map(); |
2448 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>( | 2603 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>( |
2449 object, MarkingState::External(object)))); | 2604 object, MarkingState::External(object)))); |
2450 StaticYoungGenerationMarkingVisitor::IterateBody(map, object); | 2605 StaticYoungGenerationMarkingVisitor::IterateBody(map, object); |
2451 } | 2606 } |
2452 } | 2607 } |
2453 | 2608 |
2454 void MinorMarkCompactCollector::CollectGarbage() { | 2609 void MinorMarkCompactCollector::CollectGarbage() { |
2610 heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted(); | |
2611 | |
2455 MarkLiveObjects(); | 2612 MarkLiveObjects(); |
2456 | 2613 ClearNonLiveReferences(); |
2457 #ifdef VERIFY_HEAP | 2614 #ifdef VERIFY_HEAP |
2458 if (FLAG_verify_heap) { | 2615 if (FLAG_verify_heap) { |
2459 YoungGenerationMarkingVerifier verifier(heap()); | 2616 YoungGenerationMarkingVerifier verifier(heap()); |
2460 verifier.Run(); | 2617 verifier.Run(); |
2461 } | 2618 } |
2462 #endif // VERIFY_HEAP | 2619 #endif // VERIFY_HEAP |
2620 | |
2621 Evacuate(); | |
2622 #ifdef VERIFY_HEAP | |
2623 if (FLAG_verify_heap) { | |
2624 YoungGenerationEvacuationVerifier verifier(heap()); | |
2625 verifier.Run(); | |
2626 } | |
2627 #endif // VERIFY_HEAP | |
2628 | |
2629 heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge(); | |
2630 | |
2631 { | |
2632 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS); | |
2633 for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(), | |
2634 heap()->new_space()->FromSpaceEnd())) { | |
2635 marking_state(p).ClearLiveness(); | |
2636 } | |
2637 } | |
2638 } | |
2639 | |
2640 void MinorMarkCompactCollector::ClearNonLiveReferences() { | |
2641 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR); | |
2642 | |
2643 { | |
2644 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE); | |
2645 // Internalized strings are always stored in old space, so there is no need | |
2646 // to clean them here. | |
2647 YoungGenerationExternalStringTableCleaner external_visitor(*this); | |
2648 heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor); | |
2649 heap()->external_string_table_.CleanUpNewSpaceStrings(); | |
2650 } | |
2651 | |
2652 { | |
2653 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS); | |
2654 // Process the weak references. | |
2655 MinorMarkCompactWeakObjectRetainer retainer(*this); | |
2656 heap()->ProcessYoungWeakReferences(&retainer); | |
2657 } | |
2658 } | |
2659 | |
2660 void MinorMarkCompactCollector::EvacuatePrologue() { | |
2661 NewSpace* new_space = heap()->new_space(); | |
2662 // Append the list of new space pages to be processed. | |
2663 for (Page* p : PageRange(new_space->bottom(), new_space->top())) { | |
2664 new_space_evacuation_pages_.Add(p); | |
2665 } | |
2666 new_space->Flip(); | |
2667 new_space->ResetAllocationInfo(); | |
2668 } | |
2669 | |
2670 void MinorMarkCompactCollector::EvacuateEpilogue() { | |
2671 heap()->new_space()->set_age_mark(heap()->new_space()->top()); | |
2672 } | |
2673 | |
2674 void MinorMarkCompactCollector::Evacuate() { | |
2675 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | |
2676 Heap::RelocationLock relocation_lock(heap()); | |
2677 | |
2678 { | |
2679 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE); | |
2680 EvacuatePrologue(); | |
2681 } | |
2682 | |
2683 { | |
2684 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); | |
2685 EvacuatePagesInParallel(); | |
2686 } | |
2687 | |
2688 UpdatePointersAfterEvacuation(); | |
2689 | |
2690 { | |
2691 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE); | |
2692 if (!heap()->new_space()->Rebalance()) { | |
2693 FatalProcessOutOfMemory("NewSpace::Rebalance"); | |
2694 } | |
2695 } | |
2696 | |
2697 // Give pages that are queued to be freed back to the OS. | |
2698 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | |
2699 | |
2700 { | |
2701 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | |
2702 // TODO(mlippautz): Implement page promotion. | |
2703 new_space_evacuation_pages_.Rewind(0); | |
2704 } | |
2705 | |
2706 { | |
2707 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE); | |
2708 EvacuateEpilogue(); | |
2709 } | |
2463 } | 2710 } |
2464 | 2711 |
2465 void MarkCompactCollector::MarkLiveObjects() { | 2712 void MarkCompactCollector::MarkLiveObjects() { |
2466 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK); | 2713 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK); |
2467 // The recursive GC marker detects when it is nearing stack overflow, | 2714 // The recursive GC marker detects when it is nearing stack overflow, |
2468 // and switches to a different marking system. JS interrupts interfere | 2715 // and switches to a different marking system. JS interrupts interfere |
2469 // with the C stack limit check. | 2716 // with the C stack limit check. |
2470 PostponeInterruptsScope postpone(isolate()); | 2717 PostponeInterruptsScope postpone(isolate()); |
2471 | 2718 |
2472 { | 2719 { |
(...skipping 836 matching lines...) | |
3309 success = false; | 3556 success = false; |
3310 } else { | 3557 } else { |
3311 ArrayBufferTracker::ProcessBuffers( | 3558 ArrayBufferTracker::ProcessBuffers( |
3312 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | 3559 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
3313 } | 3560 } |
3314 break; | 3561 break; |
3315 } | 3562 } |
3316 return success; | 3563 return success; |
3317 } | 3564 } |
3318 | 3565 |
3566 class YoungGenerationEvacuator : public Evacuator { | |
3567 public: | |
3568 YoungGenerationEvacuator(MinorMarkCompactCollector* collector, | |
3569 RecordMigratedSlotVisitor* record_visitor) | |
3570 : Evacuator(collector->heap(), record_visitor), collector_(collector) {} | |
3571 | |
3572 protected: | |
3573 bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override; | |
3574 | |
3575 MinorMarkCompactCollector* collector_; | |
3576 }; | |
3577 | |
3578 bool YoungGenerationEvacuator::RawEvacuatePage(Page* page, | |
3579 intptr_t* live_bytes) { | |
3580 bool success = false; | |
3581 LiveObjectVisitor object_visitor; | |
3582 const MarkingState state = collector_->marking_state(page); | |
3583 *live_bytes = state.live_bytes(); | |
3584 switch (ComputeEvacuationMode(page)) { | |
3585 case kObjectsNewToOld: | |
3586 success = object_visitor.VisitBlackObjects( | |
3587 page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits); | |
3588 DCHECK(success); | |
3589 ArrayBufferTracker::ProcessBuffers( | |
3590 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
3591 break; | |
3592 case kPageNewToOld: | |
3593 // TODO(mlippautz): Implement page promotion. | |
3594 UNREACHABLE(); | |
3595 break; | |
3596 case kPageNewToNew: | |
3597 // TODO(mlippautz): Implement page promotion. | |
3598 UNREACHABLE(); | |
3599 break; | |
3600 case kObjectsOldToOld: | |
3601 UNREACHABLE(); | |
3602 break; | |
3603 } | |
3604 return success; | |
3605 } | |
3606 | |
3319 class EvacuationJobTraits { | 3607 class EvacuationJobTraits { |
3320 public: | 3608 public: |
3321 typedef int* PerPageData; // Pointer to number of aborted pages. | 3609 struct PageData { |
3610 int* abandoned_pages; // Pointer to number of aborted pages. | |
3611 MarkingState marking_state; | |
3612 }; | |
3613 | |
3614 typedef PageData PerPageData; | |
3322 typedef Evacuator* PerTaskData; | 3615 typedef Evacuator* PerTaskData; |
3323 | 3616 |
3324 static const bool NeedSequentialFinalization = true; | 3617 static const bool NeedSequentialFinalization = true; |
3325 | 3618 |
3326 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, | 3619 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, |
3327 MemoryChunk* chunk, PerPageData) { | 3620 MemoryChunk* chunk, PerPageData) { |
3328 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); | 3621 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); |
3329 } | 3622 } |
3330 | 3623 |
3331 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, | 3624 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, |
(...skipping 12 matching lines...) | |
3344 if (success) { | 3637 if (success) { |
3345 DCHECK(p->IsEvacuationCandidate()); | 3638 DCHECK(p->IsEvacuationCandidate()); |
3346 DCHECK(p->SweepingDone()); | 3639 DCHECK(p->SweepingDone()); |
3347 p->Unlink(); | 3640 p->Unlink(); |
3348 } else { | 3641 } else { |
3349 // We have partially compacted the page, i.e., some objects may have | 3642 // We have partially compacted the page, i.e., some objects may have |
3350 // moved, others are still in place. | 3643 // moved, others are still in place. |
3351 p->ClearEvacuationCandidate(); | 3644 p->ClearEvacuationCandidate(); |
3352 // Slots have already been recorded so we just need to add it to the | 3645 // Slots have already been recorded so we just need to add it to the |
3353 // sweeper, which will happen after updating pointers. | 3646 // sweeper, which will happen after updating pointers. |
3354 *data += 1; | 3647 *data.abandoned_pages += 1; |
3355 } | 3648 } |
3356 break; | 3649 break; |
3357 default: | 3650 default: |
3358 UNREACHABLE(); | 3651 UNREACHABLE(); |
3359 } | 3652 } |
3360 } | 3653 } |
3361 }; | 3654 }; |
3362 | 3655 |
3363 template <class Evacuator, class Collector> | 3656 template <class Evacuator, class Collector> |
3364 void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks( | 3657 void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks( |
3365 Collector* collector, PageParallelJob<EvacuationJobTraits>* job, | 3658 Collector* collector, PageParallelJob<EvacuationJobTraits>* job, |
3366 RecordMigratedSlotVisitor* record_visitor, const intptr_t live_bytes, | 3659 RecordMigratedSlotVisitor* record_visitor, |
3660 MigrationObserver* migration_observer, const intptr_t live_bytes, | |
3367 const int& abandoned_pages) { | 3661 const int& abandoned_pages) { |
3368 // Used for trace summary. | 3662 // Used for trace summary. |
3369 double compaction_speed = 0; | 3663 double compaction_speed = 0; |
3370 if (FLAG_trace_evacuation) { | 3664 if (FLAG_trace_evacuation) { |
3371 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3665 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
3372 } | 3666 } |
3373 | 3667 |
3374 const bool profiling = | 3668 const bool profiling = |
3375 heap()->isolate()->is_profiling() || | 3669 heap()->isolate()->is_profiling() || |
3376 heap()->isolate()->logger()->is_logging_code_events() || | 3670 heap()->isolate()->logger()->is_logging_code_events() || |
3377 heap()->isolate()->heap_profiler()->is_tracking_object_moves(); | 3671 heap()->isolate()->heap_profiler()->is_tracking_object_moves(); |
3378 ProfilingMigrationObserver profiling_observer(heap()); | 3672 ProfilingMigrationObserver profiling_observer(heap()); |
3379 | 3673 |
3380 const int wanted_num_tasks = | 3674 const int wanted_num_tasks = |
3381 NumberOfParallelCompactionTasks(job->NumberOfPages(), live_bytes); | 3675 NumberOfParallelCompactionTasks(job->NumberOfPages(), live_bytes); |
3382 Evacuator** evacuators = new Evacuator*[wanted_num_tasks]; | 3676 Evacuator** evacuators = new Evacuator*[wanted_num_tasks]; |
3383 for (int i = 0; i < wanted_num_tasks; i++) { | 3677 for (int i = 0; i < wanted_num_tasks; i++) { |
3384 evacuators[i] = new Evacuator(collector, record_visitor); | 3678 evacuators[i] = new Evacuator(collector, record_visitor); |
3385 if (profiling) evacuators[i]->AddObserver(&profiling_observer); | 3679 if (profiling) evacuators[i]->AddObserver(&profiling_observer); |
3680 if (migration_observer != nullptr) | |
3681 evacuators[i]->AddObserver(migration_observer); | |
3386 } | 3682 } |
3387 job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); | 3683 job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); |
3388 const Address top = heap()->new_space()->top(); | 3684 const Address top = heap()->new_space()->top(); |
3389 for (int i = 0; i < wanted_num_tasks; i++) { | 3685 for (int i = 0; i < wanted_num_tasks; i++) { |
3390 evacuators[i]->Finalize(); | 3686 evacuators[i]->Finalize(); |
3391 // Try to find the last LAB that was used for new space allocation in | 3687 // Try to find the last LAB that was used for new space allocation in |
3392 // evacuation tasks. If it was adjacent to the current top, move top back. | 3688 // evacuation tasks. If it was adjacent to the current top, move top back. |
3393 const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB(); | 3689 const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB(); |
3394 if (info.limit() != nullptr && info.limit() == top) { | 3690 if (info.limit() != nullptr && info.limit() == top) { |
3395 DCHECK_NOT_NULL(info.top()); | 3691 DCHECK_NOT_NULL(info.top()); |
(...skipping 18 matching lines...) | |
3414 | 3710 |
3415 void MarkCompactCollector::EvacuatePagesInParallel() { | 3711 void MarkCompactCollector::EvacuatePagesInParallel() { |
3416 PageParallelJob<EvacuationJobTraits> job( | 3712 PageParallelJob<EvacuationJobTraits> job( |
3417 heap_, heap_->isolate()->cancelable_task_manager(), | 3713 heap_, heap_->isolate()->cancelable_task_manager(), |
3418 &page_parallel_job_semaphore_); | 3714 &page_parallel_job_semaphore_); |
3419 | 3715 |
3420 int abandoned_pages = 0; | 3716 int abandoned_pages = 0; |
3421 intptr_t live_bytes = 0; | 3717 intptr_t live_bytes = 0; |
3422 for (Page* page : old_space_evacuation_pages_) { | 3718 for (Page* page : old_space_evacuation_pages_) { |
3423 live_bytes += MarkingState::Internal(page).live_bytes(); | 3719 live_bytes += MarkingState::Internal(page).live_bytes(); |
3424 job.AddPage(page, &abandoned_pages); | 3720 job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
3425 } | 3721 } |
3426 | 3722 |
3427 const bool reduce_memory = heap()->ShouldReduceMemory(); | 3723 const bool reduce_memory = heap()->ShouldReduceMemory(); |
3428 const Address age_mark = heap()->new_space()->age_mark(); | 3724 const Address age_mark = heap()->new_space()->age_mark(); |
3429 for (Page* page : new_space_evacuation_pages_) { | 3725 for (Page* page : new_space_evacuation_pages_) { |
3430 intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes(); | 3726 intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes(); |
3431 live_bytes += live_bytes_on_page; | 3727 live_bytes += live_bytes_on_page; |
3432 if (!reduce_memory && !page->NeverEvacuate() && | 3728 if (!reduce_memory && !page->NeverEvacuate() && |
3433 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) && | 3729 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) && |
3434 !page->Contains(age_mark) && | 3730 !page->Contains(age_mark) && |
3435 heap()->CanExpandOldGeneration(live_bytes_on_page)) { | 3731 heap()->CanExpandOldGeneration(live_bytes_on_page)) { |
3436 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { | 3732 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { |
3437 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); | 3733 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); |
3438 } else { | 3734 } else { |
3439 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); | 3735 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); |
3440 } | 3736 } |
3441 } | 3737 } |
3442 | 3738 |
3443 job.AddPage(page, &abandoned_pages); | 3739 job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
3444 } | 3740 } |
3445 DCHECK_GE(job.NumberOfPages(), 1); | 3741 DCHECK_GE(job.NumberOfPages(), 1); |
3446 | 3742 |
3447 RecordMigratedSlotVisitor record_visitor(this); | 3743 RecordMigratedSlotVisitor record_visitor(this); |
3448 CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &job, &record_visitor, | 3744 CreateAndExecuteEvacuationTasks<FullEvacuator>( |
3449 live_bytes, abandoned_pages); | 3745 this, &job, &record_visitor, nullptr, live_bytes, abandoned_pages); |
3746 } | |
3747 | |
3748 void MinorMarkCompactCollector::EvacuatePagesInParallel() { | |
3749 PageParallelJob<EvacuationJobTraits> job( | |
3750 heap_, heap_->isolate()->cancelable_task_manager(), | |
3751 &page_parallel_job_semaphore_); | |
3752 int abandoned_pages = 0; | |
3753 intptr_t live_bytes = 0; | |
3754 | |
3755 for (Page* page : new_space_evacuation_pages_) { | |
3756 intptr_t live_bytes_on_page = marking_state(page).live_bytes(); | |
3757 live_bytes += live_bytes_on_page; | |
3758 // TODO(mlippautz): Implement page promotion. | |
3759 job.AddPage(page, {&abandoned_pages, marking_state(page)}); | |
3760 } | |
3761 DCHECK_GE(job.NumberOfPages(), 1); | |
3762 | |
3763 YoungGenerationMigrationObserver observer(heap(), | |
3764 heap()->mark_compact_collector()); | |
3765 YoungGenerationRecordMigratedSlotVisitor record_visitor( | |
3766 heap()->mark_compact_collector()); | |
3767 CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>( | |
3768 this, &job, &record_visitor, &observer, live_bytes, abandoned_pages); | |
3450 } | 3769 } |
3451 | 3770 |
3452 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 3771 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
3453 public: | 3772 public: |
3454 virtual Object* RetainAs(Object* object) { | 3773 virtual Object* RetainAs(Object* object) { |
3455 if (object->IsHeapObject()) { | 3774 if (object->IsHeapObject()) { |
3456 HeapObject* heap_object = HeapObject::cast(object); | 3775 HeapObject* heap_object = HeapObject::cast(object); |
3457 MapWord map_word = heap_object->map_word(); | 3776 MapWord map_word = heap_object->map_word(); |
3458 if (map_word.IsForwardingAddress()) { | 3777 if (map_word.IsForwardingAddress()) { |
3459 return map_word.ToForwardingAddress(); | 3778 return map_word.ToForwardingAddress(); |
(...skipping 214 matching lines...) | |
3674 } | 3993 } |
3675 state.SetLiveBytes(new_live_size); | 3994 state.SetLiveBytes(new_live_size); |
3676 } | 3995 } |
3677 | 3996 |
3678 void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space, | 3997 void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space, |
3679 Page* page) { | 3998 Page* page) { |
3680 base::LockGuard<base::Mutex> guard(&mutex_); | 3999 base::LockGuard<base::Mutex> guard(&mutex_); |
3681 swept_list_[space->identity()].Add(page); | 4000 swept_list_[space->identity()].Add(page); |
3682 } | 4001 } |
3683 | 4002 |
3684 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 4003 void MarkCompactCollector::Evacuate() { |
3685 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | 4004 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); |
3686 Heap::RelocationLock relocation_lock(heap()); | 4005 Heap::RelocationLock relocation_lock(heap()); |
3687 | 4006 |
3688 { | 4007 { |
3689 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE); | 4008 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE); |
3690 EvacuatePrologue(); | 4009 EvacuatePrologue(); |
3691 } | 4010 } |
3692 | 4011 |
3693 { | 4012 { |
3694 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); | 4013 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); |
(...skipping 69 matching lines...) | |
3764 UpdateUntypedPointers(heap, chunk); | 4083 UpdateUntypedPointers(heap, chunk); |
3765 UpdateTypedPointers(heap, chunk); | 4084 UpdateTypedPointers(heap, chunk); |
3766 return true; | 4085 return true; |
3767 } | 4086 } |
3768 static const bool NeedSequentialFinalization = false; | 4087 static const bool NeedSequentialFinalization = false; |
3769 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 4088 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
3770 } | 4089 } |
3771 | 4090 |
3772 private: | 4091 private: |
3773 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { | 4092 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { |
4093 base::LockGuard<base::RecursiveMutex> guard(chunk->mutex()); | |
3774 if (type == OLD_TO_NEW) { | 4094 if (type == OLD_TO_NEW) { |
3775 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { | 4095 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { |
3776 return CheckAndUpdateOldToNewSlot(heap, slot); | 4096 return CheckAndUpdateOldToNewSlot(heap, slot); |
3777 }); | 4097 }); |
3778 } else { | 4098 } else { |
3779 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { | 4099 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { |
3780 return UpdateSlot(reinterpret_cast<Object**>(slot)); | 4100 return UpdateSlot(reinterpret_cast<Object**>(slot)); |
3781 }); | 4101 }); |
3782 } | 4102 } |
3783 } | 4103 } |
(...skipping 86 matching lines...) | |
3870 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 4190 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
3871 RememberedSet<type>::IterateMemoryChunks( | 4191 RememberedSet<type>::IterateMemoryChunks( |
3872 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); | 4192 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); |
3873 int num_pages = job.NumberOfPages(); | 4193 int num_pages = job.NumberOfPages(); |
3874 int num_tasks = NumberOfPointerUpdateTasks(num_pages); | 4194 int num_tasks = NumberOfPointerUpdateTasks(num_pages); |
3875 job.Run(num_tasks, [](int i) { return 0; }); | 4195 job.Run(num_tasks, [](int i) { return 0; }); |
3876 } | 4196 } |
3877 | 4197 |
3878 class ToSpacePointerUpdateJobTraits { | 4198 class ToSpacePointerUpdateJobTraits { |
3879 public: | 4199 public: |
3880 typedef std::pair<Address, Address> PerPageData; | 4200 struct PageData { |
4201 Address start; | |
4202 Address end; | |
4203 MarkingState marking_state; | |
4204 }; | |
4205 | |
4206 typedef PageData PerPageData; | |
3881 typedef PointersUpdatingVisitor* PerTaskData; | 4207 typedef PointersUpdatingVisitor* PerTaskData; |
3882 | 4208 |
3883 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, | 4209 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, |
3884 MemoryChunk* chunk, PerPageData limits) { | 4210 MemoryChunk* chunk, PerPageData page_data) { |
3885 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { | 4211 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { |
3886 // New->new promoted pages contain garbage so they require iteration | 4212 // New->new promoted pages contain garbage so they require iteration |
3887 // using markbits. | 4213 // using markbits. |
3888 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits); | 4214 ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data); |
3889 } else { | 4215 } else { |
3890 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits); | 4216 ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data); |
3891 } | 4217 } |
3892 return true; | 4218 return true; |
3893 } | 4219 } |
3894 | 4220 |
3895 static const bool NeedSequentialFinalization = false; | 4221 static const bool NeedSequentialFinalization = false; |
3896 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 4222 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
3897 } | 4223 } |
3898 | 4224 |
3899 private: | 4225 private: |
3900 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, | 4226 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, |
3901 MemoryChunk* chunk, | 4227 MemoryChunk* chunk, |
3902 PerPageData limits) { | 4228 PerPageData page_data) { |
3903 for (Address cur = limits.first; cur < limits.second;) { | 4229 for (Address cur = page_data.start; cur < page_data.end;) { |
3904 HeapObject* object = HeapObject::FromAddress(cur); | 4230 HeapObject* object = HeapObject::FromAddress(cur); |
3905 Map* map = object->map(); | 4231 Map* map = object->map(); |
3906 int size = object->SizeFromMap(map); | 4232 int size = object->SizeFromMap(map); |
3907 object->IterateBody(map->instance_type(), size, visitor); | 4233 object->IterateBody(map->instance_type(), size, visitor); |
3908 cur += size; | 4234 cur += size; |
3909 } | 4235 } |
3910 } | 4236 } |
3911 | 4237 |
3912 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, | 4238 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, |
3913 MemoryChunk* chunk, | 4239 MemoryChunk* chunk, |
3914 PerPageData limits) { | 4240 PerPageData page_data) { |
3915 LiveObjectIterator<kBlackObjects> it(chunk, MarkingState::Internal(chunk)); | 4241 LiveObjectIterator<kBlackObjects> it(chunk, page_data.marking_state); |
3916 HeapObject* object = NULL; | 4242 HeapObject* object = NULL; |
3917 while ((object = it.Next()) != NULL) { | 4243 while ((object = it.Next()) != NULL) { |
3918 Map* map = object->map(); | 4244 Map* map = object->map(); |
3919 int size = object->SizeFromMap(map); | 4245 int size = object->SizeFromMap(map); |
3920 object->IterateBody(map->instance_type(), size, visitor); | 4246 object->IterateBody(map->instance_type(), size, visitor); |
3921 } | 4247 } |
3922 } | 4248 } |
3923 }; | 4249 }; |
3924 | 4250 |
3925 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { | 4251 template <class MarkingStateProvider> |
4252 void UpdateToSpacePointersInParallel( | |
4253 Heap* heap, base::Semaphore* semaphore, | |
4254 const MarkingStateProvider& marking_state_provider) { | |
3926 PageParallelJob<ToSpacePointerUpdateJobTraits> job( | 4255 PageParallelJob<ToSpacePointerUpdateJobTraits> job( |
3927 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 4256 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
3928 Address space_start = heap->new_space()->bottom(); | 4257 Address space_start = heap->new_space()->bottom(); |
3929 Address space_end = heap->new_space()->top(); | 4258 Address space_end = heap->new_space()->top(); |
3930 for (Page* page : PageRange(space_start, space_end)) { | 4259 for (Page* page : PageRange(space_start, space_end)) { |
3931 Address start = | 4260 Address start = |
3932 page->Contains(space_start) ? space_start : page->area_start(); | 4261 page->Contains(space_start) ? space_start : page->area_start(); |
3933 Address end = page->Contains(space_end) ? space_end : page->area_end(); | 4262 Address end = page->Contains(space_end) ? space_end : page->area_end(); |
3934 job.AddPage(page, std::make_pair(start, end)); | 4263 job.AddPage(page, {start, end, marking_state_provider.marking_state(page)}); |
3935 } | 4264 } |
3936 PointersUpdatingVisitor visitor; | 4265 PointersUpdatingVisitor visitor; |
3937 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; | 4266 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; |
3938 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); | 4267 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); |
3939 } | 4268 } |
3940 | 4269 |
3941 void MarkCompactCollector::UpdatePointersAfterEvacuation() { | 4270 void MarkCompactCollector::UpdatePointersAfterEvacuation() { |
3942 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); | 4271 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); |
3943 | 4272 |
3944 | 4273 |
3945 { | 4274 { |
3946 TRACE_GC(heap()->tracer(), | 4275 TRACE_GC(heap()->tracer(), |
3947 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); | 4276 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); |
3948 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_); | 4277 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, |
4278 *this); | |
3949 // Update roots. | 4279 // Update roots. |
3950 PointersUpdatingVisitor updating_visitor; | 4280 PointersUpdatingVisitor updating_visitor; |
3951 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 4281 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
3952 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); | 4282 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); |
3953 } | 4283 } |
3954 | 4284 |
3955 { | 4285 { |
3956 Heap* heap = this->heap(); | 4286 Heap* heap = this->heap(); |
3957 TRACE_GC(heap->tracer(), | 4287 TRACE_GC(heap->tracer(), |
3958 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); | 4288 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); |
3959 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); | 4289 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); |
3960 } | 4290 } |
3961 | 4291 |
3962 { | 4292 { |
3963 TRACE_GC(heap()->tracer(), | 4293 TRACE_GC(heap()->tracer(), |
3964 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); | 4294 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); |
3965 // Update pointers from external string table. | 4295 // Update pointers from external string table. |
3966 heap_->UpdateReferencesInExternalStringTable( | 4296 heap_->UpdateReferencesInExternalStringTable( |
3967 &UpdateReferenceInExternalStringTableEntry); | 4297 &UpdateReferenceInExternalStringTableEntry); |
3968 | 4298 |
3969 EvacuationWeakObjectRetainer evacuation_object_retainer; | 4299 EvacuationWeakObjectRetainer evacuation_object_retainer; |
3970 heap()->ProcessWeakListRoots(&evacuation_object_retainer); | 4300 heap()->ProcessWeakListRoots(&evacuation_object_retainer); |
3971 } | 4301 } |
3972 } | 4302 } |
3973 | 4303 |
4304 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() { | |
4305 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); | |
4306 | |
4307 PointersUpdatingVisitor updating_visitor; | |
4308 | |
4309 { | |
4310 TRACE_GC(heap()->tracer(), | |
4311 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); | |
4312 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, | |
4313 *this); | |
4314 // TODO(mlippautz): Iteration mode is not optimal as we process all | |
4315 // global handles. Find a way to only process the ones related to new | |
4316 // space. | |
4317 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | |
4318 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); | |
4319 } | |
4320 | |
4321 { | |
4322 TRACE_GC(heap()->tracer(), | |
4323 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); | |
4324 | |
4325 EvacuationWeakObjectRetainer evacuation_object_retainer; | |
4326 heap()->ProcessWeakListRoots(&evacuation_object_retainer); | |
4327 | |
4328 // Update pointers from external string table. | |
4329 heap()->UpdateNewSpaceReferencesInExternalStringTable( | |
4330 &UpdateReferenceInExternalStringTableEntry); | |
4331 heap()->IterateEncounteredWeakCollections(&updating_visitor); | |
4332 } | |
4333 } | |
3974 | 4334 |
3975 void MarkCompactCollector::ReleaseEvacuationCandidates() { | 4335 void MarkCompactCollector::ReleaseEvacuationCandidates() { |
3976 for (Page* p : old_space_evacuation_pages_) { | 4336 for (Page* p : old_space_evacuation_pages_) { |
3977 if (!p->IsEvacuationCandidate()) continue; | 4337 if (!p->IsEvacuationCandidate()) continue; |
3978 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 4338 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
3979 MarkingState::Internal(p).SetLiveBytes(0); | 4339 MarkingState::Internal(p).SetLiveBytes(0); |
3980 CHECK(p->SweepingDone()); | 4340 CHECK(p->SweepingDone()); |
3981 space->ReleasePage(p); | 4341 space->ReleasePage(p); |
3982 } | 4342 } |
3983 old_space_evacuation_pages_.Rewind(0); | 4343 old_space_evacuation_pages_.Rewind(0); |
(...skipping 95 matching lines...) | |
4079 | 4439 |
4080 int will_be_swept = 0; | 4440 int will_be_swept = 0; |
4081 bool unused_page_present = false; | 4441 bool unused_page_present = false; |
4082 | 4442 |
4083 // Loop needs to support deletion if live bytes == 0 for a page. | 4443 // Loop needs to support deletion if live bytes == 0 for a page. |
4084 for (auto it = space->begin(); it != space->end();) { | 4444 for (auto it = space->begin(); it != space->end();) { |
4085 Page* p = *(it++); | 4445 Page* p = *(it++); |
4086 DCHECK(p->SweepingDone()); | 4446 DCHECK(p->SweepingDone()); |
4087 | 4447 |
4088 if (p->IsEvacuationCandidate()) { | 4448 if (p->IsEvacuationCandidate()) { |
4089 // Will be processed in EvacuateNewSpaceAndCandidates. | 4449 // Will be processed in Evacuate. |
4090 DCHECK(evacuation_candidates_.length() > 0); | 4450 DCHECK(evacuation_candidates_.length() > 0); |
4091 continue; | 4451 continue; |
4092 } | 4452 } |
4093 | 4453 |
4094 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { | 4454 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { |
4095 // We need to sweep the page to get it into an iterable state again. Note | 4455 // We need to sweep the page to get it into an iterable state again. Note |
4096 // that this adds unusable memory into the free list that is later on | 4456 // that this adds unusable memory into the free list that is later on |
4097 // (in the free list) dropped again. Since we only use the flag for | 4457 // (in the free list) dropped again. Since we only use the flag for |
4098 // testing this is fine. | 4458 // testing this is fine. |
4099 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 4459 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
(...skipping 86 matching lines...) | |
4186 // The target is always in old space, we don't have to record the slot in | 4546 // The target is always in old space, we don't have to record the slot in |
4187 // the old-to-new remembered set. | 4547 // the old-to-new remembered set. |
4188 DCHECK(!heap()->InNewSpace(target)); | 4548 DCHECK(!heap()->InNewSpace(target)); |
4189 RecordRelocSlot(host, &rinfo, target); | 4549 RecordRelocSlot(host, &rinfo, target); |
4190 } | 4550 } |
4191 } | 4551 } |
4192 } | 4552 } |
4193 | 4553 |
4194 } // namespace internal | 4554 } // namespace internal |
4195 } // namespace v8 | 4555 } // namespace v8 |