OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 286 matching lines...)
297 HeapObject* object = HeapObject::cast(*current); | 297 HeapObject* object = HeapObject::cast(*current); |
298 if (heap()->InNewSpace(object)) { | 298 if (heap()->InNewSpace(object)) { |
299 CHECK(heap()->InToSpace(object)); | 299 CHECK(heap()->InToSpace(object)); |
300 } | 300 } |
301 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); | 301 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); |
302 } | 302 } |
303 } | 303 } |
304 } | 304 } |
305 }; | 305 }; |
306 | 306 |
| 307 class YoungGenerationEvacuationVerifier : public EvacuationVerifier { |
| 308 public: |
| 309 explicit YoungGenerationEvacuationVerifier(Heap* heap) |
| 310 : EvacuationVerifier(heap) {} |
| 311 |
| 312 void Run() override { |
| 313 VerifyRoots(VISIT_ALL_IN_SCAVENGE); |
| 314 VerifyEvacuation(heap_->new_space()); |
| 315 VerifyEvacuation(heap_->old_space()); |
| 316 VerifyEvacuation(heap_->code_space()); |
| 317 VerifyEvacuation(heap_->map_space()); |
| 318 } |
| 319 |
| 320 protected: |
| 321 void VerifyPointers(Object** start, Object** end) override { |
| 322 for (Object** current = start; current < end; current++) { |
| 323 if ((*current)->IsHeapObject()) { |
| 324 HeapObject* object = HeapObject::cast(*current); |
| 325 CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object)); |
| 326 } |
| 327 } |
| 328 } |
| 329 }; |
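The new verifier condenses the full verifier's nested "if (InNewSpace) CHECK(InToSpace)" pattern into a single CHECK_IMPLIES. A minimal standalone sketch of the equivalence; the macros here are stand-ins built on assert, not V8's actual CHECK definitions:

#include <cassert>

#define CHECK(cond) assert(cond)
#define CHECK_IMPLIES(p, q) CHECK(!(p) || (q))

int main() {
  bool in_new_space = true;
  bool in_to_space = true;
  // Both spellings enforce the same invariant: by the time verification
  // runs, any surviving new-space object must already be in to-space.
  if (in_new_space) CHECK(in_to_space);
  CHECK_IMPLIES(in_new_space, in_to_space);
}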
| 330 |
307 } // namespace | 331 } // namespace |
308 #endif // VERIFY_HEAP | 332 #endif // VERIFY_HEAP |
309 | 333 |
310 // ============================================================================= | 334 // ============================================================================= |
311 // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector | 335 // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector |
312 // ============================================================================= | 336 // ============================================================================= |
313 | 337 |
314 int MarkCompactCollectorBase::NumberOfParallelCompactionTasks( | 338 int MarkCompactCollectorBase::NumberOfParallelCompactionTasks( |
315 int pages, intptr_t live_bytes) { | 339 int pages, intptr_t live_bytes) { |
316 if (!FLAG_parallel_compaction) return 1; | 340 if (!FLAG_parallel_compaction) return 1; |
(...skipping 116 matching lines...)
433 | 457 |
434 #ifdef VERIFY_HEAP | 458 #ifdef VERIFY_HEAP |
435 if (FLAG_verify_heap) { | 459 if (FLAG_verify_heap) { |
436 FullMarkingVerifier verifier(heap()); | 460 FullMarkingVerifier verifier(heap()); |
437 verifier.Run(); | 461 verifier.Run(); |
438 } | 462 } |
439 #endif | 463 #endif |
440 | 464 |
441 StartSweepSpaces(); | 465 StartSweepSpaces(); |
442 | 466 |
443 EvacuateNewSpaceAndCandidates(); | 467 Evacuate(); |
444 | 468 |
445 Finish(); | 469 Finish(); |
446 } | 470 } |
447 | 471 |
448 #ifdef VERIFY_HEAP | 472 #ifdef VERIFY_HEAP |
449 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { | 473 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { |
450 for (Page* p : *space) { | 474 for (Page* p : *space) { |
451 const MarkingState state = MarkingState::Internal(p); | 475 const MarkingState state = MarkingState::Internal(p); |
452 CHECK(state.bitmap()->IsClean()); | 476 CHECK(state.bitmap()->IsClean()); |
453 CHECK_EQ(0, state.live_bytes()); | 477 CHECK_EQ(0, state.live_bytes()); |
(...skipping 1139 matching lines...)
1593 *p = the_hole; | 1617 *p = the_hole; |
1594 } | 1618 } |
1595 } | 1619 } |
1596 } | 1620 } |
1597 } | 1621 } |
1598 | 1622 |
1599 private: | 1623 private: |
1600 Heap* heap_; | 1624 Heap* heap_; |
1601 }; | 1625 }; |
1602 | 1626 |
 | 1627 // Helper class for pruning the external string table. |
| 1628 class YoungGenerationExternalStringTableCleaner : public RootVisitor { |
| 1629 public: |
| 1630 YoungGenerationExternalStringTableCleaner( |
| 1631 const MinorMarkCompactCollector& collector) |
| 1632 : heap_(collector.heap()), collector_(collector) {} |
| 1633 |
| 1634 void VisitRootPointers(Root root, Object** start, Object** end) override { |
| 1635 DCHECK_EQ(static_cast<int>(root), |
| 1636 static_cast<int>(Root::kExternalStringsTable)); |
| 1637 // Visit all HeapObject pointers in [start, end). |
| 1638 for (Object** p = start; p < end; p++) { |
| 1639 Object* o = *p; |
| 1640 if (o->IsHeapObject()) { |
| 1641 HeapObject* heap_object = HeapObject::cast(o); |
| 1642 if (ObjectMarking::IsWhite(heap_object, |
| 1643 collector_.marking_state(heap_object))) { |
| 1644 if (o->IsExternalString()) { |
| 1645 heap_->FinalizeExternalString(String::cast(*p)); |
| 1646 } else { |
| 1647 // The original external string may have been internalized. |
| 1648 DCHECK(o->IsThinString()); |
| 1649 } |
| 1650 // Set the entry to the_hole_value (as deleted). |
| 1651 *p = heap_->the_hole_value(); |
| 1652 } |
| 1653 } |
| 1654 } |
| 1655 } |
| 1656 |
| 1657 private: |
| 1658 Heap* heap_; |
| 1659 const MinorMarkCompactCollector& collector_; |
| 1660 }; |
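The cleaner above walks the new-space section of the external string table: entries the minor collector left white are finalized (if still external) and their slots are overwritten with the hole value so the table stays iterable. A simplified sketch of that pattern with hypothetical stand-in types, not V8's API:

#include <vector>

struct Entry { bool marked; bool is_external; bool finalized = false; };
static Entry kTheHole{/*marked=*/true, /*is_external=*/false};

void CleanNewSpaceStrings(std::vector<Entry*>* table) {
  for (Entry*& slot : *table) {
    if (!slot->marked) {
      if (slot->is_external) slot->finalized = true;  // release the resource
      slot = &kTheHole;  // record the slot as deleted
    }
  }
}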
| 1661 |
| 1662 // Marked young generation objects and all old generation objects will be |
| 1663 // retained. |
| 1664 class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer { |
| 1665 public: |
| 1666 explicit MinorMarkCompactWeakObjectRetainer( |
| 1667 const MinorMarkCompactCollector& collector) |
| 1668 : collector_(collector) {} |
| 1669 |
| 1670 virtual Object* RetainAs(Object* object) { |
| 1671 HeapObject* heap_object = HeapObject::cast(object); |
| 1672 if (!collector_.heap()->InNewSpace(heap_object)) return object; |
| 1673 |
| 1674 DCHECK(!ObjectMarking::IsGrey(heap_object, |
| 1675 collector_.marking_state(heap_object))); |
| 1676 if (ObjectMarking::IsBlack(heap_object, |
| 1677 collector_.marking_state(heap_object))) { |
| 1678 return object; |
| 1679 } |
| 1680 return nullptr; |
| 1681 } |
| 1682 |
| 1683 private: |
| 1684 const MinorMarkCompactCollector& collector_; |
| 1685 }; |
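The retain rule above is small enough to state as a predicate: anything outside new space survives a minor collection unconditionally, and a new-space object survives only if the minor collector marked it black (the DCHECK rules out grey). A hypothetical condensation, not V8 source:

// Returns true if a weak reference should be kept alive by the minor GC.
bool RetainForMinorGC(bool in_new_space, bool is_black) {
  // Old-generation objects are out of scope for the young-generation
  // collector; young objects survive only if they were marked.
  return !in_new_space || is_black;
}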
| 1686 |
1603 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects | 1687 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects |
1604 // are retained. | 1688 // are retained. |
1605 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { | 1689 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { |
1606 public: | 1690 public: |
1607 virtual Object* RetainAs(Object* object) { | 1691 virtual Object* RetainAs(Object* object) { |
1608 HeapObject* heap_object = HeapObject::cast(object); | 1692 HeapObject* heap_object = HeapObject::cast(object); |
1609 DCHECK(!ObjectMarking::IsGrey(heap_object, | 1693 DCHECK(!ObjectMarking::IsGrey(heap_object, |
1610 MarkingState::Internal(heap_object))); | 1694 MarkingState::Internal(heap_object))); |
1611 if (ObjectMarking::IsBlack(heap_object, | 1695 if (ObjectMarking::IsBlack(heap_object, |
1612 MarkingState::Internal(heap_object))) { | 1696 MarkingState::Internal(heap_object))) { |
(...skipping 104 matching lines...)
1717 inline void VisitCellPointer(Code* host, RelocInfo* rinfo) override { | 1801 inline void VisitCellPointer(Code* host, RelocInfo* rinfo) override { |
1718 DCHECK_EQ(host, rinfo->host()); | 1802 DCHECK_EQ(host, rinfo->host()); |
1719 DCHECK(rinfo->rmode() == RelocInfo::CELL); | 1803 DCHECK(rinfo->rmode() == RelocInfo::CELL); |
1720 Cell* cell = rinfo->target_cell(); | 1804 Cell* cell = rinfo->target_cell(); |
1721 // The cell is always in old space, we don't have to record the slot in | 1805 // The cell is always in old space, we don't have to record the slot in |
1722 // the old-to-new remembered set. | 1806 // the old-to-new remembered set. |
1723 DCHECK(!collector_->heap()->InNewSpace(cell)); | 1807 DCHECK(!collector_->heap()->InNewSpace(cell)); |
1724 collector_->RecordRelocSlot(host, rinfo, cell); | 1808 collector_->RecordRelocSlot(host, rinfo, cell); |
1725 } | 1809 } |
1726 | 1810 |
1727 // Entries that will never move. | |
1728 inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override { | 1811 inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override { |
1729 DCHECK_EQ(host, rinfo->host()); | 1812 DCHECK_EQ(host, rinfo->host()); |
1730 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); | 1813 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); |
1731 Code* stub = rinfo->code_age_stub(); | 1814 Code* stub = rinfo->code_age_stub(); |
1732 USE(stub); | 1815 USE(stub); |
1733 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate()); | 1816 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate()); |
1734 } | 1817 } |
1735 | 1818 |
1736 // Entries that are skipped for recording. | 1819 // Entries that are skipped for recording. |
1737 inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {} | 1820 inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {} |
(...skipping 36 matching lines...)
1774 inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst, | 1857 inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst, |
1775 int size) final { | 1858 int size) final { |
1776 if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) { | 1859 if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) { |
1777 PROFILE(heap_->isolate(), | 1860 PROFILE(heap_->isolate(), |
1778 CodeMoveEvent(AbstractCode::cast(src), dst->address())); | 1861 CodeMoveEvent(AbstractCode::cast(src), dst->address())); |
1779 } | 1862 } |
1780 heap_->OnMoveEvent(dst, src, size); | 1863 heap_->OnMoveEvent(dst, src, size); |
1781 } | 1864 } |
1782 }; | 1865 }; |
1783 | 1866 |
| 1867 class YoungGenerationMigrationObserver final : public MigrationObserver { |
| 1868 public: |
| 1869 YoungGenerationMigrationObserver(Heap* heap, |
| 1870 MarkCompactCollector* mark_compact_collector) |
| 1871 : MigrationObserver(heap), |
| 1872 mark_compact_collector_(mark_compact_collector) {} |
| 1873 |
| 1874 inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst, |
| 1875 int size) final { |
| 1876 // Migrate color to old generation marking in case the object survived young |
| 1877 // generation garbage collection. |
| 1878 if (heap_->incremental_marking()->IsMarking()) { |
| 1879 DCHECK(ObjectMarking::IsWhite( |
| 1880 dst, mark_compact_collector_->marking_state(dst))); |
| 1881 heap_->incremental_marking()->TransferColor<MarkBit::ATOMIC>(src, dst); |
| 1882 } |
| 1883 } |
| 1884 |
| 1885 protected: |
| 1886 base::Mutex mutex_; |
| 1887 MarkCompactCollector* mark_compact_collector_; |
| 1888 }; |
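The observer exists to keep the full collector's incremental marking consistent across a minor GC: a promoted survivor must carry its mark to its new address, or the incremental marker would see a white, seemingly unvisited object. A minimal model of the transfer, with hypothetical types standing in for V8's mark bitmaps:

enum class Color { kWhite, kGrey, kBlack };

struct Obj { Color color = Color::kWhite; };

void OnMove(bool incremental_marking_active, Obj& src, Obj& dst) {
  if (incremental_marking_active) {
    dst.color = src.color;      // the survivor keeps its marking progress
    src.color = Color::kWhite;  // the old copy is now garbage
  }
}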
| 1889 |
| 1890 class YoungGenerationRecordMigratedSlotVisitor final |
| 1891 : public RecordMigratedSlotVisitor { |
| 1892 public: |
| 1893 explicit YoungGenerationRecordMigratedSlotVisitor( |
| 1894 MarkCompactCollector* collector) |
| 1895 : RecordMigratedSlotVisitor(collector) {} |
| 1896 |
| 1897 inline void VisitCodeEntry(JSFunction* host, Address code_entry_slot) final { |
| 1898 Address code_entry = Memory::Address_at(code_entry_slot); |
| 1899 if (Page::FromAddress(code_entry)->IsEvacuationCandidate() && |
| 1900 IsLive(host)) { |
| 1901 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot), |
| 1902 nullptr, CODE_ENTRY_SLOT, |
| 1903 code_entry_slot); |
| 1904 } |
| 1905 } |
| 1906 |
| 1907 void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } |
| 1908 void VisitDebugTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } |
| 1909 void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final { |
| 1910 UNREACHABLE(); |
| 1911 } |
| 1912 void VisitCellPointer(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } |
| 1913 void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) final { |
| 1914 UNREACHABLE(); |
| 1915 } |
| 1916 |
| 1917 private: |
 | 1918 // Only record slots for host objects that are considered live by the full |
 | 1919 // collector. |
| 1920 inline bool IsLive(HeapObject* object) { |
| 1921 return ObjectMarking::IsBlack(object, collector_->marking_state(object)); |
| 1922 } |
| 1923 |
| 1924 inline void RecordMigratedSlot(HeapObject* host, Object* value, |
| 1925 Address slot) final { |
| 1926 if (value->IsHeapObject()) { |
| 1927 Page* p = Page::FromAddress(reinterpret_cast<Address>(value)); |
| 1928 if (p->InNewSpace()) { |
| 1929 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot); |
| 1930 } else if (p->IsEvacuationCandidate() && IsLive(host)) { |
| 1931 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot); |
| 1932 } |
| 1933 } |
| 1934 } |
| 1935 }; |
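RecordMigratedSlot above files each outgoing pointer of a migrated object into the matching remembered set: pointers into new space go to OLD_TO_NEW, and pointers into an evacuation candidate are recorded in OLD_TO_OLD only when the full collector considers the host live. The decision, reduced to a sketch over a hypothetical enum:

enum class SlotSet { kNone, kOldToNew, kOldToOld };

SlotSet ClassifyMigratedSlot(bool target_in_new_space,
                             bool target_on_evacuation_candidate,
                             bool host_is_live) {
  if (target_in_new_space) return SlotSet::kOldToNew;
  if (target_on_evacuation_candidate && host_is_live) {
    return SlotSet::kOldToOld;
  }
  return SlotSet::kNone;  // nothing to remember for this slot
}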
| 1936 |
1784 class HeapObjectVisitor { | 1937 class HeapObjectVisitor { |
1785 public: | 1938 public: |
1786 virtual ~HeapObjectVisitor() {} | 1939 virtual ~HeapObjectVisitor() {} |
1787 virtual bool Visit(HeapObject* object) = 0; | 1940 virtual bool Visit(HeapObject* object) = 0; |
1788 }; | 1941 }; |
1789 | 1942 |
1790 class EvacuateVisitorBase : public HeapObjectVisitor { | 1943 class EvacuateVisitorBase : public HeapObjectVisitor { |
1791 public: | 1944 public: |
1792 void AddObserver(MigrationObserver* observer) { | 1945 void AddObserver(MigrationObserver* observer) { |
1793 migration_function_ = RawMigrateObject<MigrationMode::kObserved>; | 1946 migration_function_ = RawMigrateObject<MigrationMode::kObserved>; |
(...skipping 562 matching lines...)
2356 return KEEP_SLOT; | 2509 return KEEP_SLOT; |
2357 } | 2510 } |
2358 ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(heap_object, state); | 2511 ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(heap_object, state); |
2359 StaticYoungGenerationMarkingVisitor::IterateBody(heap_object->map(), | 2512 StaticYoungGenerationMarkingVisitor::IterateBody(heap_object->map(), |
2360 heap_object); | 2513 heap_object); |
2361 return KEEP_SLOT; | 2514 return KEEP_SLOT; |
2362 } | 2515 } |
2363 return REMOVE_SLOT; | 2516 return REMOVE_SLOT; |
2364 } | 2517 } |
2365 | 2518 |
2366 static bool IsUnmarkedObject(Heap* heap, Object** p) { | 2519 static bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) { |
2367 DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p)); | 2520 DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p)); |
2368 return heap->InNewSpace(*p) && | 2521 return heap->InNewSpace(*p) && |
2369 !ObjectMarking::IsBlack(HeapObject::cast(*p), | 2522 !ObjectMarking::IsBlack(HeapObject::cast(*p), |
2370 MarkingState::Internal(HeapObject::cast(*p))); | 2523 MarkingState::External(HeapObject::cast(*p))); |
2371 } | 2524 } |
2372 | 2525 |
2373 void MinorMarkCompactCollector::MarkLiveObjects() { | 2526 void MinorMarkCompactCollector::MarkLiveObjects() { |
2374 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK); | 2527 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK); |
2375 | 2528 |
2376 PostponeInterruptsScope postpone(isolate()); | 2529 PostponeInterruptsScope postpone(isolate()); |
2377 | 2530 |
2378 StaticYoungGenerationMarkingVisitor::Initialize(heap()); | 2531 StaticYoungGenerationMarkingVisitor::Initialize(heap()); |
2379 RootMarkingVisitor root_visitor(this); | 2532 RootMarkingVisitor root_visitor(this); |
2380 | 2533 |
(...skipping 28 matching lines...)
2409 | 2562 |
2410 { | 2563 { |
2411 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK); | 2564 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK); |
2412 heap()->IterateEncounteredWeakCollections(&root_visitor); | 2565 heap()->IterateEncounteredWeakCollections(&root_visitor); |
2413 ProcessMarkingDeque(); | 2566 ProcessMarkingDeque(); |
2414 } | 2567 } |
2415 | 2568 |
2416 { | 2569 { |
2417 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES); | 2570 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES); |
2418 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( | 2571 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( |
2419 &IsUnmarkedObject); | 2572 &IsUnmarkedObjectForYoungGeneration); |
2420 isolate() | 2573 isolate() |
2421 ->global_handles() | 2574 ->global_handles() |
2422 ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>( | 2575 ->IterateNewSpaceWeakUnmodifiedRoots< |
2423 &root_visitor); | 2576 GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&root_visitor); |
2424 ProcessMarkingDeque(); | 2577 ProcessMarkingDeque(); |
2425 } | 2578 } |
2426 | 2579 |
2427 marking_deque()->StopUsing(); | 2580 marking_deque()->StopUsing(); |
2428 } | 2581 } |
2429 | 2582 |
2430 void MinorMarkCompactCollector::ProcessMarkingDeque() { | 2583 void MinorMarkCompactCollector::ProcessMarkingDeque() { |
2431 EmptyMarkingDeque(); | 2584 EmptyMarkingDeque(); |
2432 DCHECK(!marking_deque()->overflowed()); | 2585 DCHECK(!marking_deque()->overflowed()); |
2433 DCHECK(marking_deque()->IsEmpty()); | 2586 DCHECK(marking_deque()->IsEmpty()); |
(...skipping 11 matching lines...)
2445 object, MarkingState::External(object)))); | 2598 object, MarkingState::External(object)))); |
2446 | 2599 |
2447 Map* map = object->map(); | 2600 Map* map = object->map(); |
2448 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>( | 2601 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>( |
2449 object, MarkingState::External(object)))); | 2602 object, MarkingState::External(object)))); |
2450 StaticYoungGenerationMarkingVisitor::IterateBody(map, object); | 2603 StaticYoungGenerationMarkingVisitor::IterateBody(map, object); |
2451 } | 2604 } |
2452 } | 2605 } |
2453 | 2606 |
2454 void MinorMarkCompactCollector::CollectGarbage() { | 2607 void MinorMarkCompactCollector::CollectGarbage() { |
| 2608 heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted(); |
| 2609 |
2455 MarkLiveObjects(); | 2610 MarkLiveObjects(); |
2456 | 2611 ClearNonLiveReferences(); |
2457 #ifdef VERIFY_HEAP | 2612 #ifdef VERIFY_HEAP |
2458 if (FLAG_verify_heap) { | 2613 if (FLAG_verify_heap) { |
2459 YoungGenerationMarkingVerifier verifier(heap()); | 2614 YoungGenerationMarkingVerifier verifier(heap()); |
2460 verifier.Run(); | 2615 verifier.Run(); |
2461 } | 2616 } |
2462 #endif // VERIFY_HEAP | 2617 #endif // VERIFY_HEAP |
| 2618 |
| 2619 Evacuate(); |
| 2620 #ifdef VERIFY_HEAP |
| 2621 if (FLAG_verify_heap) { |
| 2622 YoungGenerationEvacuationVerifier verifier(heap()); |
| 2623 verifier.Run(); |
| 2624 } |
| 2625 #endif // VERIFY_HEAP |
| 2626 |
| 2627 heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge(); |
| 2628 |
| 2629 { |
| 2630 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS); |
| 2631 for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(), |
| 2632 heap()->new_space()->FromSpaceEnd())) { |
| 2633 marking_state(p).ClearLiveness(); |
| 2634 } |
| 2635 } |
| 2636 } |
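The final phase of CollectGarbage resets liveness data on what is now from-space: after the semispace flip inside Evacuate, the old mark bitmaps and live-byte counters are stale and must be cleared before the next cycle. A standalone sketch of that reset, with a bitset standing in for V8's per-page marking bitmap:

#include <bitset>

struct PageLiveness {
  std::bitset<256> mark_bits;  // stand-in for the real mark bitmap
  int live_bytes = 0;

  void ClearLiveness() {
    mark_bits.reset();
    live_bytes = 0;
  }
};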
| 2637 |
| 2638 void MinorMarkCompactCollector::ClearNonLiveReferences() { |
| 2639 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR); |
| 2640 |
| 2641 { |
| 2642 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE); |
| 2643 // Internalized strings are always stored in old space, so there is no need |
| 2644 // to clean them here. |
| 2645 YoungGenerationExternalStringTableCleaner external_visitor(*this); |
| 2646 heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor); |
| 2647 heap()->external_string_table_.CleanUpNewSpaceStrings(); |
| 2648 } |
| 2649 |
| 2650 { |
| 2651 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS); |
| 2652 // Process the weak references. |
| 2653 MinorMarkCompactWeakObjectRetainer retainer(*this); |
| 2654 heap()->ProcessYoungWeakReferences(&retainer); |
| 2655 } |
| 2656 } |
| 2657 |
| 2658 void MinorMarkCompactCollector::EvacuatePrologue() { |
| 2659 NewSpace* new_space = heap()->new_space(); |
 | 2660 // Append new space pages to the list of pages to be processed. |
| 2661 for (Page* p : PageRange(new_space->bottom(), new_space->top())) { |
| 2662 new_space_evacuation_pages_.Add(p); |
| 2663 } |
| 2664 new_space->Flip(); |
| 2665 new_space->ResetAllocationInfo(); |
| 2666 } |
| 2667 |
| 2668 void MinorMarkCompactCollector::EvacuateEpilogue() { |
| 2669 heap()->new_space()->set_age_mark(heap()->new_space()->top()); |
| 2670 } |
| 2671 |
| 2672 void MinorMarkCompactCollector::Evacuate() { |
| 2673 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); |
| 2674 Heap::RelocationLock relocation_lock(heap()); |
| 2675 |
| 2676 { |
| 2677 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE); |
| 2678 EvacuatePrologue(); |
| 2679 } |
| 2680 |
| 2681 { |
| 2682 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); |
| 2683 EvacuatePagesInParallel(); |
| 2684 } |
| 2685 |
| 2686 UpdatePointersAfterEvacuation(); |
| 2687 |
| 2688 { |
| 2689 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE); |
| 2690 if (!heap()->new_space()->Rebalance()) { |
| 2691 FatalProcessOutOfMemory("NewSpace::Rebalance"); |
| 2692 } |
| 2693 } |
| 2694 |
| 2695 // Give pages that are queued to be freed back to the OS. |
| 2696 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
| 2697 |
| 2698 { |
| 2699 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); |
| 2700 // TODO(mlippautz): Implement page promotion. |
| 2701 new_space_evacuation_pages_.Rewind(0); |
| 2702 } |
| 2703 |
| 2704 { |
| 2705 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE); |
| 2706 EvacuateEpilogue(); |
| 2707 } |
2463 } | 2708 } |
2464 | 2709 |
2465 void MarkCompactCollector::MarkLiveObjects() { | 2710 void MarkCompactCollector::MarkLiveObjects() { |
2466 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK); | 2711 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK); |
2467 // The recursive GC marker detects when it is nearing stack overflow, | 2712 // The recursive GC marker detects when it is nearing stack overflow, |
2468 // and switches to a different marking system. JS interrupts interfere | 2713 // and switches to a different marking system. JS interrupts interfere |
2469 // with the C stack limit check. | 2714 // with the C stack limit check. |
2470 PostponeInterruptsScope postpone(isolate()); | 2715 PostponeInterruptsScope postpone(isolate()); |
2471 | 2716 |
2472 { | 2717 { |
(...skipping 836 matching lines...)
3309 success = false; | 3554 success = false; |
3310 } else { | 3555 } else { |
3311 ArrayBufferTracker::ProcessBuffers( | 3556 ArrayBufferTracker::ProcessBuffers( |
3312 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | 3557 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
3313 } | 3558 } |
3314 break; | 3559 break; |
3315 } | 3560 } |
3316 return success; | 3561 return success; |
3317 } | 3562 } |
3318 | 3563 |
| 3564 class YoungGenerationEvacuator : public Evacuator { |
| 3565 public: |
| 3566 YoungGenerationEvacuator(MinorMarkCompactCollector* collector, |
| 3567 RecordMigratedSlotVisitor* record_visitor) |
| 3568 : Evacuator(collector->heap(), record_visitor), collector_(collector) {} |
| 3569 |
| 3570 protected: |
| 3571 bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override; |
| 3572 |
| 3573 MinorMarkCompactCollector* collector_; |
| 3574 }; |
| 3575 |
| 3576 bool YoungGenerationEvacuator::RawEvacuatePage(Page* page, |
| 3577 intptr_t* live_bytes) { |
| 3578 bool success = false; |
| 3579 LiveObjectVisitor object_visitor; |
| 3580 const MarkingState state = collector_->marking_state(page); |
| 3581 *live_bytes = state.live_bytes(); |
| 3582 switch (ComputeEvacuationMode(page)) { |
| 3583 case kObjectsNewToOld: |
| 3584 success = object_visitor.VisitBlackObjects( |
| 3585 page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits); |
| 3586 DCHECK(success); |
| 3587 ArrayBufferTracker::ProcessBuffers( |
| 3588 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
| 3589 break; |
| 3590 case kPageNewToOld: |
| 3591 // TODO(mlippautz): Implement page promotion. |
| 3592 UNREACHABLE(); |
| 3593 break; |
| 3594 case kPageNewToNew: |
| 3595 // TODO(mlippautz): Implement page promotion. |
| 3596 UNREACHABLE(); |
| 3597 break; |
| 3598 case kObjectsOldToOld: |
| 3599 UNREACHABLE(); |
| 3600 break; |
| 3601 } |
| 3602 return success; |
| 3603 } |
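Of the four evacuation modes, the young-generation evacuator only implements kObjectsNewToOld in this patch; both page-promotion modes are explicitly unreachable until the TODO lands, and old-to-old compaction never occurs in a minor collection. Condensed as a predicate over a hypothetical enum mirroring the switch above:

enum class EvacuationMode {
  kObjectsNewToOld, kPageNewToOld, kPageNewToNew, kObjectsOldToOld
};

bool SupportedByYoungGenerationEvacuator(EvacuationMode mode) {
  // Page promotion is still TODO(mlippautz) in this CL; old-space
  // compaction belongs to the full collector.
  return mode == EvacuationMode::kObjectsNewToOld;
}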
| 3604 |
3319 class EvacuationJobTraits { | 3605 class EvacuationJobTraits { |
3320 public: | 3606 public: |
3321 typedef int* PerPageData; // Pointer to number of aborted pages. | 3607 struct PageData { |
 | 3608 int* abandoned_pages; // Pointer to the number of abandoned pages. |
| 3609 MarkingState marking_state; |
| 3610 }; |
| 3611 |
| 3612 typedef PageData PerPageData; |
3322 typedef Evacuator* PerTaskData; | 3613 typedef Evacuator* PerTaskData; |
3323 | 3614 |
3324 static const bool NeedSequentialFinalization = true; | 3615 static const bool NeedSequentialFinalization = true; |
3325 | 3616 |
3326 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, | 3617 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, |
3327 MemoryChunk* chunk, PerPageData) { | 3618 MemoryChunk* chunk, PerPageData) { |
3328 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); | 3619 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); |
3329 } | 3620 } |
3330 | 3621 |
3331 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, | 3622 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, |
(...skipping 12 matching lines...)
3344 if (success) { | 3635 if (success) { |
3345 DCHECK(p->IsEvacuationCandidate()); | 3636 DCHECK(p->IsEvacuationCandidate()); |
3346 DCHECK(p->SweepingDone()); | 3637 DCHECK(p->SweepingDone()); |
3347 p->Unlink(); | 3638 p->Unlink(); |
3348 } else { | 3639 } else { |
3349 // We have partially compacted the page, i.e., some objects may have | 3640 // We have partially compacted the page, i.e., some objects may have |
3350 // moved, others are still in place. | 3641 // moved, others are still in place. |
3351 p->ClearEvacuationCandidate(); | 3642 p->ClearEvacuationCandidate(); |
3352 // Slots have already been recorded so we just need to add it to the | 3643 // Slots have already been recorded so we just need to add it to the |
3353 // sweeper, which will happen after updating pointers. | 3644 // sweeper, which will happen after updating pointers. |
3354 *data += 1; | 3645 *data.abandoned_pages += 1; |
3355 } | 3646 } |
3356 break; | 3647 break; |
3357 default: | 3648 default: |
3358 UNREACHABLE(); | 3649 UNREACHABLE(); |
3359 } | 3650 } |
3360 } | 3651 } |
3361 }; | 3652 }; |
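Switching PerPageData from int* to a struct changes how call sites pass data through the job: AddPage now brace-initializes both fields, as seen later in EvacuatePagesInParallel. An illustrative reduction with hypothetical types, not the actual PageParallelJob API:

struct PageData {
  int* abandoned_pages;
  int marking_state;  // stand-in for V8's MarkingState value
};

void AddPage(PageData data) { /* enqueue the page with its per-page data */ }

void Example() {
  int abandoned_pages = 0;
  // Mirrors job.AddPage(page, {&abandoned_pages, marking_state(page)}).
  AddPage({&abandoned_pages, /*marking_state=*/0});
}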
3362 | 3653 |
3363 template <class Evacuator, class Collector> | 3654 template <class Evacuator, class Collector> |
3364 void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks( | 3655 void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks( |
3365 Collector* collector, PageParallelJob<EvacuationJobTraits>* job, | 3656 Collector* collector, PageParallelJob<EvacuationJobTraits>* job, |
3366 RecordMigratedSlotVisitor* record_visitor, const intptr_t live_bytes, | 3657 RecordMigratedSlotVisitor* record_visitor, |
| 3658 MigrationObserver* migration_observer, const intptr_t live_bytes, |
3367 const int& abandoned_pages) { | 3659 const int& abandoned_pages) { |
3368 // Used for trace summary. | 3660 // Used for trace summary. |
3369 double compaction_speed = 0; | 3661 double compaction_speed = 0; |
3370 if (FLAG_trace_evacuation) { | 3662 if (FLAG_trace_evacuation) { |
3371 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3663 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
3372 } | 3664 } |
3373 | 3665 |
3374 const bool profiling = | 3666 const bool profiling = |
3375 heap()->isolate()->is_profiling() || | 3667 heap()->isolate()->is_profiling() || |
3376 heap()->isolate()->logger()->is_logging_code_events() || | 3668 heap()->isolate()->logger()->is_logging_code_events() || |
3377 heap()->isolate()->heap_profiler()->is_tracking_object_moves(); | 3669 heap()->isolate()->heap_profiler()->is_tracking_object_moves(); |
3378 ProfilingMigrationObserver profiling_observer(heap()); | 3670 ProfilingMigrationObserver profiling_observer(heap()); |
3379 | 3671 |
3380 const int wanted_num_tasks = | 3672 const int wanted_num_tasks = |
3381 NumberOfParallelCompactionTasks(job->NumberOfPages(), live_bytes); | 3673 NumberOfParallelCompactionTasks(job->NumberOfPages(), live_bytes); |
3382 Evacuator** evacuators = new Evacuator*[wanted_num_tasks]; | 3674 Evacuator** evacuators = new Evacuator*[wanted_num_tasks]; |
3383 for (int i = 0; i < wanted_num_tasks; i++) { | 3675 for (int i = 0; i < wanted_num_tasks; i++) { |
3384 evacuators[i] = new Evacuator(collector, record_visitor); | 3676 evacuators[i] = new Evacuator(collector, record_visitor); |
3385 if (profiling) evacuators[i]->AddObserver(&profiling_observer); | 3677 if (profiling) evacuators[i]->AddObserver(&profiling_observer); |
| 3678 if (migration_observer != nullptr) |
| 3679 evacuators[i]->AddObserver(migration_observer); |
3386 } | 3680 } |
3387 job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); | 3681 job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); |
3388 const Address top = heap()->new_space()->top(); | 3682 const Address top = heap()->new_space()->top(); |
3389 for (int i = 0; i < wanted_num_tasks; i++) { | 3683 for (int i = 0; i < wanted_num_tasks; i++) { |
3390 evacuators[i]->Finalize(); | 3684 evacuators[i]->Finalize(); |
3391 // Try to find the last LAB that was used for new space allocation in | 3685 // Try to find the last LAB that was used for new space allocation in |
3392 // evacuation tasks. If it was adjacent to the current top, move top back. | 3686 // evacuation tasks. If it was adjacent to the current top, move top back. |
3393 const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB(); | 3687 const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB(); |
3394 if (info.limit() != nullptr && info.limit() == top) { | 3688 if (info.limit() != nullptr && info.limit() == top) { |
3395 DCHECK_NOT_NULL(info.top()); | 3689 DCHECK_NOT_NULL(info.top()); |
(...skipping 18 matching lines...)
3414 | 3708 |
3415 void MarkCompactCollector::EvacuatePagesInParallel() { | 3709 void MarkCompactCollector::EvacuatePagesInParallel() { |
3416 PageParallelJob<EvacuationJobTraits> job( | 3710 PageParallelJob<EvacuationJobTraits> job( |
3417 heap_, heap_->isolate()->cancelable_task_manager(), | 3711 heap_, heap_->isolate()->cancelable_task_manager(), |
3418 &page_parallel_job_semaphore_); | 3712 &page_parallel_job_semaphore_); |
3419 | 3713 |
3420 int abandoned_pages = 0; | 3714 int abandoned_pages = 0; |
3421 intptr_t live_bytes = 0; | 3715 intptr_t live_bytes = 0; |
3422 for (Page* page : old_space_evacuation_pages_) { | 3716 for (Page* page : old_space_evacuation_pages_) { |
3423 live_bytes += MarkingState::Internal(page).live_bytes(); | 3717 live_bytes += MarkingState::Internal(page).live_bytes(); |
3424 job.AddPage(page, &abandoned_pages); | 3718 job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
3425 } | 3719 } |
3426 | 3720 |
3427 const bool reduce_memory = heap()->ShouldReduceMemory(); | 3721 const bool reduce_memory = heap()->ShouldReduceMemory(); |
3428 const Address age_mark = heap()->new_space()->age_mark(); | 3722 const Address age_mark = heap()->new_space()->age_mark(); |
3429 for (Page* page : new_space_evacuation_pages_) { | 3723 for (Page* page : new_space_evacuation_pages_) { |
3430 intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes(); | 3724 intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes(); |
3431 live_bytes += live_bytes_on_page; | 3725 live_bytes += live_bytes_on_page; |
3432 if (!reduce_memory && !page->NeverEvacuate() && | 3726 if (!reduce_memory && !page->NeverEvacuate() && |
3433 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) && | 3727 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) && |
3434 !page->Contains(age_mark) && | 3728 !page->Contains(age_mark) && |
3435 heap()->CanExpandOldGeneration(live_bytes_on_page)) { | 3729 heap()->CanExpandOldGeneration(live_bytes_on_page)) { |
3436 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { | 3730 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { |
3437 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); | 3731 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); |
3438 } else { | 3732 } else { |
3439 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); | 3733 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); |
3440 } | 3734 } |
3441 } | 3735 } |
3442 | 3736 |
3443 job.AddPage(page, &abandoned_pages); | 3737 job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
3444 } | 3738 } |
3445 DCHECK_GE(job.NumberOfPages(), 1); | 3739 DCHECK_GE(job.NumberOfPages(), 1); |
3446 | 3740 |
3447 RecordMigratedSlotVisitor record_visitor(this); | 3741 RecordMigratedSlotVisitor record_visitor(this); |
3448 CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &job, &record_visitor, | 3742 CreateAndExecuteEvacuationTasks<FullEvacuator>( |
3449 live_bytes, abandoned_pages); | 3743 this, &job, &record_visitor, nullptr, live_bytes, abandoned_pages); |
| 3744 } |
| 3745 |
| 3746 void MinorMarkCompactCollector::EvacuatePagesInParallel() { |
| 3747 PageParallelJob<EvacuationJobTraits> job( |
| 3748 heap_, heap_->isolate()->cancelable_task_manager(), |
| 3749 &page_parallel_job_semaphore_); |
| 3750 int abandoned_pages = 0; |
| 3751 intptr_t live_bytes = 0; |
| 3752 |
| 3753 for (Page* page : new_space_evacuation_pages_) { |
| 3754 intptr_t live_bytes_on_page = marking_state(page).live_bytes(); |
| 3755 live_bytes += live_bytes_on_page; |
| 3756 // TODO(mlippautz): Implement page promotion. |
| 3757 job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
| 3758 } |
| 3759 DCHECK_GE(job.NumberOfPages(), 1); |
| 3760 |
| 3761 YoungGenerationMigrationObserver observer(heap(), |
| 3762 heap()->mark_compact_collector()); |
| 3763 YoungGenerationRecordMigratedSlotVisitor record_visitor( |
| 3764 heap()->mark_compact_collector()); |
| 3765 CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>( |
| 3766 this, &job, &record_visitor, &observer, live_bytes, abandoned_pages); |
3450 } | 3767 } |
3451 | 3768 |
3452 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 3769 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
3453 public: | 3770 public: |
3454 virtual Object* RetainAs(Object* object) { | 3771 virtual Object* RetainAs(Object* object) { |
3455 if (object->IsHeapObject()) { | 3772 if (object->IsHeapObject()) { |
3456 HeapObject* heap_object = HeapObject::cast(object); | 3773 HeapObject* heap_object = HeapObject::cast(object); |
3457 MapWord map_word = heap_object->map_word(); | 3774 MapWord map_word = heap_object->map_word(); |
3458 if (map_word.IsForwardingAddress()) { | 3775 if (map_word.IsForwardingAddress()) { |
3459 return map_word.ToForwardingAddress(); | 3776 return map_word.ToForwardingAddress(); |
(...skipping 214 matching lines...)
3674 } | 3991 } |
3675 state.SetLiveBytes(new_live_size); | 3992 state.SetLiveBytes(new_live_size); |
3676 } | 3993 } |
3677 | 3994 |
3678 void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space, | 3995 void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space, |
3679 Page* page) { | 3996 Page* page) { |
3680 base::LockGuard<base::Mutex> guard(&mutex_); | 3997 base::LockGuard<base::Mutex> guard(&mutex_); |
3681 swept_list_[space->identity()].Add(page); | 3998 swept_list_[space->identity()].Add(page); |
3682 } | 3999 } |
3683 | 4000 |
3684 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 4001 void MarkCompactCollector::Evacuate() { |
3685 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | 4002 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); |
3686 Heap::RelocationLock relocation_lock(heap()); | 4003 Heap::RelocationLock relocation_lock(heap()); |
3687 | 4004 |
3688 { | 4005 { |
3689 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE); | 4006 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE); |
3690 EvacuatePrologue(); | 4007 EvacuatePrologue(); |
3691 } | 4008 } |
3692 | 4009 |
3693 { | 4010 { |
3694 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); | 4011 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); |
(...skipping 69 matching lines...)
3764 UpdateUntypedPointers(heap, chunk); | 4081 UpdateUntypedPointers(heap, chunk); |
3765 UpdateTypedPointers(heap, chunk); | 4082 UpdateTypedPointers(heap, chunk); |
3766 return true; | 4083 return true; |
3767 } | 4084 } |
3768 static const bool NeedSequentialFinalization = false; | 4085 static const bool NeedSequentialFinalization = false; |
3769 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 4086 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
3770 } | 4087 } |
3771 | 4088 |
3772 private: | 4089 private: |
3773 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { | 4090 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { |
| 4091 base::LockGuard<base::RecursiveMutex> guard(chunk->mutex()); |
3774 if (type == OLD_TO_NEW) { | 4092 if (type == OLD_TO_NEW) { |
3775 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { | 4093 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { |
3776 return CheckAndUpdateOldToNewSlot(heap, slot); | 4094 return CheckAndUpdateOldToNewSlot(heap, slot); |
3777 }); | 4095 }); |
3778 } else { | 4096 } else { |
3779 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { | 4097 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { |
3780 return UpdateSlot(reinterpret_cast<Object**>(slot)); | 4098 return UpdateSlot(reinterpret_cast<Object**>(slot)); |
3781 }); | 4099 }); |
3782 } | 4100 } |
3783 } | 4101 } |
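The new LockGuard at the top of UpdateUntypedPointers serializes remembered-set iteration per chunk, since pointer-update jobs may now touch the same memory chunk concurrently. A minimal analogue using the standard library, assuming V8's base::RecursiveMutex behaves like std::recursive_mutex:

#include <mutex>

struct Chunk {
  std::recursive_mutex mutex;

  void UpdateUntypedPointers() {
    std::lock_guard<std::recursive_mutex> guard(mutex);
    // ... iterate this chunk's OLD_TO_NEW / OLD_TO_OLD slots safely ...
  }
};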
(...skipping 86 matching lines...)
3870 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 4188 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
3871 RememberedSet<type>::IterateMemoryChunks( | 4189 RememberedSet<type>::IterateMemoryChunks( |
3872 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); | 4190 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); |
3873 int num_pages = job.NumberOfPages(); | 4191 int num_pages = job.NumberOfPages(); |
3874 int num_tasks = NumberOfPointerUpdateTasks(num_pages); | 4192 int num_tasks = NumberOfPointerUpdateTasks(num_pages); |
3875 job.Run(num_tasks, [](int i) { return 0; }); | 4193 job.Run(num_tasks, [](int i) { return 0; }); |
3876 } | 4194 } |
3877 | 4195 |
3878 class ToSpacePointerUpdateJobTraits { | 4196 class ToSpacePointerUpdateJobTraits { |
3879 public: | 4197 public: |
3880 typedef std::pair<Address, Address> PerPageData; | 4198 struct PageData { |
| 4199 Address start; |
| 4200 Address end; |
| 4201 MarkingState marking_state; |
| 4202 }; |
| 4203 |
| 4204 typedef PageData PerPageData; |
3881 typedef PointersUpdatingVisitor* PerTaskData; | 4205 typedef PointersUpdatingVisitor* PerTaskData; |
3882 | 4206 |
3883 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, | 4207 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, |
3884 MemoryChunk* chunk, PerPageData limits) { | 4208 MemoryChunk* chunk, PerPageData page_data) { |
3885 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { | 4209 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { |
3886 // New->new promoted pages contain garbage so they require iteration | 4210 // New->new promoted pages contain garbage so they require iteration |
3887 // using markbits. | 4211 // using markbits. |
3888 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits); | 4212 ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data); |
3889 } else { | 4213 } else { |
3890 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits); | 4214 ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data); |
3891 } | 4215 } |
3892 return true; | 4216 return true; |
3893 } | 4217 } |
3894 | 4218 |
3895 static const bool NeedSequentialFinalization = false; | 4219 static const bool NeedSequentialFinalization = false; |
3896 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 4220 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
3897 } | 4221 } |
3898 | 4222 |
3899 private: | 4223 private: |
3900 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, | 4224 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, |
3901 MemoryChunk* chunk, | 4225 MemoryChunk* chunk, |
3902 PerPageData limits) { | 4226 PerPageData page_data) { |
3903 for (Address cur = limits.first; cur < limits.second;) { | 4227 for (Address cur = page_data.start; cur < page_data.end;) { |
3904 HeapObject* object = HeapObject::FromAddress(cur); | 4228 HeapObject* object = HeapObject::FromAddress(cur); |
3905 Map* map = object->map(); | 4229 Map* map = object->map(); |
3906 int size = object->SizeFromMap(map); | 4230 int size = object->SizeFromMap(map); |
3907 object->IterateBody(map->instance_type(), size, visitor); | 4231 object->IterateBody(map->instance_type(), size, visitor); |
3908 cur += size; | 4232 cur += size; |
3909 } | 4233 } |
3910 } | 4234 } |
3911 | 4235 |
3912 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, | 4236 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, |
3913 MemoryChunk* chunk, | 4237 MemoryChunk* chunk, |
3914 PerPageData limits) { | 4238 PerPageData page_data) { |
3915 LiveObjectIterator<kBlackObjects> it(chunk, MarkingState::Internal(chunk)); | 4239 LiveObjectIterator<kBlackObjects> it(chunk, page_data.marking_state); |
3916 HeapObject* object = NULL; | 4240 HeapObject* object = NULL; |
3917 while ((object = it.Next()) != NULL) { | 4241 while ((object = it.Next()) != NULL) { |
3918 Map* map = object->map(); | 4242 Map* map = object->map(); |
3919 int size = object->SizeFromMap(map); | 4243 int size = object->SizeFromMap(map); |
3920 object->IterateBody(map->instance_type(), size, visitor); | 4244 object->IterateBody(map->instance_type(), size, visitor); |
3921 } | 4245 } |
3922 } | 4246 } |
3923 }; | 4247 }; |
3924 | 4248 |
3925 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { | 4249 template <class MarkingStateProvider> |
| 4250 void UpdateToSpacePointersInParallel( |
| 4251 Heap* heap, base::Semaphore* semaphore, |
| 4252 const MarkingStateProvider& marking_state_provider) { |
3926 PageParallelJob<ToSpacePointerUpdateJobTraits> job( | 4253 PageParallelJob<ToSpacePointerUpdateJobTraits> job( |
3927 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 4254 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
3928 Address space_start = heap->new_space()->bottom(); | 4255 Address space_start = heap->new_space()->bottom(); |
3929 Address space_end = heap->new_space()->top(); | 4256 Address space_end = heap->new_space()->top(); |
3930 for (Page* page : PageRange(space_start, space_end)) { | 4257 for (Page* page : PageRange(space_start, space_end)) { |
3931 Address start = | 4258 Address start = |
3932 page->Contains(space_start) ? space_start : page->area_start(); | 4259 page->Contains(space_start) ? space_start : page->area_start(); |
3933 Address end = page->Contains(space_end) ? space_end : page->area_end(); | 4260 Address end = page->Contains(space_end) ? space_end : page->area_end(); |
3934 job.AddPage(page, std::make_pair(start, end)); | 4261 job.AddPage(page, {start, end, marking_state_provider.marking_state(page)}); |
3935 } | 4262 } |
3936 PointersUpdatingVisitor visitor; | 4263 PointersUpdatingVisitor visitor; |
3937 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; | 4264 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; |
3938 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); | 4265 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); |
3939 } | 4266 } |
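Templating UpdateToSpacePointersInParallel on a MarkingStateProvider lets both collectors pass *this: the only requirement is a marking_state(page) accessor, presumably resolving to the internal state for the full collector and the external one for the minor collector. A sketch of that duck-typed contract with minimal stand-in types:

struct Page {};
struct MarkingState { int live_bytes = 0; };

template <class MarkingStateProvider>
MarkingState StateFor(const MarkingStateProvider& provider, Page* page) {
  return provider.marking_state(page);  // the whole "interface"
}

struct MinorCollectorLike {
  MarkingState marking_state(Page*) const { return MarkingState{}; }
};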
3940 | 4267 |
3941 void MarkCompactCollector::UpdatePointersAfterEvacuation() { | 4268 void MarkCompactCollector::UpdatePointersAfterEvacuation() { |
3942 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); | 4269 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); |
3943 | 4270 |
3944 | 4271 |
3945 { | 4272 { |
3946 TRACE_GC(heap()->tracer(), | 4273 TRACE_GC(heap()->tracer(), |
3947 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); | 4274 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); |
3948 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_); | 4275 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, |
| 4276 *this); |
3949 // Update roots. | 4277 // Update roots. |
3950 PointersUpdatingVisitor updating_visitor; | 4278 PointersUpdatingVisitor updating_visitor; |
3951 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 4279 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
3952 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); | 4280 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); |
3953 } | 4281 } |
3954 | 4282 |
3955 { | 4283 { |
3956 Heap* heap = this->heap(); | 4284 Heap* heap = this->heap(); |
3957 TRACE_GC(heap->tracer(), | 4285 TRACE_GC(heap->tracer(), |
3958 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); | 4286 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); |
3959 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); | 4287 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); |
3960 } | 4288 } |
3961 | 4289 |
3962 { | 4290 { |
3963 TRACE_GC(heap()->tracer(), | 4291 TRACE_GC(heap()->tracer(), |
3964 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); | 4292 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); |
3965 // Update pointers from external string table. | 4293 // Update pointers from external string table. |
3966 heap_->UpdateReferencesInExternalStringTable( | 4294 heap_->UpdateReferencesInExternalStringTable( |
3967 &UpdateReferenceInExternalStringTableEntry); | 4295 &UpdateReferenceInExternalStringTableEntry); |
3968 | 4296 |
3969 EvacuationWeakObjectRetainer evacuation_object_retainer; | 4297 EvacuationWeakObjectRetainer evacuation_object_retainer; |
3970 heap()->ProcessWeakListRoots(&evacuation_object_retainer); | 4298 heap()->ProcessWeakListRoots(&evacuation_object_retainer); |
3971 } | 4299 } |
3972 } | 4300 } |
3973 | 4301 |
| 4302 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() { |
| 4303 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); |
| 4304 |
| 4305 PointersUpdatingVisitor updating_visitor; |
| 4306 |
| 4307 { |
| 4308 TRACE_GC(heap()->tracer(), |
| 4309 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); |
| 4310 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, |
| 4311 *this); |
| 4312 // TODO(mlippautz): Iteration mode is not optimal as we process all |
| 4313 // global handles. Find a way to only process the ones related to new |
| 4314 // space. |
| 4315 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
| 4316 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); |
| 4317 } |
| 4318 |
| 4319 { |
| 4320 TRACE_GC(heap()->tracer(), |
| 4321 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); |
| 4322 |
| 4323 EvacuationWeakObjectRetainer evacuation_object_retainer; |
| 4324 heap()->ProcessWeakListRoots(&evacuation_object_retainer); |
| 4325 |
| 4326 // Update pointers from external string table. |
| 4327 heap()->UpdateNewSpaceReferencesInExternalStringTable( |
| 4328 &UpdateReferenceInExternalStringTableEntry); |
| 4329 heap()->IterateEncounteredWeakCollections(&updating_visitor); |
| 4330 } |
| 4331 } |
3974 | 4332 |
3975 void MarkCompactCollector::ReleaseEvacuationCandidates() { | 4333 void MarkCompactCollector::ReleaseEvacuationCandidates() { |
3976 for (Page* p : old_space_evacuation_pages_) { | 4334 for (Page* p : old_space_evacuation_pages_) { |
3977 if (!p->IsEvacuationCandidate()) continue; | 4335 if (!p->IsEvacuationCandidate()) continue; |
3978 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 4336 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
3979 MarkingState::Internal(p).SetLiveBytes(0); | 4337 MarkingState::Internal(p).SetLiveBytes(0); |
3980 CHECK(p->SweepingDone()); | 4338 CHECK(p->SweepingDone()); |
3981 space->ReleasePage(p); | 4339 space->ReleasePage(p); |
3982 } | 4340 } |
3983 old_space_evacuation_pages_.Rewind(0); | 4341 old_space_evacuation_pages_.Rewind(0); |
(...skipping 95 matching lines...)
4079 | 4437 |
4080 int will_be_swept = 0; | 4438 int will_be_swept = 0; |
4081 bool unused_page_present = false; | 4439 bool unused_page_present = false; |
4082 | 4440 |
4083 // Loop needs to support deletion if live bytes == 0 for a page. | 4441 // Loop needs to support deletion if live bytes == 0 for a page. |
4084 for (auto it = space->begin(); it != space->end();) { | 4442 for (auto it = space->begin(); it != space->end();) { |
4085 Page* p = *(it++); | 4443 Page* p = *(it++); |
4086 DCHECK(p->SweepingDone()); | 4444 DCHECK(p->SweepingDone()); |
4087 | 4445 |
4088 if (p->IsEvacuationCandidate()) { | 4446 if (p->IsEvacuationCandidate()) { |
4089 // Will be processed in EvacuateNewSpaceAndCandidates. | 4447 // Will be processed in Evacuate. |
4090 DCHECK(evacuation_candidates_.length() > 0); | 4448 DCHECK(evacuation_candidates_.length() > 0); |
4091 continue; | 4449 continue; |
4092 } | 4450 } |
4093 | 4451 |
4094 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { | 4452 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { |
4095 // We need to sweep the page to get it into an iterable state again. Note | 4453 // We need to sweep the page to get it into an iterable state again. Note |
4096 // that this adds unusable memory into the free list that is later on | 4454 // that this adds unusable memory into the free list that is later on |
4097 // (in the free list) dropped again. Since we only use the flag for | 4455 // (in the free list) dropped again. Since we only use the flag for |
4098 // testing this is fine. | 4456 // testing this is fine. |
4099 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 4457 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
(...skipping 86 matching lines...)
4186 // The target is always in old space, we don't have to record the slot in | 4544 // The target is always in old space, we don't have to record the slot in |
4187 // the old-to-new remembered set. | 4545 // the old-to-new remembered set. |
4188 DCHECK(!heap()->InNewSpace(target)); | 4546 DCHECK(!heap()->InNewSpace(target)); |
4189 RecordRelocSlot(host, &rinfo, target); | 4547 RecordRelocSlot(host, &rinfo, target); |
4190 } | 4548 } |
4191 } | 4549 } |
4192 } | 4550 } |
4193 | 4551 |
4194 } // namespace internal | 4552 } // namespace internal |
4195 } // namespace v8 | 4553 } // namespace v8 |