Chromium Code Reviews

Side by Side Diff: src/heap/mark-compact.cc

Issue 2855143003: [heap] Minor MC: Implement page moving (Closed)
Patch Set: Disable flag (created 3 years, 7 months ago)
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/mark-compact.h" 5 #include "src/heap/mark-compact.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h" 9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h" 10 #include "src/code-stubs.h"
(...skipping 429 matching lines...)
440 } 440 }
441 441
442 return compacting_; 442 return compacting_;
443 } 443 }
444 444
445 void MarkCompactCollector::CollectGarbage() { 445 void MarkCompactCollector::CollectGarbage() {
446 // Make sure that Prepare() has been called. The individual steps below will 446 // Make sure that Prepare() has been called. The individual steps below will
447 // update the state as they proceed. 447 // update the state as they proceed.
448 DCHECK(state_ == PREPARE_GC); 448 DCHECK(state_ == PREPARE_GC);
449 449
450 heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
451
450 MarkLiveObjects(); 452 MarkLiveObjects();
451 453
452 DCHECK(heap_->incremental_marking()->IsStopped()); 454 DCHECK(heap_->incremental_marking()->IsStopped());
453 455
454 ClearNonLiveReferences(); 456 ClearNonLiveReferences();
455 457
456 RecordObjectStats(); 458 RecordObjectStats();
457 459
458 #ifdef VERIFY_HEAP 460 #ifdef VERIFY_HEAP
459 if (FLAG_verify_heap) { 461 if (FLAG_verify_heap) {
(...skipping 991 matching lines...)
1451 heap()->isolate()->thread_manager()->IterateArchivedThreads( 1453 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1452 &code_marking_visitor); 1454 &code_marking_visitor);
1453 1455
1454 SharedFunctionInfoMarkingVisitor visitor(this); 1456 SharedFunctionInfoMarkingVisitor visitor(this);
1455 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor); 1457 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1456 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor); 1458 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1457 1459
1458 ProcessMarkingDeque(); 1460 ProcessMarkingDeque();
1459 } 1461 }
1460 1462
1463 void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
1464 for (Page* p : sweep_to_iterate_pages_) {
1465 if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
1466 p->ClearFlag(Page::SWEEP_TO_ITERATE);
1467 marking_state(p).ClearLiveness();
1468 }
1469 }
1470 sweep_to_iterate_pages_.clear();
1471 }
1472
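Reading the patch, SWEEP_TO_ITERATE appears to tag new-space pages that the minor collector promoted wholesale (see the evacuation epilogue further down in this file): their minor-MC markbits are kept so the page stays iterable without sweeping, and a full GC must drop that leftover state before it starts. Below is a minimal, self-contained sketch of that clear-flag-and-liveness pattern; Page, Flag, and ClearLiveness here are simplified stand-ins, not the V8 types.

#include <cstdint>
#include <vector>

// Simplified stand-ins for illustration only; not the V8 classes.
struct Page {
  enum Flag : uint32_t { SWEEP_TO_ITERATE = 1u << 0 };
  uint32_t flags = 0;
  std::vector<bool> markbits;  // stands in for the minor collector's liveness state

  bool IsFlagSet(Flag f) const { return (flags & f) != 0; }
  void ClearFlag(Flag f) { flags &= ~static_cast<uint32_t>(f); }
  void ClearLiveness() { markbits.assign(markbits.size(), false); }
};

// Mirrors the shape of CleanupSweepToIteratePages(): drop the flag and the
// stale liveness info for every remembered page, then forget the pages.
void CleanupSweepToIteratePages(std::vector<Page*>& sweep_to_iterate_pages) {
  for (Page* p : sweep_to_iterate_pages) {
    if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
      p->ClearFlag(Page::SWEEP_TO_ITERATE);
      p->ClearLiveness();
    }
  }
  sweep_to_iterate_pages.clear();
}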
1461 class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor { 1473 class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
1462 public: 1474 public:
1463 explicit RootMarkingVisitor(MinorMarkCompactCollector* collector) 1475 explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
1464 : collector_(collector) {} 1476 : collector_(collector) {}
1465 1477
1466 void VisitRootPointer(Root root, Object** p) override { 1478 void VisitRootPointer(Root root, Object** p) override {
1467 MarkObjectByPointer(p); 1479 MarkObjectByPointer(p);
1468 } 1480 }
1469 1481
1470 void VisitRootPointers(Root root, Object** start, Object** end) override { 1482 void VisitRootPointers(Root root, Object** start, Object** end) override {
(...skipping 1110 matching lines...)
2581 Map* map = object->map(); 2593 Map* map = object->map();
2582 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>( 2594 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>(
2583 object, MarkingState::External(object)))); 2595 object, MarkingState::External(object))));
2584 StaticYoungGenerationMarkingVisitor::IterateBody(map, object); 2596 StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
2585 } 2597 }
2586 } 2598 }
2587 2599
2588 void MinorMarkCompactCollector::CollectGarbage() { 2600 void MinorMarkCompactCollector::CollectGarbage() {
2589 heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted(); 2601 heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
2590 2602
2603 CleanupSweepToIteratePages();
2604
2591 MarkLiveObjects(); 2605 MarkLiveObjects();
2592 ClearNonLiveReferences(); 2606 ClearNonLiveReferences();
2593 #ifdef VERIFY_HEAP 2607 #ifdef VERIFY_HEAP
2594 if (FLAG_verify_heap) { 2608 if (FLAG_verify_heap) {
2595 YoungGenerationMarkingVerifier verifier(heap()); 2609 YoungGenerationMarkingVerifier verifier(heap());
2596 verifier.Run(); 2610 verifier.Run();
2597 } 2611 }
2598 #endif // VERIFY_HEAP 2612 #endif // VERIFY_HEAP
2599 2613
2600 Evacuate(); 2614 Evacuate();
2601 #ifdef VERIFY_HEAP 2615 #ifdef VERIFY_HEAP
2602 if (FLAG_verify_heap) { 2616 if (FLAG_verify_heap) {
2603 YoungGenerationEvacuationVerifier verifier(heap()); 2617 YoungGenerationEvacuationVerifier verifier(heap());
2604 verifier.Run(); 2618 verifier.Run();
2605 } 2619 }
2606 #endif // VERIFY_HEAP 2620 #endif // VERIFY_HEAP
2607 2621
2608 heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge(); 2622 heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge();
2609 2623
2610 { 2624 {
2611 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS); 2625 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS);
2612 for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(), 2626 for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
2613 heap()->new_space()->FromSpaceEnd())) { 2627 heap()->new_space()->FromSpaceEnd())) {
2628 DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
2614 marking_state(p).ClearLiveness(); 2629 marking_state(p).ClearLiveness();
2615 } 2630 }
2616 } 2631 }
2617 2632
2618 heap()->account_external_memory_concurrently_freed(); 2633 heap()->account_external_memory_concurrently_freed();
2619 } 2634 }
2620 2635
2636 void MinorMarkCompactCollector::MakeIterable(
2637 Page* p, MarkingTreatmentMode marking_mode,
2638 FreeSpaceTreatmentMode free_space_mode) {
2639 // We have to clear the full collector's markbits for the areas that we
2640 // remove here.
2641 MarkCompactCollector* full_collector = heap()->mark_compact_collector();
2642 Address free_start = p->area_start();
2643 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
2644 LiveObjectIterator<kBlackObjects> it(p, marking_state(p));
2645 HeapObject* object = nullptr;
2646
2647 while ((object = it.Next()) != nullptr) {
2648 DCHECK(ObjectMarking::IsBlack(object, marking_state(object)));
2649 Address free_end = object->address();
2650 if (free_end != free_start) {
2651 CHECK_GT(free_end, free_start);
2652 size_t size = static_cast<size_t>(free_end - free_start);
2653 if (free_space_mode == ZAP_FREE_SPACE) {
2654 memset(free_start, 0xcc, size);
2655 full_collector->marking_state(p).bitmap()->ClearRange(
2656 p->AddressToMarkbitIndex(free_start),
2657 p->AddressToMarkbitIndex(free_end));
2658 }
2659 p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
2660 ClearRecordedSlots::kNo);
2661 }
2662 Map* map = object->synchronized_map();
2663 int size = object->SizeFromMap(map);
2664 free_start = free_end + size;
2665 }
2666
2667 if (free_start != p->area_end()) {
2668 CHECK_GT(p->area_end(), free_start);
2669 size_t size = static_cast<size_t>(p->area_end() - free_start);
2670 if (free_space_mode == ZAP_FREE_SPACE) {
2671 memset(free_start, 0xcc, size);
2672 full_collector->marking_state(p).bitmap()->ClearRange(
2673 p->AddressToMarkbitIndex(free_start),
2674 p->AddressToMarkbitIndex(p->area_end()));
2675 }
2676 p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
2677 ClearRecordedSlots::kNo);
2678 }
2679
2680 if (marking_mode == MarkingTreatmentMode::CLEAR) {
2681 marking_state(p).ClearLiveness();
2682 p->ClearFlag(Page::SWEEP_TO_ITERATE);
2683 }
2684 }
2685
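MakeIterable() above walks the black (live) objects of a promoted page in address order and turns every gap between consecutive objects, plus the tail of the page, into a filler, optionally zapping the freed bytes with 0xcc and clearing the full collector's markbits over that range. The following is a stripped-down sketch of the gap-filling walk, assuming a presorted list of live ranges and a hypothetical CreateFiller() helper in place of the real LiveObjectIterator/CreateFillerObjectAt machinery.

#include <cstddef>
#include <cstring>
#include <vector>

using Address = unsigned char*;

struct LiveRange { Address start; size_t size; };  // hypothetical: one live object

// Hypothetical filler hook; in V8 this would install a free-space object.
void CreateFiller(Address start, size_t size) { /* placeholder */ }

// Fill every gap between live objects (and the tail of the area) with fillers,
// optionally zapping the freed bytes, mirroring the structure of MakeIterable.
void MakeIterableSketch(Address area_start, Address area_end,
                        const std::vector<LiveRange>& live_sorted, bool zap) {
  Address free_start = area_start;
  for (const LiveRange& obj : live_sorted) {
    Address free_end = obj.start;
    if (free_end != free_start) {
      size_t size = static_cast<size_t>(free_end - free_start);
      if (zap) memset(free_start, 0xcc, size);
      CreateFiller(free_start, size);
    }
    free_start = free_end + obj.size;
  }
  if (free_start != area_end) {
    size_t size = static_cast<size_t>(area_end - free_start);
    if (zap) memset(free_start, 0xcc, size);
    CreateFiller(free_start, size);
  }
}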
2621 void MinorMarkCompactCollector::ClearNonLiveReferences() { 2686 void MinorMarkCompactCollector::ClearNonLiveReferences() {
2622 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR); 2687 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
2623 2688
2624 { 2689 {
2625 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE); 2690 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
2626 // Internalized strings are always stored in old space, so there is no need 2691 // Internalized strings are always stored in old space, so there is no need
2627 // to clean them here. 2692 // to clean them here.
2628 YoungGenerationExternalStringTableCleaner external_visitor(*this); 2693 YoungGenerationExternalStringTableCleaner external_visitor(*this);
2629 heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor); 2694 heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
2630 heap()->external_string_table_.CleanUpNewSpaceStrings(); 2695 heap()->external_string_table_.CleanUpNewSpaceStrings();
(...skipping 42 matching lines...)
2673 if (!heap()->new_space()->Rebalance()) { 2738 if (!heap()->new_space()->Rebalance()) {
2674 FatalProcessOutOfMemory("NewSpace::Rebalance"); 2739 FatalProcessOutOfMemory("NewSpace::Rebalance");
2675 } 2740 }
2676 } 2741 }
2677 2742
2678 // Give pages that are queued to be freed back to the OS. 2743 // Give pages that are queued to be freed back to the OS.
2679 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); 2744 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2680 2745
2681 { 2746 {
2682 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); 2747 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
2683 // TODO(mlippautz): Implement page promotion. 2748 for (Page* p : new_space_evacuation_pages_) {
2749 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
2750 p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
2751 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
2752 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
2753 p->SetFlag(Page::SWEEP_TO_ITERATE);
2754 sweep_to_iterate_pages_.push_back(p);
2755 }
2756 }
2684 new_space_evacuation_pages_.Rewind(0); 2757 new_space_evacuation_pages_.Rewind(0);
2685 } 2758 }
2686 2759
2687 { 2760 {
2688 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE); 2761 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
2689 EvacuateEpilogue(); 2762 EvacuateEpilogue();
2690 } 2763 }
2691 } 2764 }
2692 2765
2693 void MarkCompactCollector::MarkLiveObjects() { 2766 void MarkCompactCollector::MarkLiveObjects() {
(...skipping 870 matching lines...)
3564 *live_bytes = state.live_bytes(); 3637 *live_bytes = state.live_bytes();
3565 switch (ComputeEvacuationMode(page)) { 3638 switch (ComputeEvacuationMode(page)) {
3566 case kObjectsNewToOld: 3639 case kObjectsNewToOld:
3567 success = object_visitor.VisitBlackObjects( 3640 success = object_visitor.VisitBlackObjects(
3568 page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits); 3641 page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
3569 DCHECK(success); 3642 DCHECK(success);
3570 ArrayBufferTracker::ProcessBuffers( 3643 ArrayBufferTracker::ProcessBuffers(
3571 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); 3644 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3572 break; 3645 break;
3573 case kPageNewToOld: 3646 case kPageNewToOld:
3574 // TODO(mlippautz): Implement page promotion. 3647 success = object_visitor.VisitBlackObjects(
3575 UNREACHABLE(); 3648 page, state, &new_to_old_page_visitor_,
3649 LiveObjectVisitor::kKeepMarking);
3650 DCHECK(success);
3651 new_to_old_page_visitor_.account_moved_bytes(state.live_bytes());
3652 // TODO(mlippautz): If cleaning array buffers is too slow here we can
3653 // delay it until the next GC.
3654 ArrayBufferTracker::FreeDead(page, state);
3655 if (heap()->ShouldZapGarbage())
3656 collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
3657 ZAP_FREE_SPACE);
3576 break; 3658 break;
3577 case kPageNewToNew: 3659 case kPageNewToNew:
3578 // TODO(mlippautz): Implement page promotion. 3660 success = object_visitor.VisitBlackObjects(
3579 UNREACHABLE(); 3661 page, state, &new_to_new_page_visitor_,
3662 LiveObjectVisitor::kKeepMarking);
3663 DCHECK(success);
3664 new_to_new_page_visitor_.account_moved_bytes(state.live_bytes());
3665 // TODO(mlippautz): If cleaning array buffers is too slow here we can
3666 // delay it until the next GC.
3667 ArrayBufferTracker::FreeDead(page, state);
3668 if (heap()->ShouldZapGarbage())
3669 collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
3670 ZAP_FREE_SPACE);
3580 break; 3671 break;
3581 case kObjectsOldToOld: 3672 case kObjectsOldToOld:
3582 UNREACHABLE(); 3673 UNREACHABLE();
3583 break; 3674 break;
3584 } 3675 }
3585 return success; 3676 return success;
3586 } 3677 }
3587 3678
3588 class EvacuationJobTraits { 3679 class EvacuationJobTraits {
3589 public: 3680 public:
(...skipping 92 matching lines...)
3682 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS 3773 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
3683 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n", 3774 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
3684 isolate()->time_millis_since_init(), 3775 isolate()->time_millis_since_init(),
3685 FLAG_parallel_compaction ? "yes" : "no", job->NumberOfPages(), 3776 FLAG_parallel_compaction ? "yes" : "no", job->NumberOfPages(),
3686 abandoned_pages, wanted_num_tasks, job->NumberOfTasks(), 3777 abandoned_pages, wanted_num_tasks, job->NumberOfTasks(),
3687 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), 3778 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
3688 live_bytes, compaction_speed); 3779 live_bytes, compaction_speed);
3689 } 3780 }
3690 } 3781 }
3691 3782
3783 bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
3784 const bool reduce_memory = heap()->ShouldReduceMemory();
3785 const Address age_mark = heap()->new_space()->age_mark();
3786 return !reduce_memory && !p->NeverEvacuate() &&
3787 (live_bytes > Evacuator::PageEvacuationThreshold()) &&
3788 !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
3789 }
3790
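ShouldMovePage() centralizes the promotion heuristic that the full and minor collectors now share: a page is moved wholesale only when the heap is not in a reduce-memory phase, the page may be evacuated at all, its live bytes exceed Evacuator::PageEvacuationThreshold() (a flag-controlled fraction of the usable page area), it does not contain the new-space age mark, and the old generation can still grow by that amount. A hedged sketch with simplified stand-in inputs follows; HeapInfo and PageInfo are illustrative, and the headroom field only approximates what CanExpandOldGeneration() checks.

#include <cstdint>

// Hypothetical, simplified inputs; none of these are the V8 types.
struct PageInfo {
  intptr_t live_bytes;
  bool never_evacuate;
  bool contains_age_mark;
};

struct HeapInfo {
  bool should_reduce_memory;
  intptr_t old_generation_headroom;    // how much old space can still grow
  intptr_t page_evacuation_threshold;  // stand-in for PageEvacuationThreshold()
};

// Same shape as ShouldMovePage(): move the whole page only when it is densely
// live and doing so cannot hurt memory goals or old-generation limits.
bool ShouldMovePageSketch(const HeapInfo& heap, const PageInfo& page) {
  return !heap.should_reduce_memory && !page.never_evacuate &&
         page.live_bytes > heap.page_evacuation_threshold &&
         !page.contains_age_mark &&
         page.live_bytes <= heap.old_generation_headroom;
}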
3692 void MarkCompactCollector::EvacuatePagesInParallel() { 3791 void MarkCompactCollector::EvacuatePagesInParallel() {
3693 PageParallelJob<EvacuationJobTraits> job( 3792 PageParallelJob<EvacuationJobTraits> job(
3694 heap_, heap_->isolate()->cancelable_task_manager(), 3793 heap_, heap_->isolate()->cancelable_task_manager(),
3695 &page_parallel_job_semaphore_); 3794 &page_parallel_job_semaphore_);
3696 3795
3697 int abandoned_pages = 0; 3796 int abandoned_pages = 0;
3698 intptr_t live_bytes = 0; 3797 intptr_t live_bytes = 0;
3699 for (Page* page : old_space_evacuation_pages_) { 3798 for (Page* page : old_space_evacuation_pages_) {
3700 live_bytes += MarkingState::Internal(page).live_bytes(); 3799 live_bytes += MarkingState::Internal(page).live_bytes();
3701 job.AddPage(page, {&abandoned_pages, marking_state(page)}); 3800 job.AddPage(page, {&abandoned_pages, marking_state(page)});
3702 } 3801 }
3703 3802
3704 const bool reduce_memory = heap()->ShouldReduceMemory();
3705 const Address age_mark = heap()->new_space()->age_mark();
3706 for (Page* page : new_space_evacuation_pages_) { 3803 for (Page* page : new_space_evacuation_pages_) {
3707 intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes(); 3804 intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes();
3708 live_bytes += live_bytes_on_page; 3805 live_bytes += live_bytes_on_page;
3709 if (!reduce_memory && !page->NeverEvacuate() && 3806 if (ShouldMovePage(page, live_bytes_on_page)) {
3710 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) &&
3711 !page->Contains(age_mark) &&
3712 heap()->CanExpandOldGeneration(live_bytes_on_page)) {
3713 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { 3807 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
3714 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); 3808 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
3715 } else { 3809 } else {
3716 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); 3810 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
3717 } 3811 }
3718 } 3812 }
3719
3720 job.AddPage(page, {&abandoned_pages, marking_state(page)}); 3813 job.AddPage(page, {&abandoned_pages, marking_state(page)});
3721 } 3814 }
3722 DCHECK_GE(job.NumberOfPages(), 1); 3815 DCHECK_GE(job.NumberOfPages(), 1);
3723 3816
3724 RecordMigratedSlotVisitor record_visitor(this); 3817 RecordMigratedSlotVisitor record_visitor(this);
3725 CreateAndExecuteEvacuationTasks<FullEvacuator>( 3818 CreateAndExecuteEvacuationTasks<FullEvacuator>(
3726 this, &job, &record_visitor, nullptr, live_bytes, abandoned_pages); 3819 this, &job, &record_visitor, nullptr, live_bytes, abandoned_pages);
3727 } 3820 }
3728 3821
3729 void MinorMarkCompactCollector::EvacuatePagesInParallel() { 3822 void MinorMarkCompactCollector::EvacuatePagesInParallel() {
3730 PageParallelJob<EvacuationJobTraits> job( 3823 PageParallelJob<EvacuationJobTraits> job(
3731 heap_, heap_->isolate()->cancelable_task_manager(), 3824 heap_, heap_->isolate()->cancelable_task_manager(),
3732 &page_parallel_job_semaphore_); 3825 &page_parallel_job_semaphore_);
3733 int abandoned_pages = 0; 3826 int abandoned_pages = 0;
3734 intptr_t live_bytes = 0; 3827 intptr_t live_bytes = 0;
3735 3828
3736 for (Page* page : new_space_evacuation_pages_) { 3829 for (Page* page : new_space_evacuation_pages_) {
3737 intptr_t live_bytes_on_page = marking_state(page).live_bytes(); 3830 intptr_t live_bytes_on_page = marking_state(page).live_bytes();
3738 live_bytes += live_bytes_on_page; 3831 live_bytes += live_bytes_on_page;
3739 // TODO(mlippautz): Implement page promotion. 3832 if (ShouldMovePage(page, live_bytes_on_page)) {
3833 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
3834 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
3835 } else {
3836 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
3837 }
3838 }
3740 job.AddPage(page, {&abandoned_pages, marking_state(page)}); 3839 job.AddPage(page, {&abandoned_pages, marking_state(page)});
3741 } 3840 }
3742 DCHECK_GE(job.NumberOfPages(), 1); 3841 DCHECK_GE(job.NumberOfPages(), 1);
3743 3842
3744 YoungGenerationMigrationObserver observer(heap(), 3843 YoungGenerationMigrationObserver observer(heap(),
3745 heap()->mark_compact_collector()); 3844 heap()->mark_compact_collector());
3746 YoungGenerationRecordMigratedSlotVisitor record_visitor( 3845 YoungGenerationRecordMigratedSlotVisitor record_visitor(
3747 heap()->mark_compact_collector()); 3846 heap()->mark_compact_collector());
3748 CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>( 3847 CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
3749 this, &job, &record_visitor, &observer, live_bytes, abandoned_pages); 3848 this, &job, &record_visitor, &observer, live_bytes, abandoned_pages);
(...skipping 303 matching lines...)
4053 FullEvacuationVerifier verifier(heap()); 4152 FullEvacuationVerifier verifier(heap());
4054 verifier.Run(); 4153 verifier.Run();
4055 } 4154 }
4056 #endif 4155 #endif
4057 } 4156 }
4058 4157
4059 template <RememberedSetType type> 4158 template <RememberedSetType type>
4060 class PointerUpdateJobTraits { 4159 class PointerUpdateJobTraits {
4061 public: 4160 public:
4062 typedef int PerPageData; // Per page data is not used in this job. 4161 typedef int PerPageData; // Per page data is not used in this job.
4063 typedef int PerTaskData; // Per task data is not used in this job. 4162 typedef const MarkCompactCollectorBase* PerTaskData;
4064 4163
4065 static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk, 4164 static bool ProcessPageInParallel(Heap* heap, PerTaskData task_data,
4066 PerPageData) { 4165 MemoryChunk* chunk, PerPageData) {
4067 UpdateUntypedPointers(heap, chunk); 4166 UpdateUntypedPointers(heap, chunk, task_data);
4068 UpdateTypedPointers(heap, chunk); 4167 UpdateTypedPointers(heap, chunk, task_data);
4069 return true; 4168 return true;
4070 } 4169 }
4071 static const bool NeedSequentialFinalization = false; 4170 static const bool NeedSequentialFinalization = false;
4072 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { 4171 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
4073 } 4172 }
4074 4173
4075 private: 4174 private:
4076 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { 4175 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk,
4176 const MarkCompactCollectorBase* collector) {
4077 base::LockGuard<base::RecursiveMutex> guard(chunk->mutex()); 4177 base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
4078 if (type == OLD_TO_NEW) { 4178 if (type == OLD_TO_NEW) {
4079 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { 4179 RememberedSet<OLD_TO_NEW>::Iterate(
4080 return CheckAndUpdateOldToNewSlot(heap, slot); 4180 chunk, [heap, collector](Address slot) {
4081 }); 4181 return CheckAndUpdateOldToNewSlot(heap, slot, collector);
4182 });
4082 } else { 4183 } else {
4083 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { 4184 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
4084 return UpdateSlot(reinterpret_cast<Object**>(slot)); 4185 return UpdateSlot(reinterpret_cast<Object**>(slot));
4085 }); 4186 });
4086 } 4187 }
4087 } 4188 }
4088 4189
4089 static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) { 4190 static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
4191 const MarkCompactCollectorBase* collector) {
4090 if (type == OLD_TO_OLD) { 4192 if (type == OLD_TO_OLD) {
4091 Isolate* isolate = heap->isolate(); 4193 Isolate* isolate = heap->isolate();
4092 RememberedSet<OLD_TO_OLD>::IterateTyped( 4194 RememberedSet<OLD_TO_OLD>::IterateTyped(
4093 chunk, 4195 chunk,
4094 [isolate](SlotType slot_type, Address host_addr, Address slot) { 4196 [isolate](SlotType slot_type, Address host_addr, Address slot) {
4095 return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type, 4197 return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
4096 slot, UpdateSlot); 4198 slot, UpdateSlot);
4097 }); 4199 });
4098 } else { 4200 } else {
4099 Isolate* isolate = heap->isolate(); 4201 Isolate* isolate = heap->isolate();
4100 RememberedSet<OLD_TO_NEW>::IterateTyped( 4202 RememberedSet<OLD_TO_NEW>::IterateTyped(
4101 chunk, 4203 chunk, [isolate, heap, collector](SlotType slot_type,
4102 [isolate, heap](SlotType slot_type, Address host_addr, Address slot) { 4204 Address host_addr, Address slot) {
4103 return UpdateTypedSlotHelper::UpdateTypedSlot( 4205 return UpdateTypedSlotHelper::UpdateTypedSlot(
4104 isolate, slot_type, slot, [heap](Object** slot) { 4206 isolate, slot_type, slot, [heap, collector](Object** slot) {
4105 return CheckAndUpdateOldToNewSlot( 4207 return CheckAndUpdateOldToNewSlot(
4106 heap, reinterpret_cast<Address>(slot)); 4208 heap, reinterpret_cast<Address>(slot), collector);
4107 }); 4209 });
4108 }); 4210 });
4109 } 4211 }
4110 } 4212 }
4111 4213
4112 static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap, 4214 static SlotCallbackResult CheckAndUpdateOldToNewSlot(
4113 Address slot_address) { 4215 Heap* heap, Address slot_address,
4216 const MarkCompactCollectorBase* collector) {
4114 // There may be concurrent action on slots in dead objects. Concurrent 4217 // There may be concurrent action on slots in dead objects. Concurrent
4115 // sweeper threads may overwrite the slot content with a free space object. 4218 // sweeper threads may overwrite the slot content with a free space object.
4116 // Moreover, the pointed-to object may also get concurrently overwritten 4219 // Moreover, the pointed-to object may also get concurrently overwritten
4117 // with a free space object. The sweeper always gets priority performing 4220 // with a free space object. The sweeper always gets priority performing
4118 // these writes. 4221 // these writes.
4119 base::NoBarrierAtomicValue<Object*>* slot = 4222 base::NoBarrierAtomicValue<Object*>* slot =
4120 base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address); 4223 base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address);
4121 Object* slot_reference = slot->Value(); 4224 Object* slot_reference = slot->Value();
4122 if (heap->InFromSpace(slot_reference)) { 4225 if (heap->InFromSpace(slot_reference)) {
4123 HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference); 4226 HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
(...skipping 19 matching lines...)
4143 if (heap->InToSpace(slot->Value())) { 4246 if (heap->InToSpace(slot->Value())) {
4144 return KEEP_SLOT; 4247 return KEEP_SLOT;
4145 } 4248 }
4146 } else if (heap->InToSpace(slot_reference)) { 4249 } else if (heap->InToSpace(slot_reference)) {
4147 // Slots can point to "to" space if the page has been moved, or if the 4250 // Slots can point to "to" space if the page has been moved, or if the
4148 // slot has been recorded multiple times in the remembered set. Since 4251 // slot has been recorded multiple times in the remembered set. Since
4149 // there is no forwarding information present we need to check the 4252 // there is no forwarding information present we need to check the
4150 // markbits to determine liveness. 4253 // markbits to determine liveness.
4151 HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference); 4254 HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
4152 if (ObjectMarking::IsBlack(heap_object, 4255 if (ObjectMarking::IsBlack(heap_object,
4153 MarkingState::Internal(heap_object))) 4256 collector->marking_state(heap_object)))
4154 return KEEP_SLOT; 4257 return KEEP_SLOT;
4155 } else { 4258 } else {
4156 DCHECK(!heap->InNewSpace(slot_reference)); 4259 DCHECK(!heap->InNewSpace(slot_reference));
4157 } 4260 }
4158 return REMOVE_SLOT; 4261 return REMOVE_SLOT;
4159 } 4262 }
4160 }; 4263 };
4161 4264
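CheckAndUpdateOldToNewSlot() now receives the collector so that the to-space markbit check uses that collector's marking state (the minor collector keeps its own bitmaps) instead of always consulting MarkingState::Internal. The decision itself: a slot pointing into from-space is updated through the forwarding pointer and kept only if the copy still lives in new space; a slot pointing into to-space, which can happen once whole pages are moved in place, is kept only if its target is marked black; anything else is stale and removed. Below is a simplified model of that decision, using hypothetical set-based stand-ins for the spaces, forwarding table, and markbits (the not-forwarded case is handled more subtly in the real code).

#include <unordered_map>
#include <unordered_set>

enum SlotCallbackResultSketch { KEEP_SLOT_SKETCH, REMOVE_SLOT_SKETCH };

struct HeapModel {
  std::unordered_set<void*> from_space;         // objects in from-space
  std::unordered_set<void*> to_space;           // objects in to-space
  std::unordered_map<void*, void*> forwarding;  // from-space object -> its copy
  std::unordered_set<void*> black;              // objects marked live
};

SlotCallbackResultSketch CheckAndUpdateOldToNewSlotSketch(HeapModel& heap,
                                                          void** slot) {
  void* ref = *slot;
  if (heap.from_space.count(ref)) {
    auto it = heap.forwarding.find(ref);
    if (it == heap.forwarding.end()) return REMOVE_SLOT_SKETCH;  // not forwarded: treated as dead here
    *slot = it->second;  // update the slot to point at the copy
    // Only pointers that still target new space need to stay in the set.
    return heap.to_space.count(*slot) ? KEEP_SLOT_SKETCH : REMOVE_SLOT_SKETCH;
  }
  if (heap.to_space.count(ref)) {
    // The page was moved in place (or the slot was recorded twice); there is
    // no forwarding pointer, so liveness is decided by the markbits.
    return heap.black.count(ref) ? KEEP_SLOT_SKETCH : REMOVE_SLOT_SKETCH;
  }
  return REMOVE_SLOT_SKETCH;  // old-space target: the old-to-new entry is stale
}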
4162 int NumberOfPointerUpdateTasks(int pages) { 4265 int NumberOfPointerUpdateTasks(int pages) {
4163 if (!FLAG_parallel_pointer_update) return 1; 4266 if (!FLAG_parallel_pointer_update) return 1;
4164 const int available_cores = Max( 4267 const int available_cores = Max(
4165 1, static_cast<int>( 4268 1, static_cast<int>(
4166 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())); 4269 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
4167 const int kPagesPerTask = 4; 4270 const int kPagesPerTask = 4;
4168 return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask); 4271 return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
4169 } 4272 }
4170 4273
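As a quick sanity check of the task sizing above (illustrative numbers, not from the CL): with kPagesPerTask = 4, the ceiling division (pages + 3) / 4 caps the count at one task per four remembered-set chunks, and the outer Min keeps it at or below the number of available background threads. The snippet below repeats the same arithmetic with the platform query replaced by an explicit core count.

#include <algorithm>
#include <cassert>

// Same arithmetic as NumberOfPointerUpdateTasks, with the platform query
// replaced by an explicit core count so the numbers are easy to follow.
int NumberOfPointerUpdateTasksSketch(int pages, int available_cores,
                                     bool parallel_pointer_update) {
  if (!parallel_pointer_update) return 1;
  const int kPagesPerTask = 4;
  return std::min(std::max(1, available_cores),
                  (pages + kPagesPerTask - 1) / kPagesPerTask);
}

int main() {
  assert(NumberOfPointerUpdateTasksSketch(10, 8, true) == 3);   // ceil(10/4) = 3
  assert(NumberOfPointerUpdateTasksSketch(2, 8, true) == 1);    // small jobs stay serial
  assert(NumberOfPointerUpdateTasksSketch(64, 4, true) == 4);   // capped by core count
  assert(NumberOfPointerUpdateTasksSketch(64, 4, false) == 1);  // flag off: single task
}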
4171 template <RememberedSetType type> 4274 template <RememberedSetType type>
4172 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) { 4275 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore,
4276 const MarkCompactCollectorBase* collector) {
4173 PageParallelJob<PointerUpdateJobTraits<type> > job( 4277 PageParallelJob<PointerUpdateJobTraits<type> > job(
4174 heap, heap->isolate()->cancelable_task_manager(), semaphore); 4278 heap, heap->isolate()->cancelable_task_manager(), semaphore);
4175 RememberedSet<type>::IterateMemoryChunks( 4279 RememberedSet<type>::IterateMemoryChunks(
4176 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); 4280 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
4177 int num_pages = job.NumberOfPages(); 4281 int num_pages = job.NumberOfPages();
4178 int num_tasks = NumberOfPointerUpdateTasks(num_pages); 4282 int num_tasks = NumberOfPointerUpdateTasks(num_pages);
4179 job.Run(num_tasks, [](int i) { return 0; }); 4283 job.Run(num_tasks, [collector](int i) { return collector; });
4180 } 4284 }
4181 4285
4182 class ToSpacePointerUpdateJobTraits { 4286 class ToSpacePointerUpdateJobTraits {
4183 public: 4287 public:
4184 struct PageData { 4288 struct PageData {
4185 Address start; 4289 Address start;
4186 Address end; 4290 Address end;
4187 MarkingState marking_state; 4291 MarkingState marking_state;
4188 }; 4292 };
4189 4293
(...skipping 66 matching lines...)
4256 4360
4257 4361
4258 { 4362 {
4259 TRACE_GC(heap()->tracer(), 4363 TRACE_GC(heap()->tracer(),
4260 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); 4364 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
4261 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, 4365 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
4262 *this); 4366 *this);
4263 // Update roots. 4367 // Update roots.
4264 PointersUpdatingVisitor updating_visitor; 4368 PointersUpdatingVisitor updating_visitor;
4265 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); 4369 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
4266 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); 4370 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_,
4371 this);
4267 } 4372 }
4268 4373
4269 { 4374 {
4270 Heap* heap = this->heap(); 4375 Heap* heap = this->heap();
4271 TRACE_GC(heap->tracer(), 4376 TRACE_GC(heap->tracer(),
4272 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); 4377 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
4273 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); 4378 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_,
4379 this);
4274 } 4380 }
4275 4381
4276 { 4382 {
4277 TRACE_GC(heap()->tracer(), 4383 TRACE_GC(heap()->tracer(),
4278 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); 4384 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
4279 // Update pointers from external string table. 4385 // Update pointers from external string table.
4280 heap_->UpdateReferencesInExternalStringTable( 4386 heap_->UpdateReferencesInExternalStringTable(
4281 &UpdateReferenceInExternalStringTableEntry); 4387 &UpdateReferenceInExternalStringTableEntry);
4282 4388
4283 EvacuationWeakObjectRetainer evacuation_object_retainer; 4389 EvacuationWeakObjectRetainer evacuation_object_retainer;
4284 heap()->ProcessWeakListRoots(&evacuation_object_retainer); 4390 heap()->ProcessWeakListRoots(&evacuation_object_retainer);
4285 } 4391 }
4286 } 4392 }
4287 4393
4288 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() { 4394 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
4289 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); 4395 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
4290 4396
4291 PointersUpdatingVisitor updating_visitor; 4397 PointersUpdatingVisitor updating_visitor;
4292 4398
4293 { 4399 {
4294 TRACE_GC(heap()->tracer(), 4400 TRACE_GC(heap()->tracer(),
4295 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); 4401 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
4296 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, 4402 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
4297 *this); 4403 *this);
4298 // TODO(mlippautz): Iteration mode is not optimal as we process all 4404 // TODO(mlippautz): Iteration mode is not optimal as we process all
4299 // global handles. Find a way to only process the ones related to new 4405 // global handles. Find a way to only process the ones related to new
4300 // space. 4406 // space.
4301 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); 4407 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
4302 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); 4408 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_,
4409 this);
4303 } 4410 }
4304 4411
4305 { 4412 {
4306 TRACE_GC(heap()->tracer(), 4413 TRACE_GC(heap()->tracer(),
4307 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); 4414 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
4308 4415
4309 EvacuationWeakObjectRetainer evacuation_object_retainer; 4416 EvacuationWeakObjectRetainer evacuation_object_retainer;
4310 heap()->ProcessWeakListRoots(&evacuation_object_retainer); 4417 heap()->ProcessWeakListRoots(&evacuation_object_retainer);
4311 4418
4312 // Update pointers from external string table. 4419 // Update pointers from external string table.
(...skipping 37 matching lines...)
4350 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, 4457 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
4351 AllocationSpace identity) { 4458 AllocationSpace identity) {
4352 int max_freed = 0; 4459 int max_freed = 0;
4353 { 4460 {
4354 base::LockGuard<base::RecursiveMutex> guard(page->mutex()); 4461 base::LockGuard<base::RecursiveMutex> guard(page->mutex());
4355 // If this page was already swept in the meantime, we can return here. 4462 // If this page was already swept in the meantime, we can return here.
4356 if (page->SweepingDone()) return 0; 4463 if (page->SweepingDone()) return 0;
4357 DCHECK_EQ(Page::kSweepingPending, 4464 DCHECK_EQ(Page::kSweepingPending,
4358 page->concurrent_sweeping_state().Value()); 4465 page->concurrent_sweeping_state().Value());
4359 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); 4466 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
4360 const Sweeper::FreeSpaceTreatmentMode free_space_mode = 4467 const FreeSpaceTreatmentMode free_space_mode =
4361 Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE; 4468 Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
4362 if (identity == NEW_SPACE) { 4469 if (identity == NEW_SPACE) {
4363 RawSweep(page, IGNORE_FREE_LIST, free_space_mode); 4470 RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
4364 } else { 4471 } else {
4365 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); 4472 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
4366 } 4473 }
4367 DCHECK(page->SweepingDone()); 4474 DCHECK(page->SweepingDone());
4368 4475
4369 // After finishing sweeping of a page we clean up its remembered set. 4476 // After finishing sweeping of a page we clean up its remembered set.
4370 TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>(); 4477 TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
(...skipping 64 matching lines...)
4435 continue; 4542 continue;
4436 } 4543 }
4437 4544
4438 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { 4545 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
4439 // We need to sweep the page to get it into an iterable state again. Note 4546 // We need to sweep the page to get it into an iterable state again. Note
4440 // that this adds unusable memory into the free list that is later on 4547 // that this adds unusable memory into the free list that is later on
4441 // (in the free list) dropped again. Since we only use the flag for 4548 // (in the free list) dropped again. Since we only use the flag for
4442 // testing this is fine. 4549 // testing this is fine.
4443 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); 4550 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
4444 Sweeper::RawSweep(p, Sweeper::IGNORE_FREE_LIST, 4551 Sweeper::RawSweep(p, Sweeper::IGNORE_FREE_LIST,
4445 Heap::ShouldZapGarbage() ? Sweeper::ZAP_FREE_SPACE 4552 Heap::ShouldZapGarbage()
4446 : Sweeper::IGNORE_FREE_SPACE); 4553 ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
4554 : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
4447 continue; 4555 continue;
4448 } 4556 }
4449 4557
4450 // One unused page is kept, all further are released before sweeping them. 4558 // One unused page is kept, all further are released before sweeping them.
4451 if (MarkingState::Internal(p).live_bytes() == 0) { 4559 if (MarkingState::Internal(p).live_bytes() == 0) {
4452 if (unused_page_present) { 4560 if (unused_page_present) {
4453 if (FLAG_gc_verbose) { 4561 if (FLAG_gc_verbose) {
4454 PrintIsolate(isolate(), "sweeping: released page: %p", 4562 PrintIsolate(isolate(), "sweeping: released page: %p",
4455 static_cast<void*>(p)); 4563 static_cast<void*>(p));
4456 } 4564 }
(...skipping 73 matching lines...)
4530 // The target is always in old space, we don't have to record the slot in 4638 // The target is always in old space, we don't have to record the slot in
4531 // the old-to-new remembered set. 4639 // the old-to-new remembered set.
4532 DCHECK(!heap()->InNewSpace(target)); 4640 DCHECK(!heap()->InNewSpace(target));
4533 RecordRelocSlot(host, &rinfo, target); 4641 RecordRelocSlot(host, &rinfo, target);
4534 } 4642 }
4535 } 4643 }
4536 } 4644 }
4537 4645
4538 } // namespace internal 4646 } // namespace internal
4539 } // namespace v8 4647 } // namespace v8