Chromium Code Reviews

Side by Side Diff: src/heap/mark-compact.cc

Issue 2855143003: [heap] Minor MC: Implement page moving (Closed)
Patch Set: Polishing | Created 3 years, 7 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/mark-compact.h" 5 #include "src/heap/mark-compact.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h" 9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h" 10 #include "src/code-stubs.h"
(...skipping 429 matching lines...)
440 } 440 }
441 441
442 return compacting_; 442 return compacting_;
443 } 443 }
444 444
445 void MarkCompactCollector::CollectGarbage() { 445 void MarkCompactCollector::CollectGarbage() {
446 // Make sure that Prepare() has been called. The individual steps below will 446 // Make sure that Prepare() has been called. The individual steps below will
447 // update the state as they proceed. 447 // update the state as they proceed.
448 DCHECK(state_ == PREPARE_GC); 448 DCHECK(state_ == PREPARE_GC);
449 449
450 heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
451
450 MarkLiveObjects(); 452 MarkLiveObjects();
451 453
452 DCHECK(heap_->incremental_marking()->IsStopped()); 454 DCHECK(heap_->incremental_marking()->IsStopped());
453 455
454 ClearNonLiveReferences(); 456 ClearNonLiveReferences();
455 457
456 RecordObjectStats(); 458 RecordObjectStats();
457 459
458 #ifdef VERIFY_HEAP 460 #ifdef VERIFY_HEAP
459 if (FLAG_verify_heap) { 461 if (FLAG_verify_heap) {
(...skipping 991 matching lines...)
1451 heap()->isolate()->thread_manager()->IterateArchivedThreads( 1453 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1452 &code_marking_visitor); 1454 &code_marking_visitor);
1453 1455
1454 SharedFunctionInfoMarkingVisitor visitor(this); 1456 SharedFunctionInfoMarkingVisitor visitor(this);
1455 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor); 1457 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1456 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor); 1458 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1457 1459
1458 ProcessMarkingDeque(); 1460 ProcessMarkingDeque();
1459 } 1461 }
1460 1462
1463 void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
1464 for (Page* p : sweep_to_iterate_pages_) {
1465 if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
1466 p->ClearFlag(Page::SWEEP_TO_ITERATE);
1467 marking_state(p).ClearLiveness();
1468 }
1469 }
1470 sweep_to_iterate_pages_.clear();
1471 }
1472
1461 class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor { 1473 class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
1462 public: 1474 public:
1463 explicit RootMarkingVisitor(MinorMarkCompactCollector* collector) 1475 explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
1464 : collector_(collector) {} 1476 : collector_(collector) {}
1465 1477
1466 void VisitRootPointer(Root root, Object** p) override { 1478 void VisitRootPointer(Root root, Object** p) override {
1467 MarkObjectByPointer(p); 1479 MarkObjectByPointer(p);
1468 } 1480 }
1469 1481
1470 void VisitRootPointers(Root root, Object** start, Object** end) override { 1482 void VisitRootPointers(Root root, Object** start, Object** end) override {
(...skipping 1110 matching lines...)
2581 Map* map = object->map(); 2593 Map* map = object->map();
2582 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>( 2594 DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>(
2583 object, MarkingState::External(object)))); 2595 object, MarkingState::External(object))));
2584 StaticYoungGenerationMarkingVisitor::IterateBody(map, object); 2596 StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
2585 } 2597 }
2586 } 2598 }
2587 2599
2588 void MinorMarkCompactCollector::CollectGarbage() { 2600 void MinorMarkCompactCollector::CollectGarbage() {
2589 heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted(); 2601 heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
2590 2602
2603 CleanupSweepToIteratePages();
2604
2591 MarkLiveObjects(); 2605 MarkLiveObjects();
2592 ClearNonLiveReferences(); 2606 ClearNonLiveReferences();
2593 #ifdef VERIFY_HEAP 2607 #ifdef VERIFY_HEAP
2594 if (FLAG_verify_heap) { 2608 if (FLAG_verify_heap) {
2595 YoungGenerationMarkingVerifier verifier(heap()); 2609 YoungGenerationMarkingVerifier verifier(heap());
2596 verifier.Run(); 2610 verifier.Run();
2597 } 2611 }
2598 #endif // VERIFY_HEAP 2612 #endif // VERIFY_HEAP
2599 2613
2600 Evacuate(); 2614 Evacuate();
2601 #ifdef VERIFY_HEAP 2615 #ifdef VERIFY_HEAP
2602 if (FLAG_verify_heap) { 2616 if (FLAG_verify_heap) {
2603 YoungGenerationEvacuationVerifier verifier(heap()); 2617 YoungGenerationEvacuationVerifier verifier(heap());
2604 verifier.Run(); 2618 verifier.Run();
2605 } 2619 }
2606 #endif // VERIFY_HEAP 2620 #endif // VERIFY_HEAP
2607 2621
2608 heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge(); 2622 heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge();
2609 2623
2610 { 2624 {
2611 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS); 2625 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS);
2612 for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(), 2626 for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
2613 heap()->new_space()->FromSpaceEnd())) { 2627 heap()->new_space()->FromSpaceEnd())) {
2614 marking_state(p).ClearLiveness(); 2628 if (!p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
2629 marking_state(p).ClearLiveness();
2630 }
2615 } 2631 }
2616 } 2632 }
2617 2633
2618 heap()->account_external_memory_concurrently_freed(); 2634 heap()->account_external_memory_concurrently_freed();
2619 } 2635 }
2620 2636
2637 void MinorMarkCompactCollector::MakeIterable(
2638 Page* p, MarkingTreatmentMode marking_mode,
2639 FreeSpaceTreatmentMode free_space_mode) {
2640 // We have to clear the full collector's markbits for the areas that we
2641 // remove here.
2642 MarkCompactCollector* full_collector = heap()->mark_compact_collector();
2643 Address free_start = p->area_start();
2644 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
2645 LiveObjectIterator<kBlackObjects> it(p, marking_state(p));
2646 HeapObject* object = nullptr;
2647
2648 while ((object = it.Next()) != nullptr) {
2649 DCHECK(ObjectMarking::IsBlack(object, marking_state(object)));
2650 Address free_end = object->address();
2651 if (free_end != free_start) {
2652 CHECK_GT(free_end, free_start);
2653 size_t size = static_cast<size_t>(free_end - free_start);
2654 if (free_space_mode == ZAP_FREE_SPACE) {
2655 memset(free_start, 0xcc, size);
2656 }
2657 full_collector->marking_state(p).bitmap()->ClearRange(
ulan 2017/05/09 11:31:54 Shouldn't this be guarded with marking_mode?
Michael Lippautz 2017/05/09 13:01:25 Done.
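For context, a minimal sketch of the guard being asked for here (illustrative only; the "Done." reply means it was addressed in a later patch set, whose exact shape may differ):

    // Hypothetical: clear the full collector's markbit range only when the
    // caller also requested that the minor marking state be cleared.
    if (marking_mode == MarkingTreatmentMode::CLEAR) {
      full_collector->marking_state(p).bitmap()->ClearRange(
          p->AddressToMarkbitIndex(free_start),
          p->AddressToMarkbitIndex(free_end));
    }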
2658 p->AddressToMarkbitIndex(free_start),
2659 p->AddressToMarkbitIndex(free_end));
2660 p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
2661 ClearRecordedSlots::kNo);
2662 }
2663 Map* map = object->synchronized_map();
2664 int size = object->SizeFromMap(map);
2665 free_start = free_end + size;
2666 }
2667
2668 if (free_start != p->area_end()) {
2669 CHECK_GT(p->area_end(), free_start);
2670 size_t size = static_cast<size_t>(p->area_end() - free_start);
2671 if (free_space_mode == ZAP_FREE_SPACE) {
2672 memset(free_start, 0xcc, size);
2673 }
2674 full_collector->marking_state(p).bitmap()->ClearRange(
2675 p->AddressToMarkbitIndex(free_start),
2676 p->AddressToMarkbitIndex(p->area_end()));
2677 p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
2678 ClearRecordedSlots::kNo);
2679 }
2680
2681 if (marking_mode == MarkingTreatmentMode::CLEAR) {
2682 marking_state(p).ClearLiveness();
2683 p->ClearFlag(Page::SWEEP_TO_ITERATE);
2684 }
2685 }
2686
2621 void MinorMarkCompactCollector::ClearNonLiveReferences() { 2687 void MinorMarkCompactCollector::ClearNonLiveReferences() {
2622 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR); 2688 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
2623 2689
2624 { 2690 {
2625 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE); 2691 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
2626 // Internalized strings are always stored in old space, so there is no need 2692 // Internalized strings are always stored in old space, so there is no need
2627 // to clean them here. 2693 // to clean them here.
2628 YoungGenerationExternalStringTableCleaner external_visitor(*this); 2694 YoungGenerationExternalStringTableCleaner external_visitor(*this);
2629 heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor); 2695 heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
2630 heap()->external_string_table_.CleanUpNewSpaceStrings(); 2696 heap()->external_string_table_.CleanUpNewSpaceStrings();
(...skipping 42 matching lines...)
2673 if (!heap()->new_space()->Rebalance()) { 2739 if (!heap()->new_space()->Rebalance()) {
2674 FatalProcessOutOfMemory("NewSpace::Rebalance"); 2740 FatalProcessOutOfMemory("NewSpace::Rebalance");
2675 } 2741 }
2676 } 2742 }
2677 2743
2678 // Give pages that are queued to be freed back to the OS. 2744 // Give pages that are queued to be freed back to the OS.
2679 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); 2745 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2680 2746
2681 { 2747 {
2682 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); 2748 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
2683 // TODO(mlippautz): Implement page promotion. 2749 for (Page* p : new_space_evacuation_pages_) {
2750 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
2751 p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
2752 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
2753 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
2754 p->SetFlag(Page::SWEEP_TO_ITERATE);
2755 sweep_to_iterate_pages_.push_back(p);
2756 }
2757 }
2684 new_space_evacuation_pages_.Rewind(0); 2758 new_space_evacuation_pages_.Rewind(0);
2685 } 2759 }
2686 2760
2687 { 2761 {
2688 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE); 2762 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
2689 EvacuateEpilogue(); 2763 EvacuateEpilogue();
2690 } 2764 }
2691 } 2765 }
2692 2766
2693 void MarkCompactCollector::MarkLiveObjects() { 2767 void MarkCompactCollector::MarkLiveObjects() {
(...skipping 870 matching lines...)
3564 *live_bytes = state.live_bytes(); 3638 *live_bytes = state.live_bytes();
3565 switch (ComputeEvacuationMode(page)) { 3639 switch (ComputeEvacuationMode(page)) {
3566 case kObjectsNewToOld: 3640 case kObjectsNewToOld:
3567 success = object_visitor.VisitBlackObjects( 3641 success = object_visitor.VisitBlackObjects(
3568 page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits); 3642 page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
3569 DCHECK(success); 3643 DCHECK(success);
3570 ArrayBufferTracker::ProcessBuffers( 3644 ArrayBufferTracker::ProcessBuffers(
3571 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); 3645 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3572 break; 3646 break;
3573 case kPageNewToOld: 3647 case kPageNewToOld:
3574 // TODO(mlippautz): Implement page promotion. 3648 success = object_visitor.VisitBlackObjects(
3575 UNREACHABLE(); 3649 page, state, &new_to_old_page_visitor_,
3650 LiveObjectVisitor::kKeepMarking);
3651 DCHECK(success);
3652 new_to_old_page_visitor_.account_moved_bytes(state.live_bytes());
3653 // TODO(mlippautz): If cleaning array buffers is too slow here we can
3654 // delay it until the next GC.
3655 ArrayBufferTracker::FreeDead(page, state);
3656 if (heap()->ShouldZapGarbage())
3657 collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
3658 ZAP_FREE_SPACE);
3576 break; 3659 break;
3577 case kPageNewToNew: 3660 case kPageNewToNew:
3578 // TODO(mlippautz): Implement page promotion. 3661 success = object_visitor.VisitBlackObjects(
3579 UNREACHABLE(); 3662 page, state, &new_to_new_page_visitor_,
3663 LiveObjectVisitor::kKeepMarking);
3664 DCHECK(success);
3665 new_to_new_page_visitor_.account_moved_bytes(state.live_bytes());
3666 // TODO(mlippautz): If cleaning array buffers is too slow here we can
3667 // delay it until the next GC.
3668 ArrayBufferTracker::FreeDead(page, state);
3669 if (heap()->ShouldZapGarbage())
3670 collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
3671 ZAP_FREE_SPACE);
3580 break; 3672 break;
3581 case kObjectsOldToOld: 3673 case kObjectsOldToOld:
3582 UNREACHABLE(); 3674 UNREACHABLE();
3583 break; 3675 break;
3584 } 3676 }
3585 return success; 3677 return success;
3586 } 3678 }
3587 3679
3588 class EvacuationJobTraits { 3680 class EvacuationJobTraits {
3589 public: 3681 public:
(...skipping 119 matching lines...)
3709 if (!reduce_memory && !page->NeverEvacuate() && 3801 if (!reduce_memory && !page->NeverEvacuate() &&
3710 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) && 3802 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) &&
3711 !page->Contains(age_mark) && 3803 !page->Contains(age_mark) &&
3712 heap()->CanExpandOldGeneration(live_bytes_on_page)) { 3804 heap()->CanExpandOldGeneration(live_bytes_on_page)) {
3713 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { 3805 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
3714 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page); 3806 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
3715 } else { 3807 } else {
3716 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page); 3808 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
3717 } 3809 }
3718 } 3810 }
3719
3720 job.AddPage(page, {&abandoned_pages, marking_state(page)}); 3811 job.AddPage(page, {&abandoned_pages, marking_state(page)});
3721 } 3812 }
3722 DCHECK_GE(job.NumberOfPages(), 1); 3813 DCHECK_GE(job.NumberOfPages(), 1);
3723 3814
3724 RecordMigratedSlotVisitor record_visitor(this); 3815 RecordMigratedSlotVisitor record_visitor(this);
3725 CreateAndExecuteEvacuationTasks<FullEvacuator>( 3816 CreateAndExecuteEvacuationTasks<FullEvacuator>(
3726 this, &job, &record_visitor, nullptr, live_bytes, abandoned_pages); 3817 this, &job, &record_visitor, nullptr, live_bytes, abandoned_pages);
3727 } 3818 }
3728 3819
3729 void MinorMarkCompactCollector::EvacuatePagesInParallel() { 3820 void MinorMarkCompactCollector::EvacuatePagesInParallel() {
3730 PageParallelJob<EvacuationJobTraits> job( 3821 PageParallelJob<EvacuationJobTraits> job(
3731 heap_, heap_->isolate()->cancelable_task_manager(), 3822 heap_, heap_->isolate()->cancelable_task_manager(),
3732 &page_parallel_job_semaphore_); 3823 &page_parallel_job_semaphore_);
3733 int abandoned_pages = 0; 3824 int abandoned_pages = 0;
3734 intptr_t live_bytes = 0; 3825 intptr_t live_bytes = 0;
3735 3826
3827 const bool reduce_memory = heap()->ShouldReduceMemory();
3828 const Address age_mark = heap()->new_space()->age_mark();
3736 for (Page* page : new_space_evacuation_pages_) { 3829 for (Page* page : new_space_evacuation_pages_) {
3737 intptr_t live_bytes_on_page = marking_state(page).live_bytes(); 3830 intptr_t live_bytes_on_page = marking_state(page).live_bytes();
3738 live_bytes += live_bytes_on_page; 3831 live_bytes += live_bytes_on_page;
3739 // TODO(mlippautz): Implement page promotion. 3832 if (!reduce_memory && !page->NeverEvacuate() &&
ulan 2017/05/09 11:31:53 Let's extract this predicate into a separate function.
Michael Lippautz 2017/05/09 13:01:24 Done.
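For context, a possible shape for the extracted predicate (hypothetical name and placement; it only restates the condition already inlined here and in the full collector's EvacuatePagesInParallel above):

    // Hypothetical helper consolidating the page-promotion predicate that is
    // currently duplicated in both EvacuatePagesInParallel() implementations.
    bool ShouldMovePage(Page* p, intptr_t live_bytes) {
      Heap* heap = p->heap();
      return !heap->ShouldReduceMemory() && !p->NeverEvacuate() &&
             (live_bytes > Evacuator::PageEvacuationThreshold()) &&
             !p->Contains(heap->new_space()->age_mark()) &&
             heap->CanExpandOldGeneration(live_bytes);
    }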
3833 (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) &&
3834 !page->Contains(age_mark) &&
3835 heap()->CanExpandOldGeneration(live_bytes_on_page)) {
3836 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
3837 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
3838 } else {
3839 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
3840 }
3841 }
3740 job.AddPage(page, {&abandoned_pages, marking_state(page)}); 3842 job.AddPage(page, {&abandoned_pages, marking_state(page)});
3741 } 3843 }
3742 DCHECK_GE(job.NumberOfPages(), 1); 3844 DCHECK_GE(job.NumberOfPages(), 1);
3743 3845
3744 YoungGenerationMigrationObserver observer(heap(), 3846 YoungGenerationMigrationObserver observer(heap(),
3745 heap()->mark_compact_collector()); 3847 heap()->mark_compact_collector());
3746 YoungGenerationRecordMigratedSlotVisitor record_visitor( 3848 YoungGenerationRecordMigratedSlotVisitor record_visitor(
3747 heap()->mark_compact_collector()); 3849 heap()->mark_compact_collector());
3748 CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>( 3850 CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
3749 this, &job, &record_visitor, &observer, live_bytes, abandoned_pages); 3851 this, &job, &record_visitor, &observer, live_bytes, abandoned_pages);
(...skipping 303 matching lines...)
4053 FullEvacuationVerifier verifier(heap()); 4155 FullEvacuationVerifier verifier(heap());
4054 verifier.Run(); 4156 verifier.Run();
4055 } 4157 }
4056 #endif 4158 #endif
4057 } 4159 }
4058 4160
4059 template <RememberedSetType type> 4161 template <RememberedSetType type>
4060 class PointerUpdateJobTraits { 4162 class PointerUpdateJobTraits {
4061 public: 4163 public:
4062 typedef int PerPageData; // Per page data is not used in this job. 4164 typedef int PerPageData; // Per page data is not used in this job.
4063 typedef int PerTaskData; // Per task data is not used in this job. 4165 typedef const MarkCompactCollectorBase* PerTaskData;
4064 4166
4065 static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk, 4167 static bool ProcessPageInParallel(Heap* heap, PerTaskData task_data,
4066 PerPageData) { 4168 MemoryChunk* chunk, PerPageData) {
4067 UpdateUntypedPointers(heap, chunk); 4169 UpdateUntypedPointers(heap, chunk, task_data);
4068 UpdateTypedPointers(heap, chunk); 4170 UpdateTypedPointers(heap, chunk, task_data);
4069 return true; 4171 return true;
4070 } 4172 }
4071 static const bool NeedSequentialFinalization = false; 4173 static const bool NeedSequentialFinalization = false;
4072 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { 4174 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
4073 } 4175 }
4074 4176
4075 private: 4177 private:
4076 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { 4178 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk,
4179 const MarkCompactCollectorBase* collector) {
4077 base::LockGuard<base::RecursiveMutex> guard(chunk->mutex()); 4180 base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
4078 if (type == OLD_TO_NEW) { 4181 if (type == OLD_TO_NEW) {
4079 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { 4182 RememberedSet<OLD_TO_NEW>::Iterate(
4080 return CheckAndUpdateOldToNewSlot(heap, slot); 4183 chunk, [heap, collector](Address slot) {
4081 }); 4184 return CheckAndUpdateOldToNewSlot(heap, slot, collector);
4185 });
4082 } else { 4186 } else {
4083 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { 4187 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
4084 return UpdateSlot(reinterpret_cast<Object**>(slot)); 4188 return UpdateSlot(reinterpret_cast<Object**>(slot));
4085 }); 4189 });
4086 } 4190 }
4087 } 4191 }
4088 4192
4089 static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) { 4193 static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
4194 const MarkCompactCollectorBase* collector) {
4090 if (type == OLD_TO_OLD) { 4195 if (type == OLD_TO_OLD) {
4091 Isolate* isolate = heap->isolate(); 4196 Isolate* isolate = heap->isolate();
4092 RememberedSet<OLD_TO_OLD>::IterateTyped( 4197 RememberedSet<OLD_TO_OLD>::IterateTyped(
4093 chunk, 4198 chunk,
4094 [isolate](SlotType slot_type, Address host_addr, Address slot) { 4199 [isolate](SlotType slot_type, Address host_addr, Address slot) {
4095 return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type, 4200 return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
4096 slot, UpdateSlot); 4201 slot, UpdateSlot);
4097 }); 4202 });
4098 } else { 4203 } else {
4099 Isolate* isolate = heap->isolate(); 4204 Isolate* isolate = heap->isolate();
4100 RememberedSet<OLD_TO_NEW>::IterateTyped( 4205 RememberedSet<OLD_TO_NEW>::IterateTyped(
4101 chunk, 4206 chunk, [isolate, heap, collector](SlotType slot_type,
4102 [isolate, heap](SlotType slot_type, Address host_addr, Address slot) { 4207 Address host_addr, Address slot) {
4103 return UpdateTypedSlotHelper::UpdateTypedSlot( 4208 return UpdateTypedSlotHelper::UpdateTypedSlot(
4104 isolate, slot_type, slot, [heap](Object** slot) { 4209 isolate, slot_type, slot, [heap, collector](Object** slot) {
4105 return CheckAndUpdateOldToNewSlot( 4210 return CheckAndUpdateOldToNewSlot(
4106 heap, reinterpret_cast<Address>(slot)); 4211 heap, reinterpret_cast<Address>(slot), collector);
4107 }); 4212 });
4108 }); 4213 });
4109 } 4214 }
4110 } 4215 }
4111 4216
4112 static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap, 4217 static SlotCallbackResult CheckAndUpdateOldToNewSlot(
4113 Address slot_address) { 4218 Heap* heap, Address slot_address,
4219 const MarkCompactCollectorBase* collector) {
4114 // There may be concurrent action on slots in dead objects. Concurrent 4220 // There may be concurrent action on slots in dead objects. Concurrent
4115 // sweeper threads may overwrite the slot content with a free space object. 4221 // sweeper threads may overwrite the slot content with a free space object.
4116 // Moreover, the pointed-to object may also get concurrently overwritten 4222 // Moreover, the pointed-to object may also get concurrently overwritten
4117 // with a free space object. The sweeper always gets priority performing 4223 // with a free space object. The sweeper always gets priority performing
4118 // these writes. 4224 // these writes.
4119 base::NoBarrierAtomicValue<Object*>* slot = 4225 base::NoBarrierAtomicValue<Object*>* slot =
4120 base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address); 4226 base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address);
4121 Object* slot_reference = slot->Value(); 4227 Object* slot_reference = slot->Value();
4122 if (heap->InFromSpace(slot_reference)) { 4228 if (heap->InFromSpace(slot_reference)) {
4123 HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference); 4229 HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
(...skipping 19 matching lines...)
4143 if (heap->InToSpace(slot->Value())) { 4249 if (heap->InToSpace(slot->Value())) {
4144 return KEEP_SLOT; 4250 return KEEP_SLOT;
4145 } 4251 }
4146 } else if (heap->InToSpace(slot_reference)) { 4252 } else if (heap->InToSpace(slot_reference)) {
4147 // Slots can point to "to" space if the page has been moved, or if the 4253 // Slots can point to "to" space if the page has been moved, or if the
4148 // slot has been recorded multiple times in the remembered set. Since 4254 // slot has been recorded multiple times in the remembered set. Since
4149 // there is no forwarding information present we need to check the 4255 // there is no forwarding information present we need to check the
4150 // markbits to determine liveness. 4256 // markbits to determine liveness.
4151 HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference); 4257 HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
4152 if (ObjectMarking::IsBlack(heap_object, 4258 if (ObjectMarking::IsBlack(heap_object,
4153 MarkingState::Internal(heap_object))) 4259 collector->marking_state(heap_object)))
4154 return KEEP_SLOT; 4260 return KEEP_SLOT;
4155 } else { 4261 } else {
4156 DCHECK(!heap->InNewSpace(slot_reference)); 4262 DCHECK(!heap->InNewSpace(slot_reference));
4157 } 4263 }
4158 return REMOVE_SLOT; 4264 return REMOVE_SLOT;
4159 } 4265 }
4160 }; 4266 };
4161 4267
4162 int NumberOfPointerUpdateTasks(int pages) { 4268 int NumberOfPointerUpdateTasks(int pages) {
4163 if (!FLAG_parallel_pointer_update) return 1; 4269 if (!FLAG_parallel_pointer_update) return 1;
4164 const int available_cores = Max( 4270 const int available_cores = Max(
4165 1, static_cast<int>( 4271 1, static_cast<int>(
4166 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())); 4272 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
4167 const int kPagesPerTask = 4; 4273 const int kPagesPerTask = 4;
4168 return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask); 4274 return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
4169 } 4275 }
4170 4276
4171 template <RememberedSetType type> 4277 template <RememberedSetType type>
4172 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) { 4278 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore,
4279 const MarkCompactCollectorBase* collector) {
4173 PageParallelJob<PointerUpdateJobTraits<type> > job( 4280 PageParallelJob<PointerUpdateJobTraits<type> > job(
4174 heap, heap->isolate()->cancelable_task_manager(), semaphore); 4281 heap, heap->isolate()->cancelable_task_manager(), semaphore);
4175 RememberedSet<type>::IterateMemoryChunks( 4282 RememberedSet<type>::IterateMemoryChunks(
4176 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); 4283 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
4177 int num_pages = job.NumberOfPages(); 4284 int num_pages = job.NumberOfPages();
4178 int num_tasks = NumberOfPointerUpdateTasks(num_pages); 4285 int num_tasks = NumberOfPointerUpdateTasks(num_pages);
4179 job.Run(num_tasks, [](int i) { return 0; }); 4286 job.Run(num_tasks, [collector](int i) { return collector; });
4180 } 4287 }
4181 4288
4182 class ToSpacePointerUpdateJobTraits { 4289 class ToSpacePointerUpdateJobTraits {
4183 public: 4290 public:
4184 struct PageData { 4291 struct PageData {
4185 Address start; 4292 Address start;
4186 Address end; 4293 Address end;
4187 MarkingState marking_state; 4294 MarkingState marking_state;
4188 }; 4295 };
4189 4296
(...skipping 66 matching lines...)
4256 4363
4257 4364
4258 { 4365 {
4259 TRACE_GC(heap()->tracer(), 4366 TRACE_GC(heap()->tracer(),
4260 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); 4367 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
4261 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, 4368 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
4262 *this); 4369 *this);
4263 // Update roots. 4370 // Update roots.
4264 PointersUpdatingVisitor updating_visitor; 4371 PointersUpdatingVisitor updating_visitor;
4265 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); 4372 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
4266 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); 4373 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_,
4374 this);
4267 } 4375 }
4268 4376
4269 { 4377 {
4270 Heap* heap = this->heap(); 4378 Heap* heap = this->heap();
4271 TRACE_GC(heap->tracer(), 4379 TRACE_GC(heap->tracer(),
4272 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); 4380 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
4273 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); 4381 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_,
4382 this);
4274 } 4383 }
4275 4384
4276 { 4385 {
4277 TRACE_GC(heap()->tracer(), 4386 TRACE_GC(heap()->tracer(),
4278 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); 4387 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
4279 // Update pointers from external string table. 4388 // Update pointers from external string table.
4280 heap_->UpdateReferencesInExternalStringTable( 4389 heap_->UpdateReferencesInExternalStringTable(
4281 &UpdateReferenceInExternalStringTableEntry); 4390 &UpdateReferenceInExternalStringTableEntry);
4282 4391
4283 EvacuationWeakObjectRetainer evacuation_object_retainer; 4392 EvacuationWeakObjectRetainer evacuation_object_retainer;
4284 heap()->ProcessWeakListRoots(&evacuation_object_retainer); 4393 heap()->ProcessWeakListRoots(&evacuation_object_retainer);
4285 } 4394 }
4286 } 4395 }
4287 4396
4288 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() { 4397 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
4289 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); 4398 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
4290 4399
4291 PointersUpdatingVisitor updating_visitor; 4400 PointersUpdatingVisitor updating_visitor;
4292 4401
4293 { 4402 {
4294 TRACE_GC(heap()->tracer(), 4403 TRACE_GC(heap()->tracer(),
4295 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); 4404 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
4296 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, 4405 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
4297 *this); 4406 *this);
4298 // TODO(mlippautz): Iteration mode is not optimal as we process all 4407 // TODO(mlippautz): Iteration mode is not optimal as we process all
4299 // global handles. Find a way to only process the ones related to new 4408 // global handles. Find a way to only process the ones related to new
4300 // space. 4409 // space.
4301 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); 4410 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
4302 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); 4411 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_,
4412 this);
4303 } 4413 }
4304 4414
4305 { 4415 {
4306 TRACE_GC(heap()->tracer(), 4416 TRACE_GC(heap()->tracer(),
4307 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); 4417 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
4308 4418
4309 EvacuationWeakObjectRetainer evacuation_object_retainer; 4419 EvacuationWeakObjectRetainer evacuation_object_retainer;
4310 heap()->ProcessWeakListRoots(&evacuation_object_retainer); 4420 heap()->ProcessWeakListRoots(&evacuation_object_retainer);
4311 4421
4312 // Update pointers from external string table. 4422 // Update pointers from external string table.
(...skipping 37 matching lines...)
4350 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, 4460 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
4351 AllocationSpace identity) { 4461 AllocationSpace identity) {
4352 int max_freed = 0; 4462 int max_freed = 0;
4353 { 4463 {
4354 base::LockGuard<base::RecursiveMutex> guard(page->mutex()); 4464 base::LockGuard<base::RecursiveMutex> guard(page->mutex());
4355 // If this page was already swept in the meantime, we can return here. 4465 // If this page was already swept in the meantime, we can return here.
4356 if (page->SweepingDone()) return 0; 4466 if (page->SweepingDone()) return 0;
4357 DCHECK_EQ(Page::kSweepingPending, 4467 DCHECK_EQ(Page::kSweepingPending,
4358 page->concurrent_sweeping_state().Value()); 4468 page->concurrent_sweeping_state().Value());
4359 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); 4469 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
4360 const Sweeper::FreeSpaceTreatmentMode free_space_mode = 4470 const FreeSpaceTreatmentMode free_space_mode =
4361 Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE; 4471 Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
4362 if (identity == NEW_SPACE) { 4472 if (identity == NEW_SPACE) {
4363 RawSweep(page, IGNORE_FREE_LIST, free_space_mode); 4473 RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
4364 } else { 4474 } else {
4365 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); 4475 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
4366 } 4476 }
4367 DCHECK(page->SweepingDone()); 4477 DCHECK(page->SweepingDone());
4368 4478
4369 // After finishing sweeping of a page we clean up its remembered set. 4479 // After finishing sweeping of a page we clean up its remembered set.
4370 TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>(); 4480 TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
(...skipping 64 matching lines...)
4435 continue; 4545 continue;
4436 } 4546 }
4437 4547
4438 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { 4548 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
4439 // We need to sweep the page to get it into an iterable state again. Note 4549 // We need to sweep the page to get it into an iterable state again. Note
4440 // that this adds unusable memory into the free list that is later on 4550 // that this adds unusable memory into the free list that is later on
4441 // (in the free list) dropped again. Since we only use the flag for 4551 // (in the free list) dropped again. Since we only use the flag for
4442 // testing this is fine. 4552 // testing this is fine.
4443 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); 4553 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
4444 Sweeper::RawSweep(p, Sweeper::IGNORE_FREE_LIST, 4554 Sweeper::RawSweep(p, Sweeper::IGNORE_FREE_LIST,
4445 Heap::ShouldZapGarbage() ? Sweeper::ZAP_FREE_SPACE 4555 Heap::ShouldZapGarbage()
4446 : Sweeper::IGNORE_FREE_SPACE); 4556 ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
4557 : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
4447 continue; 4558 continue;
4448 } 4559 }
4449 4560
4450 // One unused page is kept, all further are released before sweeping them. 4561 // One unused page is kept, all further are released before sweeping them.
4451 if (MarkingState::Internal(p).live_bytes() == 0) { 4562 if (MarkingState::Internal(p).live_bytes() == 0) {
4452 if (unused_page_present) { 4563 if (unused_page_present) {
4453 if (FLAG_gc_verbose) { 4564 if (FLAG_gc_verbose) {
4454 PrintIsolate(isolate(), "sweeping: released page: %p", 4565 PrintIsolate(isolate(), "sweeping: released page: %p",
4455 static_cast<void*>(p)); 4566 static_cast<void*>(p));
4456 } 4567 }
(...skipping 73 matching lines...)
4530 // The target is always in old space, we don't have to record the slot in 4641 // The target is always in old space, we don't have to record the slot in
4531 // the old-to-new remembered set. 4642 // the old-to-new remembered set.
4532 DCHECK(!heap()->InNewSpace(target)); 4643 DCHECK(!heap()->InNewSpace(target));
4533 RecordRelocSlot(host, &rinfo, target); 4644 RecordRelocSlot(host, &rinfo, target);
4534 } 4645 }
4535 } 4646 }
4536 } 4647 }
4537 4648
4538 } // namespace internal 4649 } // namespace internal
4539 } // namespace v8 4650 } // namespace v8