OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 671 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
682 space->top() == space->limit() | 682 space->top() == space->limit() |
683 ? nullptr | 683 ? nullptr |
684 : Page::FromAllocationAreaAddress(space->top()); | 684 : Page::FromAllocationAreaAddress(space->top()); |
685 for (Page* p : *space) { | 685 for (Page* p : *space) { |
686 if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue; | 686 if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue; |
687 // Invariant: Evacuation candidates are just created when marking is | 687 // Invariant: Evacuation candidates are just created when marking is |
688 // started. This means that sweeping has finished. Furthermore, at the end | 688 // started. This means that sweeping has finished. Furthermore, at the end |
689 // of a GC all evacuation candidates are cleared and their slot buffers are | 689 // of a GC all evacuation candidates are cleared and their slot buffers are |
690 // released. | 690 // released. |
691 CHECK(!p->IsEvacuationCandidate()); | 691 CHECK(!p->IsEvacuationCandidate()); |
692 CHECK_NULL(p->old_to_old_slots()); | 692 CHECK_NULL(p->slot_set<OLD_TO_OLD>()); |
693 CHECK_NULL(p->typed_old_to_old_slots()); | 693 CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>()); |
694 CHECK(p->SweepingDone()); | 694 CHECK(p->SweepingDone()); |
695 DCHECK(p->area_size() == area_size); | 695 DCHECK(p->area_size() == area_size); |
696 pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p)); | 696 pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p)); |
697 } | 697 } |
698 | 698 |
699 int candidate_count = 0; | 699 int candidate_count = 0; |
700 size_t total_live_bytes = 0; | 700 size_t total_live_bytes = 0; |
701 | 701 |
702 const bool reduce_memory = heap()->ShouldReduceMemory(); | 702 const bool reduce_memory = heap()->ShouldReduceMemory(); |
703 if (FLAG_manual_evacuation_candidates_selection) { | 703 if (FLAG_manual_evacuation_candidates_selection) { |
(...skipping 2719 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3423 return map_word.ToForwardingAddress(); | 3423 return map_word.ToForwardingAddress(); |
3424 } | 3424 } |
3425 } | 3425 } |
3426 return object; | 3426 return object; |
3427 } | 3427 } |
3428 }; | 3428 }; |
3429 | 3429 |
3430 MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode | 3430 MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode |
3431 MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) { | 3431 MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) { |
3432 AllocationSpace identity = p->owner()->identity(); | 3432 AllocationSpace identity = p->owner()->identity(); |
3433 if (p->old_to_new_slots() && | 3433 if (p->slot_set<OLD_TO_NEW>() && |
3434 (identity == OLD_SPACE || identity == MAP_SPACE)) { | 3434 (identity == OLD_SPACE || identity == MAP_SPACE)) { |
3435 return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS; | 3435 return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS; |
3436 } else if (p->typed_old_to_new_slots() && identity == CODE_SPACE) { | 3436 } else if (p->typed_slot_set<OLD_TO_NEW>() && identity == CODE_SPACE) { |
3437 return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS; | 3437 return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS; |
3438 } | 3438 } |
3439 return MarkCompactCollector::Sweeper::DO_NOT_CLEAR; | 3439 return MarkCompactCollector::Sweeper::DO_NOT_CLEAR; |
3440 } | 3440 } |
3441 | 3441 |
3442 int MarkCompactCollector::Sweeper::RawSweep( | 3442 int MarkCompactCollector::Sweeper::RawSweep( |
3443 Page* p, FreeListRebuildingMode free_list_mode, | 3443 Page* p, FreeListRebuildingMode free_list_mode, |
3444 FreeSpaceTreatmentMode free_space_mode) { | 3444 FreeSpaceTreatmentMode free_space_mode) { |
3445 Space* space = p->owner(); | 3445 Space* space = p->owner(); |
3446 DCHECK_NOT_NULL(space); | 3446 DCHECK_NOT_NULL(space); |
(...skipping 93 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3540 SlotSet::KEEP_EMPTY_BUCKETS); | 3540 SlotSet::KEEP_EMPTY_BUCKETS); |
3541 } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) { | 3541 } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) { |
3542 free_ranges.insert(std::pair<uint32_t, uint32_t>( | 3542 free_ranges.insert(std::pair<uint32_t, uint32_t>( |
3543 static_cast<uint32_t>(free_start - p->address()), | 3543 static_cast<uint32_t>(free_start - p->address()), |
3544 static_cast<uint32_t>(p->area_end() - p->address()))); | 3544 static_cast<uint32_t>(p->area_end() - p->address()))); |
3545 } | 3545 } |
3546 } | 3546 } |
3547 | 3547 |
3548 // Clear invalid typed slots after collecting all free ranges. | 3548 // Clear invalid typed slots after collecting all free ranges. |
3549 if (slots_clearing_mode == CLEAR_TYPED_SLOTS) { | 3549 if (slots_clearing_mode == CLEAR_TYPED_SLOTS) { |
3550 p->typed_old_to_new_slots()->RemoveInvaldSlots(free_ranges); | 3550 TypedSlotSet* typed_slot_set = p->typed_slot_set<OLD_TO_NEW>(); |
| 3551 if (typed_slot_set != nullptr) { |
| 3552 typed_slot_set->RemoveInvaldSlots(free_ranges); |
| 3553 } |
3551 } | 3554 } |
3552 | 3555 |
3553 // Clear the mark bits of that page and reset live bytes count. | 3556 // Clear the mark bits of that page and reset live bytes count. |
3554 MarkingState::Internal(p).ClearLiveness(); | 3557 MarkingState::Internal(p).ClearLiveness(); |
3555 | 3558 |
3556 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3559 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
3557 if (free_list_mode == IGNORE_FREE_LIST) return 0; | 3560 if (free_list_mode == IGNORE_FREE_LIST) return 0; |
3558 return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes)); | 3561 return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes)); |
3559 } | 3562 } |
3560 | 3563 |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3599 IterationMode iteration_mode) { | 3602 IterationMode iteration_mode) { |
3600 LiveObjectIterator<kBlackObjects> it(chunk, state); | 3603 LiveObjectIterator<kBlackObjects> it(chunk, state); |
3601 HeapObject* object = nullptr; | 3604 HeapObject* object = nullptr; |
3602 while ((object = it.Next()) != nullptr) { | 3605 while ((object = it.Next()) != nullptr) { |
3603 DCHECK(ObjectMarking::IsBlack(object, state)); | 3606 DCHECK(ObjectMarking::IsBlack(object, state)); |
3604 if (!visitor->Visit(object)) { | 3607 if (!visitor->Visit(object)) { |
3605 if (iteration_mode == kClearMarkbits) { | 3608 if (iteration_mode == kClearMarkbits) { |
3606 state.bitmap()->ClearRange( | 3609 state.bitmap()->ClearRange( |
3607 chunk->AddressToMarkbitIndex(chunk->area_start()), | 3610 chunk->AddressToMarkbitIndex(chunk->area_start()), |
3608 chunk->AddressToMarkbitIndex(object->address())); | 3611 chunk->AddressToMarkbitIndex(object->address())); |
3609 if (chunk->old_to_new_slots() != nullptr) { | 3612 SlotSet* slot_set = chunk->slot_set<OLD_TO_NEW>(); |
3610 chunk->old_to_new_slots()->RemoveRange( | 3613 if (slot_set != nullptr) { |
| 3614 slot_set->RemoveRange( |
3611 0, static_cast<int>(object->address() - chunk->address()), | 3615 0, static_cast<int>(object->address() - chunk->address()), |
3612 SlotSet::PREFREE_EMPTY_BUCKETS); | 3616 SlotSet::PREFREE_EMPTY_BUCKETS); |
3613 } | 3617 } |
3614 if (chunk->typed_old_to_new_slots() != nullptr) { | 3618 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(chunk, chunk->address(), |
3615 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(chunk, chunk->address(), | 3619 object->address()); |
3616 object->address()); | |
3617 } | |
3618 RecomputeLiveBytes(chunk, state); | 3620 RecomputeLiveBytes(chunk, state); |
3619 } | 3621 } |
3620 return false; | 3622 return false; |
3621 } | 3623 } |
3622 } | 3624 } |
3623 if (iteration_mode == kClearMarkbits) { | 3625 if (iteration_mode == kClearMarkbits) { |
3624 state.ClearLiveness(); | 3626 state.ClearLiveness(); |
3625 } | 3627 } |
3626 return true; | 3628 return true; |
3627 } | 3629 } |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3707 EvacuateEpilogue(); | 3709 EvacuateEpilogue(); |
3708 } | 3710 } |
3709 | 3711 |
3710 #ifdef VERIFY_HEAP | 3712 #ifdef VERIFY_HEAP |
3711 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { | 3713 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { |
3712 VerifyEvacuation(heap()); | 3714 VerifyEvacuation(heap()); |
3713 } | 3715 } |
3714 #endif | 3716 #endif |
3715 } | 3717 } |
3716 | 3718 |
3717 template <PointerDirection direction> | 3719 template <RememberedSetType type> |
3718 class PointerUpdateJobTraits { | 3720 class PointerUpdateJobTraits { |
3719 public: | 3721 public: |
3720 typedef int PerPageData; // Per page data is not used in this job. | 3722 typedef int PerPageData; // Per page data is not used in this job. |
3721 typedef int PerTaskData; // Per task data is not used in this job. | 3723 typedef int PerTaskData; // Per task data is not used in this job. |
3722 | 3724 |
// Per-page worker entry point: updates first the untyped and then the typed
// remembered-set slots recorded on |chunk|. Always reports success.
static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
                                  PerPageData) {
  UpdateUntypedPointers(heap, chunk);
  UpdateTypedPointers(heap, chunk);
  return true;
}
// Pointer updating needs no sequential finalization step per page.
static const bool NeedSequentialFinalization = false;
static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
}
3732 | 3734 |
3733 private: | 3735 private: |
// Visits all untyped slots of |chunk| for the remembered-set kind |type|
// this job was instantiated with, updating each slot in place.
static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
  if (type == OLD_TO_NEW) {
    // Old-to-new slots may live in dead objects (see the comment on
    // CheckAndUpdateOldToNewSlot below), so they go through the checked
    // update path.
    RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
      return CheckAndUpdateOldToNewSlot(heap, slot);
    });
  } else {
    // Old-to-old slots hold an object pointer directly; follow a potential
    // forwarding address.
    RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
      return UpdateSlot(reinterpret_cast<Object**>(slot));
    });
  }
}
3745 | 3747 |
// Visits all typed slots (slots embedded in code objects) of |chunk| for the
// remembered-set kind |type| and updates each one through
// UpdateTypedSlotHelper, which decodes the slot according to |slot_type|.
static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
  if (type == OLD_TO_OLD) {
    Isolate* isolate = heap->isolate();
    RememberedSet<OLD_TO_OLD>::IterateTyped(
        chunk,
        [isolate](SlotType slot_type, Address host_addr, Address slot) {
          return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
                                                        slot, UpdateSlot);
        });
  } else {
    Isolate* isolate = heap->isolate();
    RememberedSet<OLD_TO_NEW>::IterateTyped(
        chunk,
        [isolate, heap](SlotType slot_type, Address host_addr, Address slot) {
          // Old-to-new typed slots take the checked update path, same as
          // their untyped counterparts.
          return UpdateTypedSlotHelper::UpdateTypedSlot(
              isolate, slot_type, slot, [heap](Object** slot) {
                return CheckAndUpdateOldToNewSlot(
                    heap, reinterpret_cast<Address>(slot));
              });
        });
  }
}
3767 | 3770 |
3768 static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap, | 3771 static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap, |
3769 Address slot_address) { | 3772 Address slot_address) { |
3770 // There may be concurrent action on slots in dead objects. Concurrent | 3773 // There may be concurrent action on slots in dead objects. Concurrent |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3817 | 3820 |
3818 int NumberOfPointerUpdateTasks(int pages) { | 3821 int NumberOfPointerUpdateTasks(int pages) { |
3819 if (!FLAG_parallel_pointer_update) return 1; | 3822 if (!FLAG_parallel_pointer_update) return 1; |
3820 const int available_cores = Max( | 3823 const int available_cores = Max( |
3821 1, static_cast<int>( | 3824 1, static_cast<int>( |
3822 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())); | 3825 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())); |
3823 const int kPagesPerTask = 4; | 3826 const int kPagesPerTask = 4; |
3824 return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask); | 3827 return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask); |
3825 } | 3828 } |
3826 | 3829 |
// Runs a PageParallelJob that updates every remembered-set slot of kind
// |type| across the whole heap. |semaphore| is handed to the job for task
// synchronization.
template <RememberedSetType type>
void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
  PageParallelJob<PointerUpdateJobTraits<type> > job(
      heap, heap->isolate()->cancelable_task_manager(), semaphore);
  // Schedule every memory chunk that has slots of |type| recorded.
  RememberedSet<type>::IterateMemoryChunks(
      heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
  int num_pages = job.NumberOfPages();
  int num_tasks = NumberOfPointerUpdateTasks(num_pages);
  job.Run(num_tasks, [](int i) { return 0; });
}
3837 | 3840 |
3838 class ToSpacePointerUpdateJobTraits { | 3841 class ToSpacePointerUpdateJobTraits { |
3839 public: | 3842 public: |
3840 typedef std::pair<Address, Address> PerPageData; | 3843 typedef std::pair<Address, Address> PerPageData; |
3841 typedef PointersUpdatingVisitor* PerTaskData; | 3844 typedef PointersUpdatingVisitor* PerTaskData; |
(...skipping 134 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3976 const Sweeper::FreeSpaceTreatmentMode free_space_mode = | 3979 const Sweeper::FreeSpaceTreatmentMode free_space_mode = |
3977 Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE; | 3980 Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE; |
3978 if (identity == NEW_SPACE) { | 3981 if (identity == NEW_SPACE) { |
3979 RawSweep(page, IGNORE_FREE_LIST, free_space_mode); | 3982 RawSweep(page, IGNORE_FREE_LIST, free_space_mode); |
3980 } else { | 3983 } else { |
3981 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); | 3984 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); |
3982 } | 3985 } |
3983 DCHECK(page->SweepingDone()); | 3986 DCHECK(page->SweepingDone()); |
3984 | 3987 |
3985 // After finishing sweeping of a page we clean up its remembered set. | 3988 // After finishing sweeping of a page we clean up its remembered set. |
3986 if (page->typed_old_to_new_slots()) { | 3989 TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>(); |
3987 page->typed_old_to_new_slots()->FreeToBeFreedChunks(); | 3990 if (typed_slot_set) { |
| 3991 typed_slot_set->FreeToBeFreedChunks(); |
3988 } | 3992 } |
3989 if (page->old_to_new_slots()) { | 3993 SlotSet* slot_set = page->slot_set<OLD_TO_NEW>(); |
3990 page->old_to_new_slots()->FreeToBeFreedBuckets(); | 3994 if (slot_set) { |
| 3995 slot_set->FreeToBeFreedBuckets(); |
3991 } | 3996 } |
3992 } | 3997 } |
3993 | 3998 |
3994 { | 3999 { |
3995 base::LockGuard<base::Mutex> guard(&mutex_); | 4000 base::LockGuard<base::Mutex> guard(&mutex_); |
3996 swept_list_[identity].Add(page); | 4001 swept_list_[identity].Add(page); |
3997 } | 4002 } |
3998 return max_freed; | 4003 return max_freed; |
3999 } | 4004 } |
4000 | 4005 |
(...skipping 146 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4147 // The target is always in old space, we don't have to record the slot in | 4152 // The target is always in old space, we don't have to record the slot in |
4148 // the old-to-new remembered set. | 4153 // the old-to-new remembered set. |
4149 DCHECK(!heap()->InNewSpace(target)); | 4154 DCHECK(!heap()->InNewSpace(target)); |
4150 RecordRelocSlot(host, &rinfo, target); | 4155 RecordRelocSlot(host, &rinfo, target); |
4151 } | 4156 } |
4152 } | 4157 } |
4153 } | 4158 } |
4154 | 4159 |
4155 } // namespace internal | 4160 } // namespace internal |
4156 } // namespace v8 | 4161 } // namespace v8 |
OLD | NEW |