OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 666 matching lines...)
677 space->top() == space->limit() | 677 space->top() == space->limit() |
678 ? nullptr | 678 ? nullptr |
679 : Page::FromAllocationAreaAddress(space->top()); | 679 : Page::FromAllocationAreaAddress(space->top()); |
680 for (Page* p : *space) { | 680 for (Page* p : *space) { |
681 if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue; | 681 if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue; |
682 // Invariant: Evacuation candidates are just created when marking is | 682 // Invariant: Evacuation candidates are just created when marking is |
683 // started. This means that sweeping has finished. Furthermore, at the end | 683 // started. This means that sweeping has finished. Furthermore, at the end |
684 // of a GC all evacuation candidates are cleared and their slot buffers are | 684 // of a GC all evacuation candidates are cleared and their slot buffers are |
685 // released. | 685 // released. |
686 CHECK(!p->IsEvacuationCandidate()); | 686 CHECK(!p->IsEvacuationCandidate()); |
687 CHECK_NULL(p->slot_set<OLD_TO_OLD>()); | 687 CHECK_NULL(p->old_to_old_slots()); |
688 CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>()); | 688 CHECK_NULL(p->typed_old_to_old_slots()); |
689 CHECK(p->SweepingDone()); | 689 CHECK(p->SweepingDone()); |
690 DCHECK(p->area_size() == area_size); | 690 DCHECK(p->area_size() == area_size); |
691 pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p)); | 691 pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p)); |
692 } | 692 } |
693 | 693 |
694 int candidate_count = 0; | 694 int candidate_count = 0; |
695 size_t total_live_bytes = 0; | 695 size_t total_live_bytes = 0; |
696 | 696 |
697 const bool reduce_memory = heap()->ShouldReduceMemory(); | 697 const bool reduce_memory = heap()->ShouldReduceMemory(); |
698 if (FLAG_manual_evacuation_candidates_selection) { | 698 if (FLAG_manual_evacuation_candidates_selection) { |
(...skipping 2693 matching lines...)
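Editor's note: the remainder of CollectEvacuationCandidates falls inside the elided lines above. As a minimal sketch of the step that presumably follows (an assumption, not a quotation of this CL), the collected (live bytes, page) pairs would be ordered so that mostly-empty pages are considered for evacuation first; std::pair's default ordering already compares the live-bytes component first, so no custom comparator is needed.

  // Illustrative sketch only: prefer pages with the fewest live bytes as
  // evacuation candidates. Requires <algorithm> for std::sort.
  std::sort(pages.begin(), pages.end());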
3392 return map_word.ToForwardingAddress(); | 3392 return map_word.ToForwardingAddress(); |
3393 } | 3393 } |
3394 } | 3394 } |
3395 return object; | 3395 return object; |
3396 } | 3396 } |
3397 }; | 3397 }; |
3398 | 3398 |
3399 MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode | 3399 MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode |
3400 MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) { | 3400 MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) { |
3401 AllocationSpace identity = p->owner()->identity(); | 3401 AllocationSpace identity = p->owner()->identity(); |
3402 if (p->slot_set<OLD_TO_NEW>() && | 3402 if (p->old_to_new_slots() && |
3403 (identity == OLD_SPACE || identity == MAP_SPACE)) { | 3403 (identity == OLD_SPACE || identity == MAP_SPACE)) { |
3404 return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS; | 3404 return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS; |
3405 } else if (p->typed_slot_set<OLD_TO_NEW>() && identity == CODE_SPACE) { | 3405 } else if (p->typed_old_to_new_slots() && identity == CODE_SPACE) { |
3406 return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS; | 3406 return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS; |
3407 } | 3407 } |
3408 return MarkCompactCollector::Sweeper::DO_NOT_CLEAR; | 3408 return MarkCompactCollector::Sweeper::DO_NOT_CLEAR; |
3409 } | 3409 } |
3410 | 3410 |
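Editor's note: a hedged illustration of what the mode selection above implies, assuming access to the Sweeper's internals and two hypothetical pages set up as described in the comments; the helper name and setup are assumptions, not part of this CL.

// Hypothetical sanity checks, for illustration only. The mode depends solely
// on the owning space and on which old-to-new slot sets a page has accumulated.
void CheckClearModeExpectations(Page* old_space_page, Page* code_space_page) {
  using Sweeper = MarkCompactCollector::Sweeper;
  // An OLD_SPACE (or MAP_SPACE) page with untyped old-to-new slots is swept
  // with CLEAR_REGULAR_SLOTS ...
  DCHECK_EQ(Sweeper::CLEAR_REGULAR_SLOTS,
            Sweeper::GetClearOldToNewSlotsMode(old_space_page));
  // ... a CODE_SPACE page with typed old-to-new slots uses CLEAR_TYPED_SLOTS;
  // every other combination falls through to DO_NOT_CLEAR.
  DCHECK_EQ(Sweeper::CLEAR_TYPED_SLOTS,
            Sweeper::GetClearOldToNewSlotsMode(code_space_page));
}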
3411 int MarkCompactCollector::Sweeper::RawSweep( | 3411 int MarkCompactCollector::Sweeper::RawSweep( |
3412 Page* p, FreeListRebuildingMode free_list_mode, | 3412 Page* p, FreeListRebuildingMode free_list_mode, |
3413 FreeSpaceTreatmentMode free_space_mode) { | 3413 FreeSpaceTreatmentMode free_space_mode) { |
3414 Space* space = p->owner(); | 3414 Space* space = p->owner(); |
3415 DCHECK_NOT_NULL(space); | 3415 DCHECK_NOT_NULL(space); |
(...skipping 93 matching lines...)
3509 SlotSet::KEEP_EMPTY_BUCKETS); | 3509 SlotSet::KEEP_EMPTY_BUCKETS); |
3510 } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) { | 3510 } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) { |
3511 free_ranges.insert(std::pair<uint32_t, uint32_t>( | 3511 free_ranges.insert(std::pair<uint32_t, uint32_t>( |
3512 static_cast<uint32_t>(free_start - p->address()), | 3512 static_cast<uint32_t>(free_start - p->address()), |
3513 static_cast<uint32_t>(p->area_end() - p->address()))); | 3513 static_cast<uint32_t>(p->area_end() - p->address()))); |
3514 } | 3514 } |
3515 } | 3515 } |
3516 | 3516 |
3517 // Clear invalid typed slots after collecting all free ranges. | 3517 // Clear invalid typed slots after collecting all free ranges. |
3518 if (slots_clearing_mode == CLEAR_TYPED_SLOTS) { | 3518 if (slots_clearing_mode == CLEAR_TYPED_SLOTS) { |
3519 p->typed_slot_set<OLD_TO_NEW>()->RemoveInvaldSlots(free_ranges); | 3519 p->typed_old_to_new_slots()->RemoveInvaldSlots(free_ranges); |
3520 } | 3520 } |
3521 | 3521 |
3522 // Clear the mark bits of that page and reset live bytes count. | 3522 // Clear the mark bits of that page and reset live bytes count. |
3523 p->ClearLiveness(); | 3523 p->ClearLiveness(); |
3524 | 3524 |
3525 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3525 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
3526 if (free_list_mode == IGNORE_FREE_LIST) return 0; | 3526 if (free_list_mode == IGNORE_FREE_LIST) return 0; |
3527 return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes)); | 3527 return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes)); |
3528 } | 3528 } |
3529 | 3529 |
(...skipping 38 matching lines...)
3568 IterationMode iteration_mode) { | 3568 IterationMode iteration_mode) { |
3569 LiveObjectIterator<kBlackObjects> it(chunk, state); | 3569 LiveObjectIterator<kBlackObjects> it(chunk, state); |
3570 HeapObject* object = nullptr; | 3570 HeapObject* object = nullptr; |
3571 while ((object = it.Next()) != nullptr) { | 3571 while ((object = it.Next()) != nullptr) { |
3572 DCHECK(ObjectMarking::IsBlack(object, state)); | 3572 DCHECK(ObjectMarking::IsBlack(object, state)); |
3573 if (!visitor->Visit(object)) { | 3573 if (!visitor->Visit(object)) { |
3574 if (iteration_mode == kClearMarkbits) { | 3574 if (iteration_mode == kClearMarkbits) { |
3575 state.bitmap->ClearRange( | 3575 state.bitmap->ClearRange( |
3576 chunk->AddressToMarkbitIndex(chunk->area_start()), | 3576 chunk->AddressToMarkbitIndex(chunk->area_start()), |
3577 chunk->AddressToMarkbitIndex(object->address())); | 3577 chunk->AddressToMarkbitIndex(object->address())); |
3578 SlotSet* slot_set = chunk->slot_set<OLD_TO_NEW>(); | 3578 if (chunk->old_to_new_slots() != nullptr) { |
3579 if (slot_set != nullptr) { | 3579 chunk->old_to_new_slots()->RemoveRange( |
3580 slot_set->RemoveRange( | |
3581 0, static_cast<int>(object->address() - chunk->address()), | 3580 0, static_cast<int>(object->address() - chunk->address()), |
3582 SlotSet::PREFREE_EMPTY_BUCKETS); | 3581 SlotSet::PREFREE_EMPTY_BUCKETS); |
3583 } | 3582 } |
3584 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(chunk, chunk->address(), | 3583 if (chunk->typed_old_to_new_slots() != nullptr) { |
3585 object->address()); | 3584 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(chunk, chunk->address(), |
| 3585 object->address()); |
| 3586 } |
3586 RecomputeLiveBytes(chunk, state); | 3587 RecomputeLiveBytes(chunk, state); |
3587 } | 3588 } |
3588 return false; | 3589 return false; |
3589 } | 3590 } |
3590 } | 3591 } |
3591 if (iteration_mode == kClearMarkbits) { | 3592 if (iteration_mode == kClearMarkbits) { |
3592 state.ClearLiveness(); | 3593 state.ClearLiveness(); |
3593 } | 3594 } |
3594 return true; | 3595 return true; |
3595 } | 3596 } |
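Editor's note: the visitor type consumed by this loop is not shown in the excerpt; all it needs is a Visit(HeapObject*) member returning bool. The struct below is a hypothetical illustration of that contract (its name is not from this CL): returning false aborts iteration on the page and triggers the mark-bit and slot-range cleanup shown above.

// Hypothetical visitor for illustration: counts live objects and never aborts.
struct CountLiveObjectsVisitor {
  bool Visit(HeapObject* object) {
    count++;
    return true;  // returning false would abort iteration on this page
  }
  int count = 0;
};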
(...skipping 79 matching lines...)
3675 EvacuateEpilogue(); | 3676 EvacuateEpilogue(); |
3676 } | 3677 } |
3677 | 3678 |
3678 #ifdef VERIFY_HEAP | 3679 #ifdef VERIFY_HEAP |
3679 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { | 3680 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { |
3680 VerifyEvacuation(heap()); | 3681 VerifyEvacuation(heap()); |
3681 } | 3682 } |
3682 #endif | 3683 #endif |
3683 } | 3684 } |
3684 | 3685 |
3685 template <RememberedSetType type> | 3686 template <PointerDirection direction> |
3686 class PointerUpdateJobTraits { | 3687 class PointerUpdateJobTraits { |
3687 public: | 3688 public: |
3688 typedef int PerPageData; // Per page data is not used in this job. | 3689 typedef int PerPageData; // Per page data is not used in this job. |
3689 typedef int PerTaskData; // Per task data is not used in this job. | 3690 typedef int PerTaskData; // Per task data is not used in this job. |
3690 | 3691 |
3691 static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk, | 3692 static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk, |
3692 PerPageData) { | 3693 PerPageData) { |
3693 UpdateUntypedPointers(heap, chunk); | 3694 UpdateUntypedPointers(heap, chunk); |
3694 UpdateTypedPointers(heap, chunk); | 3695 UpdateTypedPointers(heap, chunk); |
3695 return true; | 3696 return true; |
3696 } | 3697 } |
3697 static const bool NeedSequentialFinalization = false; | 3698 static const bool NeedSequentialFinalization = false; |
3698 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 3699 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
3699 } | 3700 } |
3700 | 3701 |
3701 private: | 3702 private: |
3702 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { | 3703 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { |
3703 if (type == OLD_TO_NEW) { | 3704 if (direction == OLD_TO_NEW) { |
3704 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { | 3705 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { |
3705 return CheckAndUpdateOldToNewSlot(heap, slot); | 3706 return CheckAndUpdateOldToNewSlot(heap, slot); |
3706 }); | 3707 }); |
3707 } else { | 3708 } else { |
3708 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { | 3709 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { |
3709 return UpdateSlot(reinterpret_cast<Object**>(slot)); | 3710 return UpdateSlot(reinterpret_cast<Object**>(slot)); |
3710 }); | 3711 }); |
3711 } | 3712 } |
3712 } | 3713 } |
3713 | 3714 |
3714 static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) { | 3715 static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) { |
3715 if (type == OLD_TO_OLD) { | 3716 if (direction == OLD_TO_OLD) { |
3716 Isolate* isolate = heap->isolate(); | 3717 Isolate* isolate = heap->isolate(); |
3717 RememberedSet<OLD_TO_OLD>::IterateTyped( | 3718 RememberedSet<OLD_TO_OLD>::IterateTyped( |
3718 chunk, | 3719 chunk, [isolate](SlotType type, Address host_addr, Address slot) { |
3719 [isolate](SlotType slot_type, Address host_addr, Address slot) { | 3720 return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot, |
3720 return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type, | 3721 UpdateSlot); |
3721 slot, UpdateSlot); | |
3722 }); | 3722 }); |
3723 } else { | 3723 } else { |
3724 Isolate* isolate = heap->isolate(); | 3724 Isolate* isolate = heap->isolate(); |
3725 RememberedSet<OLD_TO_NEW>::IterateTyped( | 3725 RememberedSet<OLD_TO_NEW>::IterateTyped( |
3726 chunk, | 3726 chunk, |
3727 [isolate, heap](SlotType slot_type, Address host_addr, Address slot) { | 3727 [isolate, heap](SlotType type, Address host_addr, Address slot) { |
3728 return UpdateTypedSlotHelper::UpdateTypedSlot( | 3728 return UpdateTypedSlotHelper::UpdateTypedSlot( |
3729 isolate, slot_type, slot, [heap](Object** slot) { | 3729 isolate, type, slot, [heap](Object** slot) { |
3730 return CheckAndUpdateOldToNewSlot( | 3730 return CheckAndUpdateOldToNewSlot( |
3731 heap, reinterpret_cast<Address>(slot)); | 3731 heap, reinterpret_cast<Address>(slot)); |
3732 }); | 3732 }); |
3733 }); | 3733 }); |
3734 } | 3734 } |
3735 } | 3735 } |
3736 | 3736 |
3737 static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap, | 3737 static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap, |
3738 Address slot_address) { | 3738 Address slot_address) { |
3739 // There may be concurrent action on slots in dead objects. Concurrent | 3739 // There may be concurrent action on slots in dead objects. Concurrent |
(...skipping 44 matching lines...)
3784 | 3784 |
3785 int NumberOfPointerUpdateTasks(int pages) { | 3785 int NumberOfPointerUpdateTasks(int pages) { |
3786 if (!FLAG_parallel_pointer_update) return 1; | 3786 if (!FLAG_parallel_pointer_update) return 1; |
3787 const int available_cores = Max( | 3787 const int available_cores = Max( |
3788 1, static_cast<int>( | 3788 1, static_cast<int>( |
3789 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())); | 3789 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())); |
3790 const int kPagesPerTask = 4; | 3790 const int kPagesPerTask = 4; |
3791 return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask); | 3791 return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask); |
3792 } | 3792 } |
3793 | 3793 |
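Editor's note: a worked example of the task-count formula above, using assumed values (10 remembered-set pages, 8 available background threads) that are not taken from this CL.

// Illustrative only: ceil(10 / 4) = 3 page batches, capped at the 8 cores,
// so three pointer-update tasks would be scheduled.
int ExamplePointerUpdateTaskCount() {
  const int pages = 10;           // assumed number of pages with remembered sets
  const int available_cores = 8;  // assumed NumberOfAvailableBackgroundThreads()
  const int kPagesPerTask = 4;
  return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);  // == 3
}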
3794 template <RememberedSetType type> | 3794 template <PointerDirection direction> |
3795 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) { | 3795 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) { |
3796 PageParallelJob<PointerUpdateJobTraits<type> > job( | 3796 PageParallelJob<PointerUpdateJobTraits<direction> > job( |
3797 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 3797 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
3798 RememberedSet<type>::IterateMemoryChunks( | 3798 RememberedSet<direction>::IterateMemoryChunks( |
3799 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); | 3799 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); |
3800 int num_pages = job.NumberOfPages(); | 3800 int num_pages = job.NumberOfPages(); |
3801 int num_tasks = NumberOfPointerUpdateTasks(num_pages); | 3801 int num_tasks = NumberOfPointerUpdateTasks(num_pages); |
3802 job.Run(num_tasks, [](int i) { return 0; }); | 3802 job.Run(num_tasks, [](int i) { return 0; }); |
3803 } | 3803 } |
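Editor's note: the call sites of this helper are elided from the excerpt. Presumably it is instantiated once per remembered-set direction during pointer updating, roughly as sketched below; the member names are assumptions, not quotations of this CL.

// Sketch only: run the parallel pointer-update job for both directions,
// reusing one semaphore for the page-parallel jobs.
UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_);
UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);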
3804 | 3804 |
3805 class ToSpacePointerUpdateJobTraits { | 3805 class ToSpacePointerUpdateJobTraits { |
3806 public: | 3806 public: |
3807 typedef std::pair<Address, Address> PerPageData; | 3807 typedef std::pair<Address, Address> PerPageData; |
3808 typedef PointersUpdatingVisitor* PerTaskData; | 3808 typedef PointersUpdatingVisitor* PerTaskData; |
(...skipping 135 matching lines...)
3944 const Sweeper::FreeSpaceTreatmentMode free_space_mode = | 3944 const Sweeper::FreeSpaceTreatmentMode free_space_mode = |
3945 Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE; | 3945 Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE; |
3946 if (identity == NEW_SPACE) { | 3946 if (identity == NEW_SPACE) { |
3947 RawSweep(page, IGNORE_FREE_LIST, free_space_mode); | 3947 RawSweep(page, IGNORE_FREE_LIST, free_space_mode); |
3948 } else { | 3948 } else { |
3949 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); | 3949 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); |
3950 } | 3950 } |
3951 DCHECK(page->SweepingDone()); | 3951 DCHECK(page->SweepingDone()); |
3952 | 3952 |
3953 // After finishing sweeping of a page we clean up its remembered set. | 3953 // After finishing sweeping of a page we clean up its remembered set. |
3954 TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>(); | 3954 if (page->typed_old_to_new_slots()) { |
3955 if (typed_slot_set) { | 3955 page->typed_old_to_new_slots()->FreeToBeFreedChunks(); |
3956 page->typed_slot_set<OLD_TO_NEW>()->FreeToBeFreedChunks(); | |
3957 } | 3956 } |
3958 SlotSet* slot_set = page->slot_set<OLD_TO_NEW>(); | 3957 if (page->old_to_new_slots()) { |
3959 if (slot_set) { | 3958 page->old_to_new_slots()->FreeToBeFreedBuckets(); |
3960 page->slot_set<OLD_TO_NEW>()->FreeToBeFreedBuckets(); | |
3961 } | 3959 } |
3962 } | 3960 } |
3963 | 3961 |
3964 { | 3962 { |
3965 base::LockGuard<base::Mutex> guard(&mutex_); | 3963 base::LockGuard<base::Mutex> guard(&mutex_); |
3966 swept_list_[identity].Add(page); | 3964 swept_list_[identity].Add(page); |
3967 } | 3965 } |
3968 return max_freed; | 3966 return max_freed; |
3969 } | 3967 } |
3970 | 3968 |
(...skipping 144 matching lines...)
4115 // The target is always in old space, so we don't have to record the slot | 4113 // The target is always in old space, so we don't have to record the slot |
4116 // in the old-to-new remembered set. | 4114 // in the old-to-new remembered set. |
4117 DCHECK(!heap()->InNewSpace(target)); | 4115 DCHECK(!heap()->InNewSpace(target)); |
4118 RecordRelocSlot(host, &rinfo, target); | 4116 RecordRelocSlot(host, &rinfo, target); |
4119 } | 4117 } |
4120 } | 4118 } |
4121 } | 4119 } |
4122 | 4120 |
4123 } // namespace internal | 4121 } // namespace internal |
4124 } // namespace v8 | 4122 } // namespace v8 |