OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 1800 matching lines...) |
1811 Page::FromAddress(object->address())->owner()->identity()); | 1811 Page::FromAddress(object->address())->owner()->identity()); |
1812 HeapObject* target_object = nullptr; | 1812 HeapObject* target_object = nullptr; |
1813 if (TryEvacuateObject(target_space, object, &target_object)) { | 1813 if (TryEvacuateObject(target_space, object, &target_object)) { |
1814 DCHECK(object->map_word().IsForwardingAddress()); | 1814 DCHECK(object->map_word().IsForwardingAddress()); |
1815 return true; | 1815 return true; |
1816 } | 1816 } |
1817 return false; | 1817 return false; |
1818 } | 1818 } |
1819 }; | 1819 }; |
1820 | 1820 |
| 1821 class MarkCompactCollector::EvacuateRecordOnlyVisitor final |
| 1822 : public MarkCompactCollector::HeapObjectVisitor { |
| 1823 public: |
| 1824 bool Visit(HeapObject* object) { |
| 1825 RecordMigratedSlotVisitor visitor; |
| 1826 object->IterateBodyFast(&visitor); |
| 1827 return true; |
| 1828 } |
| 1829 }; |
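
The new EvacuateRecordOnlyVisitor re-walks an object's body purely so that its outgoing slots get recorded; it never moves anything. A minimal standalone model of this record-only visitor pattern, assuming toy stand-ins for V8's object, slot, and visitor types (every name below except the pattern itself is hypothetical, and V8's RecordMigratedSlotVisitor and IterateBodyFast are far richer):

    // Model only: a visitor that records outgoing slots instead of
    // migrating the object. Stand-in types; not V8's real API.
    #include <vector>

    struct HeapObject;
    using Slot = HeapObject**;

    struct SlotRecorder {
      std::vector<Slot> recorded;
      void VisitPointer(Slot slot) { recorded.push_back(slot); }
    };

    struct HeapObject {
      std::vector<HeapObject*> fields;
      void IterateBody(SlotRecorder* v) {
        for (auto& f : fields) v->VisitPointer(&f);  // visit every slot
      }
    };

    struct RecordOnlyVisitor {
      SlotRecorder recorder;
      bool Visit(HeapObject* object) {
        object->IterateBody(&recorder);  // record, never move
        return true;                     // always succeeds
      }
    };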
1821 | 1830 |
1822 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { | 1831 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { |
1823 PageIterator it(space); | 1832 PageIterator it(space); |
1824 while (it.has_next()) { | 1833 while (it.has_next()) { |
1825 Page* p = it.next(); | 1834 Page* p = it.next(); |
1826 if (!p->IsFlagSet(Page::BLACK_PAGE)) { | 1835 if (!p->IsFlagSet(Page::BLACK_PAGE)) { |
1827 DiscoverGreyObjectsOnPage(p); | 1836 DiscoverGreyObjectsOnPage(p); |
1828 } | 1837 } |
1829 if (marking_deque()->IsFull()) return; | 1838 if (marking_deque()->IsFull()) return; |
1830 } | 1839 } |
(...skipping 1254 matching lines...) |
3085 if (chunk->InNewSpace()) { | 3094 if (chunk->InNewSpace()) { |
3086 DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), | 3095 DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), |
3087 NewSpacePage::kSweepingDone); | 3096 NewSpacePage::kSweepingDone); |
3088 success = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_); | 3097 success = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_); |
3089 DCHECK(success); | 3098 DCHECK(success); |
3090 USE(success); | 3099 USE(success); |
3091 } else { | 3100 } else { |
3092 DCHECK(chunk->IsEvacuationCandidate()); | 3101 DCHECK(chunk->IsEvacuationCandidate()); |
3093 DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone); | 3102 DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone); |
3094 success = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_); | 3103 success = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_); |
| 3104 if (!success) { |
| 3105 // Aborted compaction page. We can record slots here to have them |
| 3106 // processed in parallel later on. |
| 3107 EvacuateRecordOnlyVisitor record_visitor; |
| 3108 success = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor); |
| 3109 DCHECK(success); |
| 3110 USE(success); |
| 3111 // We need to return failure here to indicate that we want this page added |
| 3112 // to the sweeper. |
| 3113 return false; |
| 3114 } |
3095 } | 3115 } |
3096 return success; | 3116 return success; |
3097 } | 3117 } |
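
The revised EvacuatePage degrades gracefully: if moving objects off an old-space candidate fails (for instance because the target space runs out of room), the page is re-walked in record-only mode and failure is returned so the caller queues the page for sweeping. A self-contained sketch of that control flow, with all types reduced to toy stand-ins rather than V8's real classes:

    #include <cassert>

    struct Page {
      bool in_new_space = false;
      bool evacuation_failed = false;  // simulates an aborted compaction
    };

    bool TryEvacuate(Page* p) { return !p->evacuation_failed; }
    void RecordSlotsOnly(Page* p) { /* record slots, move nothing */ }

    // Returns false for aborted pages so the caller adds them to the sweeper.
    bool EvacuatePage(Page* p) {
      if (p->in_new_space) {
        bool success = TryEvacuate(p);
        assert(success);  // new-space evacuation must not fail
        return success;
      }
      if (TryEvacuate(p)) return true;
      // Aborted compaction page: record slots now so they can be
      // processed in parallel later on.
      RecordSlotsOnly(p);
      return false;
    }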
3098 | 3118 |
3099 void MarkCompactCollector::Evacuator::Finalize() { | 3119 void MarkCompactCollector::Evacuator::Finalize() { |
3100 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); | 3120 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); |
3101 heap()->code_space()->MergeCompactionSpace( | 3121 heap()->code_space()->MergeCompactionSpace( |
3102 compaction_spaces_.Get(CODE_SPACE)); | 3122 compaction_spaces_.Get(CODE_SPACE)); |
3103 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); | 3123 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); |
3104 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size()); | 3124 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size()); |
(...skipping 40 matching lines...) |
3145 typedef int* PerPageData; // Pointer to number of aborted pages. | 3165 typedef int* PerPageData; // Pointer to number of aborted pages. |
3146 typedef MarkCompactCollector::Evacuator* PerTaskData; | 3166 typedef MarkCompactCollector::Evacuator* PerTaskData; |
3147 | 3167 |
3148 static const bool NeedSequentialFinalization = true; | 3168 static const bool NeedSequentialFinalization = true; |
3149 | 3169 |
3150 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, | 3170 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, |
3151 MemoryChunk* chunk, PerPageData) { | 3171 MemoryChunk* chunk, PerPageData) { |
3152 return evacuator->EvacuatePage(chunk); | 3172 return evacuator->EvacuatePage(chunk); |
3153 } | 3173 } |
3154 | 3174 |
3155 static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success, | 3175 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, |
3156 PerPageData data) { | 3176 bool success, PerPageData data) { |
3157 if (chunk->InNewSpace()) { | 3177 if (chunk->InNewSpace()) { |
3158 DCHECK(success); | 3178 DCHECK(success); |
3159 } else { | 3179 } else { |
3160 Page* p = static_cast<Page*>(chunk); | 3180 Page* p = static_cast<Page*>(chunk); |
3161 if (success) { | 3181 if (success) { |
3162 DCHECK(p->IsEvacuationCandidate()); | 3182 DCHECK(p->IsEvacuationCandidate()); |
3163 DCHECK(p->SweepingDone()); | 3183 DCHECK(p->SweepingDone()); |
3164 p->Unlink(); | 3184 p->Unlink(); |
3165 } else { | 3185 } else { |
3166 // We have partially compacted the page, i.e., some objects may have | 3186 // We have partially compacted the page, i.e., some objects may have |
3167 // moved, others are still in place. | 3187 // moved, others are still in place. |
3168 // We need to: | |
3169 // - Leave the evacuation candidate flag for later processing of slots | |
3170 // buffer entries. | |
3171 // - Leave the slots buffer there for processing of entries added by | |
3172 // the write barrier. | |
3173 // - Rescan the page as slot recording in the migration buffer only | |
3174 // happens upon moving (which we potentially didn't do). | |
3175 // - Leave the page in the list of pages of a space since we could not | |
3176 // fully evacuate it. | |
3177 DCHECK(p->IsEvacuationCandidate()); | |
3178 p->SetFlag(Page::COMPACTION_WAS_ABORTED); | 3188 p->SetFlag(Page::COMPACTION_WAS_ABORTED); |
| 3189 p->ClearEvacuationCandidate(); |
| 3190 // Slots have already been recorded so we just need to add it to the |
| 3191 // sweeper. |
| 3192 heap->mark_compact_collector()->sweeper().AddLatePage( |
| 3193 p->owner()->identity(), p); |
3179 *data += 1; | 3194 *data += 1; |
3180 } | 3195 } |
3181 } | 3196 } |
3182 } | 3197 } |
3183 }; | 3198 }; |
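
FinalizePageSequentially now routes aborted pages straight to the sweeper instead of leaving them flagged for a dedicated second pass. A compressed model of that decision, assuming toy Page and Sweeper types (nothing below is V8's real API):

    #include <cassert>
    #include <queue>

    struct Page {
      bool in_new_space = false;
      bool is_evacuation_candidate = true;
      bool compaction_was_aborted = false;
    };

    struct Sweeper { std::queue<Page*> late_pages; };

    void FinalizePage(Sweeper* sweeper, Page* p, bool success,
                      int* aborted_pages) {
      if (p->in_new_space) { assert(success); return; }
      if (success) {
        // Fully evacuated: the page can be unlinked from its space.
      } else {
        // Partially compacted: slots were already recorded during the
        // record-only pass, so just flag the page and queue it for sweeping.
        p->compaction_was_aborted = true;
        p->is_evacuation_candidate = false;
        sweeper->late_pages.push(p);
        ++*aborted_pages;
      }
    }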
3184 | 3199 |
3185 void MarkCompactCollector::EvacuatePagesInParallel() { | 3200 void MarkCompactCollector::EvacuatePagesInParallel() { |
3186 PageParallelJob<EvacuationJobTraits> job( | 3201 PageParallelJob<EvacuationJobTraits> job( |
3187 heap_, heap_->isolate()->cancelable_task_manager()); | 3202 heap_, heap_->isolate()->cancelable_task_manager()); |
3188 | 3203 |
(...skipping 223 matching lines...) |
3412 object->IterateBody(map->instance_type(), size, visitor); | 3427 object->IterateBody(map->instance_type(), size, visitor); |
3413 } | 3428 } |
3414 } | 3429 } |
3415 | 3430 |
3416 void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space, | 3431 void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space, |
3417 Page* page) { | 3432 Page* page) { |
3418 base::LockGuard<base::Mutex> guard(&mutex_); | 3433 base::LockGuard<base::Mutex> guard(&mutex_); |
3419 swept_list_[space->identity()].Add(page); | 3434 swept_list_[space->identity()].Add(page); |
3420 } | 3435 } |
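
AddSweptPageSafe relies on base::LockGuard, V8's RAII mutex guard, to make the swept list safe to append to from concurrent sweeper tasks. The same pattern in standard C++ looks like this (sketch with a hypothetical Page type and space count):

    #include <mutex>
    #include <vector>

    struct Page {};
    constexpr int kNumSpaces = 3;  // assumption for the sketch

    std::mutex mutex_;
    std::vector<Page*> swept_list_[kNumSpaces];

    void AddSweptPageSafe(int space_identity, Page* page) {
      std::lock_guard<std::mutex> guard(mutex_);  // unlocks on scope exit
      swept_list_[space_identity].push_back(page);
    }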
3421 | 3436 |
3422 void MarkCompactCollector::SweepAbortedPages() { | |
3423 // Second pass on aborted pages. | |
3424 for (Page* p : evacuation_candidates_) { | |
3425 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | |
3426 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); | |
3427 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | |
3428 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | |
3429 switch (space->identity()) { | |
3430 case OLD_SPACE: | |
3431 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, | |
3432 Sweeper::IGNORE_SKIP_LIST, | |
3433 Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr); | |
3434 break; | |
3435 case CODE_SPACE: | |
3436 if (FLAG_zap_code_space) { | |
3437 Sweeper::RawSweep< | |
3438 Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, | |
3439 Sweeper::REBUILD_SKIP_LIST, Sweeper::ZAP_FREE_SPACE>(space, p, | |
3440 nullptr); | |
3441 } else { | |
3442 Sweeper::RawSweep< | |
3443 Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, | |
3444 Sweeper::REBUILD_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>( | |
3445 space, p, nullptr); | |
3446 } | |
3447 break; | |
3448 default: | |
3449 UNREACHABLE(); | |
3450 break; | |
3451 } | |
3452 sweeper().AddSweptPageSafe(space, p); | |
3453 } | |
3454 } | |
3455 } | |
3456 | |
3457 | |
3458 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 3437 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
3459 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | 3438 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); |
3460 Heap::RelocationLock relocation_lock(heap()); | 3439 Heap::RelocationLock relocation_lock(heap()); |
3461 | 3440 |
3462 { | 3441 { |
3463 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); | 3442 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); |
3464 EvacuationScope evacuation_scope(this); | 3443 EvacuationScope evacuation_scope(this); |
3465 | 3444 |
3466 EvacuateNewSpacePrologue(); | 3445 EvacuateNewSpacePrologue(); |
3467 EvacuatePagesInParallel(); | 3446 EvacuatePagesInParallel(); |
3468 EvacuateNewSpaceEpilogue(); | 3447 EvacuateNewSpaceEpilogue(); |
3469 heap()->new_space()->set_age_mark(heap()->new_space()->top()); | 3448 heap()->new_space()->set_age_mark(heap()->new_space()->top()); |
3470 } | 3449 } |
3471 | 3450 |
3472 UpdatePointersAfterEvacuation(); | 3451 UpdatePointersAfterEvacuation(); |
3473 | 3452 |
3474 // Give pages that are queued to be freed back to the OS. Note that filtering | 3453 // Give pages that are queued to be freed back to the OS. Note that filtering |
3475 // slots only handles old space (for unboxed doubles), and thus map space can | 3454 // slots only handles old space (for unboxed doubles), and thus map space can |
3476 // still contain stale pointers. We only free the chunks after pointer updates | 3455 // still contain stale pointers. We only free the chunks after pointer updates |
3477 // to still have access to page headers. | 3456 // to still have access to page headers. |
3478 heap()->FreeQueuedChunks(); | 3457 heap()->FreeQueuedChunks(); |
3479 | 3458 |
3480 { | 3459 { |
3481 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | 3460 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); |
3482 // After updating all pointers, we can finally sweep the aborted pages, | |
3483 // effectively overriding any forward pointers. | |
3484 SweepAbortedPages(); | |
3485 | 3461 |
3486 // EvacuateNewSpaceAndCandidates iterates over new space objects and for | 3462 // EvacuateNewSpaceAndCandidates iterates over new space objects and for |
3487 // ArrayBuffers either re-registers them as live or promotes them. This is | 3463 // ArrayBuffers either re-registers them as live or promotes them. This is |
3488 // needed to properly free them. | 3464 // needed to properly free them. |
3489 heap()->array_buffer_tracker()->FreeDead(false); | 3465 heap()->array_buffer_tracker()->FreeDead(false); |
3490 | 3466 |
3491 // Deallocate evacuated candidate pages. | 3467 // Deallocate evacuated candidate pages. |
3492 ReleaseEvacuationCandidates(); | 3468 ReleaseEvacuationCandidates(); |
3493 } | 3469 } |
3494 | 3470 |
(...skipping 137 matching lines...) |
3632 Heap* heap = this->heap(); | 3608 Heap* heap = this->heap(); |
3633 TRACE_GC(heap->tracer(), | 3609 TRACE_GC(heap->tracer(), |
3634 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); | 3610 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); |
3635 UpdatePointersInParallel<OLD_TO_OLD>(heap_); | 3611 UpdatePointersInParallel<OLD_TO_OLD>(heap_); |
3636 } | 3612 } |
3637 | 3613 |
3638 { | 3614 { |
3639 TRACE_GC(heap()->tracer(), | 3615 TRACE_GC(heap()->tracer(), |
3640 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED); | 3616 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED); |
3641 for (Page* p : evacuation_candidates_) { | 3617 for (Page* p : evacuation_candidates_) { |
3642 DCHECK(p->IsEvacuationCandidate()); | 3618 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { |
| 3619 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); |
| 3620 } |
| 3621 if (!p->IsEvacuationCandidate()) continue; |
3643 // Important: skip list should be cleared only after roots were updated | 3622 // Important: skip list should be cleared only after roots were updated |
3644 // because root iteration traverses the stack and might have to find | 3623 // because root iteration traverses the stack and might have to find |
3645 // code objects from non-updated pc pointing into evacuation candidate. | 3624 // code objects from non-updated pc pointing into evacuation candidate. |
3646 SkipList* list = p->skip_list(); | 3625 SkipList* list = p->skip_list(); |
3647 if (list != NULL) list->Clear(); | 3626 if (list != NULL) list->Clear(); |
3648 | |
3649 // First pass on aborted pages, fixing up all live objects. | |
3650 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | |
3651 p->ClearEvacuationCandidate(); | |
3652 VisitLiveObjectsBody(p, &updating_visitor); | |
3653 } | |
3654 } | 3627 } |
3655 } | 3628 } |
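
With aborted pages already handed to the sweeper during finalization, the candidate loop above only needs to drop the COMPACTION_WAS_ABORTED flag and can then skip such pages entirely; the old first pass that rescanned their live objects is gone. A toy rendering of the new loop shape (stand-in types, not V8 code):

    #include <vector>

    struct Page {
      bool compaction_was_aborted = false;
      bool is_evacuation_candidate = false;
      std::vector<int> skip_list;  // stand-in for the real SkipList
    };

    void UpdateCandidates(std::vector<Page*>& evacuation_candidates) {
      for (Page* p : evacuation_candidates) {
        if (p->compaction_was_aborted) p->compaction_was_aborted = false;
        if (!p->is_evacuation_candidate) continue;  // aborted pages bail here
        // Clear the skip list only after roots were updated (see the
        // comment in the patch above).
        p->skip_list.clear();
      }
    }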
3656 | 3629 |
3657 { | 3630 { |
3658 TRACE_GC(heap()->tracer(), | 3631 TRACE_GC(heap()->tracer(), |
3659 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); | 3632 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); |
3660 // Update pointers from external string table. | 3633 // Update pointers from external string table. |
3661 heap_->UpdateReferencesInExternalStringTable( | 3634 heap_->UpdateReferencesInExternalStringTable( |
3662 &UpdateReferenceInExternalStringTableEntry); | 3635 &UpdateReferenceInExternalStringTableEntry); |
3663 | 3636 |
(...skipping 232 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3896 MarkBit mark_bit = Marking::MarkBitFrom(host); | 3869 MarkBit mark_bit = Marking::MarkBitFrom(host); |
3897 if (Marking::IsBlack(mark_bit)) { | 3870 if (Marking::IsBlack(mark_bit)) { |
3898 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 3871 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
3899 RecordRelocSlot(host, &rinfo, target); | 3872 RecordRelocSlot(host, &rinfo, target); |
3900 } | 3873 } |
3901 } | 3874 } |
3902 } | 3875 } |
3903 | 3876 |
3904 } // namespace internal | 3877 } // namespace internal |
3905 } // namespace v8 | 3878 } // namespace v8 |