Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 1890553002: Revert of [heap] Better integrate handling of aborted compaction pages (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 8 months ago
@@ -1,10 +1,10 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/heap/mark-compact.h"
 
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(...skipping 1800 matching lines...)
@@ -1811,29 +1811,20 @@
               Page::FromAddress(object->address())->owner()->identity());
     HeapObject* target_object = nullptr;
     if (TryEvacuateObject(target_space, object, &target_object)) {
       DCHECK(object->map_word().IsForwardingAddress());
       return true;
     }
     return false;
   }
 };
 
-class MarkCompactCollector::EvacuateRecordOnlyVisitor final
-    : public MarkCompactCollector::HeapObjectVisitor {
- public:
-  bool Visit(HeapObject* object) {
-    RecordMigratedSlotVisitor visitor;
-    object->IterateBodyFast(&visitor);
-    return true;
-  }
-};
 
 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
   PageIterator it(space);
   while (it.has_next()) {
     Page* p = it.next();
     if (!p->IsFlagSet(Page::BLACK_PAGE)) {
       DiscoverGreyObjectsOnPage(p);
     }
     if (marking_deque()->IsFull()) return;
   }
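Reviewer note: the class deleted in the hunk above is a heap-object visitor whose only job is to re-record the outgoing slots of objects on a page without moving anything. A minimal, self-contained sketch of the visitor protocol it follows (generic names, not the V8 API):

#include <vector>

struct Object {};  // payload elided for the sketch

class ObjectVisitor {
 public:
  virtual ~ObjectVisitor() = default;
  // Returning false aborts the page walk early.
  virtual bool Visit(Object* object) = 0;
};

class RecordOnlyVisitor final : public ObjectVisitor {
 public:
  bool Visit(Object* /* object */) override {
    // Here the real visitor would iterate the object's body and record each
    // outgoing reference (IterateBodyFast + RecordMigratedSlotVisitor above),
    // leaving the object itself in place.
    return true;  // never aborts: every live object gets processed
  }
};

bool VisitLiveObjects(const std::vector<Object*>& live, ObjectVisitor* v) {
  for (Object* o : live) {
    if (!v->Visit(o)) return false;  // propagate the abort to the caller
  }
  return true;
}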
(...skipping 1254 matching lines...)
@@ -3094,31 +3085,20 @@
   if (chunk->InNewSpace()) {
     DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
               NewSpacePage::kSweepingDone);
     success = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
     DCHECK(success);
     USE(success);
   } else {
     DCHECK(chunk->IsEvacuationCandidate());
     DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
     success = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
-    if (!success) {
-      // Aborted compaction page. We can record slots here to have them
-      // processed in parallel later on.
-      EvacuateRecordOnlyVisitor record_visitor;
-      success = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
-      DCHECK(success);
-      USE(success);
-      // We need to return failure here to indicate that we want this page
-      // added to the sweeper.
-      return false;
-    }
   }
   return success;
 }
 
 void MarkCompactCollector::Evacuator::Finalize() {
   heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
   heap()->code_space()->MergeCompactionSpace(
       compaction_spaces_.Get(CODE_SPACE));
   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
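Reviewer note: the `if (!success)` block removed above implemented a fallback for aborted compaction: try to move everything, and if the compaction space runs out mid-page, re-walk the page recording slots only, then report failure so the caller queues the page for sweeping. A control-flow sketch of that contract, with invented helper names (not V8 source):

struct Page {};

// Stubs standing in for the real evacuation steps (invented for this sketch).
bool TryFullEvacuation(Page*) { return false; }  // moves all live objects
void RecordSlotsOnly(Page*) {}  // re-walks the page, recording slots only

enum class PageResult { kEvacuated, kAborted };

PageResult EvacuateOrRecord(Page* page) {
  if (TryFullEvacuation(page)) {
    return PageResult::kEvacuated;  // all objects moved, mark bits cleared
  }
  // Partial failure: some objects moved, others did not. Re-visit the page
  // so slots are still recorded, and keep the mark bits for a later sweep.
  RecordSlotsOnly(page);
  return PageResult::kAborted;  // caller must hand the page to the sweeper
}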
(...skipping 40 matching lines...)
@@ -3165,39 +3145,44 @@
   typedef int* PerPageData;  // Pointer to number of aborted pages.
   typedef MarkCompactCollector::Evacuator* PerTaskData;
 
   static const bool NeedSequentialFinalization = true;
 
   static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
                                     MemoryChunk* chunk, PerPageData) {
     return evacuator->EvacuatePage(chunk);
   }
 
-  static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
-                                       bool success, PerPageData data) {
+  static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success,
+                                       PerPageData data) {
     if (chunk->InNewSpace()) {
       DCHECK(success);
     } else {
       Page* p = static_cast<Page*>(chunk);
       if (success) {
         DCHECK(p->IsEvacuationCandidate());
         DCHECK(p->SweepingDone());
         p->Unlink();
       } else {
         // We have partially compacted the page, i.e., some objects may have
         // moved, others are still in place.
+        // We need to:
+        // - Leave the evacuation candidate flag for later processing of slots
+        //   buffer entries.
+        // - Leave the slots buffer there for processing of entries added by
+        //   the write barrier.
+        // - Rescan the page as slot recording in the migration buffer only
+        //   happens upon moving (which we potentially didn't do).
+        // - Leave the page in the list of pages of a space since we could not
+        //   fully evacuate it.
+        DCHECK(p->IsEvacuationCandidate());
         p->SetFlag(Page::COMPACTION_WAS_ABORTED);
-        p->ClearEvacuationCandidate();
-        // Slots have already been recorded so we just need to add it to the
-        // sweeper.
-        heap->mark_compact_collector()->sweeper().AddLatePage(
-            p->owner()->identity(), p);
         *data += 1;
       }
     }
   }
 };
 
 void MarkCompactCollector::EvacuatePagesInParallel() {
   PageParallelJob<EvacuationJobTraits> job(
       heap_, heap_->isolate()->cancelable_task_manager());
 
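Reviewer note: EvacuationJobTraits above is consumed by PageParallelJob: ProcessPageInParallel runs on worker tasks, and because NeedSequentialFinalization is true, FinalizePageSequentially then runs once per page on the calling thread, where bumping the aborted-page counter behind PerPageData needs no synchronization. A simplified, self-contained sketch of that protocol (not the actual V8 driver):

#include <thread>
#include <vector>

struct Item { bool ok = true; };

struct ExampleTraits {
  typedef int* PerPageData;  // e.g. pointer to an aborted-page counter
  static bool Process(Item* it) { return it->ok; }  // worker-thread part
  static void Finalize(Item*, bool success, PerPageData aborted) {
    if (!success) *aborted += 1;  // main-thread part, no locking needed
  }
};

template <typename Traits>
void RunJob(const std::vector<Item*>& items,
            typename Traits::PerPageData data) {
  std::vector<int> results(items.size(), 0);
  std::vector<std::thread> workers;
  for (size_t i = 0; i < items.size(); ++i) {
    workers.emplace_back([&results, &items, i] {
      results[i] = Traits::Process(items[i]) ? 1 : 0;
    });
  }
  for (std::thread& t : workers) t.join();  // barrier before finalization
  for (size_t i = 0; i < items.size(); ++i) {
    Traits::Finalize(items[i], results[i] != 0, data);
  }
}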
(...skipping 223 matching lines...)
@@ -3427,44 +3412,83 @@
     object->IterateBody(map->instance_type(), size, visitor);
   }
 }
 
 void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
                                                      Page* page) {
   base::LockGuard<base::Mutex> guard(&mutex_);
   swept_list_[space->identity()].Add(page);
 }
 
+void MarkCompactCollector::SweepAbortedPages() {
+  // Second pass on aborted pages.
+  for (Page* p : evacuation_candidates_) {
+    if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+      p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
+      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
+      PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+      switch (space->identity()) {
+        case OLD_SPACE:
+          Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
+                            Sweeper::IGNORE_SKIP_LIST,
+                            Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr);
+          break;
+        case CODE_SPACE:
+          if (FLAG_zap_code_space) {
+            Sweeper::RawSweep<
+                Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
+                Sweeper::REBUILD_SKIP_LIST, Sweeper::ZAP_FREE_SPACE>(space, p,
+                                                                     nullptr);
+          } else {
+            Sweeper::RawSweep<
+                Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
+                Sweeper::REBUILD_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>(
+                space, p, nullptr);
+          }
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      sweeper().AddSweptPageSafe(space, p);
+    }
+  }
+}
+
+
 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
   Heap::RelocationLock relocation_lock(heap());
 
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
     EvacuationScope evacuation_scope(this);
 
     EvacuateNewSpacePrologue();
     EvacuatePagesInParallel();
     EvacuateNewSpaceEpilogue();
     heap()->new_space()->set_age_mark(heap()->new_space()->top());
   }
 
   UpdatePointersAfterEvacuation();
 
   // Give pages that are queued to be freed back to the OS. Note that filtering
   // slots only handles old space (for unboxed doubles), and thus map space can
   // still contain stale pointers. We only free the chunks after pointer updates
   // to still have access to page headers.
   heap()->FreeQueuedChunks();
 
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
+    // After updating all pointers, we can finally sweep the aborted pages,
+    // effectively overriding any forward pointers.
+    SweepAbortedPages();
 
     // EvacuateNewSpaceAndCandidates iterates over new space objects and for
     // ArrayBuffers either re-registers them as live or promotes them. This is
     // needed to properly free them.
     heap()->array_buffer_tracker()->FreeDead(false);
 
     // Deallocate evacuated candidate pages.
     ReleaseEvacuationCandidates();
   }
 
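Reviewer note: the ordering restored in this hunk matters. Evacuation leaves forwarding addresses in the old copies of moved objects, and the pointer-updating phase still reads them, so sweeping an aborted page any earlier would destroy information that is still needed. A sketch of the phase order (stub functions named after the methods above, bodies elided):

// Stub phases standing in for the MarkCompactCollector methods above.
void EvacuatePagesInParallel() {}        // may leave aborted, half-moved pages
void UpdatePointersAfterEvacuation() {}  // consumes forwarding addresses
void SweepAbortedPages() {}              // reclaims dead space + old forwarders
void ReleaseEvacuationCandidates() {}    // frees fully evacuated pages

void EvacuationPhases() {
  EvacuatePagesInParallel();
  UpdatePointersAfterEvacuation();
  // Only now is it safe to sweep aborted pages: no slot still points at a
  // forwarding address, so overwriting forwarders cannot lose information.
  SweepAbortedPages();
  ReleaseEvacuationCandidates();
}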
(...skipping 137 matching lines...)
@@ -3608,29 +3632,32 @@
     Heap* heap = this->heap();
     TRACE_GC(heap->tracer(),
              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
     UpdatePointersInParallel<OLD_TO_OLD>(heap_);
   }
 
   {
     TRACE_GC(heap()->tracer(),
              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
     for (Page* p : evacuation_candidates_) {
-      if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
-        p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
-      }
-      if (!p->IsEvacuationCandidate()) continue;
+      DCHECK(p->IsEvacuationCandidate());
       // Important: skip list should be cleared only after roots were updated
       // because root iteration traverses the stack and might have to find
       // code objects from non-updated pc pointing into evacuation candidate.
       SkipList* list = p->skip_list();
       if (list != NULL) list->Clear();
+
+      // First pass on aborted pages, fixing up all live objects.
+      if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+        p->ClearEvacuationCandidate();
+        VisitLiveObjectsBody(p, &updating_visitor);
+      }
     }
   }
 
   {
     TRACE_GC(heap()->tracer(),
              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
     // Update pointers from external string table.
     heap_->UpdateReferencesInExternalStringTable(
         &UpdateReferenceInExternalStringTableEntry);
 
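Reviewer note: the first pass added above (VisitLiveObjectsBody with the updating visitor) rewrites fields of surviving objects on an aborted page that still point at moved objects. A simplified model of the forwarding scheme this relies on (an explicit flag stands in for V8's tagged map-word encoding):

#include <cstdint>

struct Obj {
  uintptr_t first_word = 0;  // map pointer, or new location once moved
  bool forwarded = false;    // stand-in for V8's map-word tag bits
};

// Evacuation leaves a forwarding address in the old copy of a moved object.
void Forward(Obj* old_copy, Obj* new_copy) {
  old_copy->first_word = reinterpret_cast<uintptr_t>(new_copy);
  old_copy->forwarded = true;
}

// Pointer updating rewrites any slot that still refers to an old copy.
void UpdateSlot(Obj** slot) {
  if ((*slot)->forwarded) {
    *slot = reinterpret_cast<Obj*>((*slot)->first_word);
  }
}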
(...skipping 232 matching lines...)
@@ -3869,10 +3896,10 @@
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(host, &rinfo, target);
     }
   }
 }
 
 }  // namespace internal
 }  // namespace v8