Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index ac595e6f5fb8d7b800dc22248d61549718e890d1..edc75cfa419634389576b38155f4c3ab4bb35aee 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -1818,6 +1818,28 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
   }
 };
 
+class MarkCompactCollector::EvacuateRecordOnlyVisitor final
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  explicit EvacuateRecordOnlyVisitor(AllocationSpace space) : space_(space) {}
+
+  inline bool Visit(HeapObject* object) {
+    if (space_ == OLD_SPACE) {
+      RecordMigratedSlotVisitor visitor;
+      object->IterateBody(&visitor);
+    } else {
+      DCHECK_EQ(space_, CODE_SPACE);
+      // Add a typed slot for the whole code object.
+      RememberedSet<OLD_TO_OLD>::InsertTyped(

Michael Lippautz
2016/04/14 07:50:11
We also need to record within CODE_SPACE. I am usi

+          Page::FromAddress(object->address()), RELOCATED_CODE_OBJECT,
+          object->address());
+    }
+    return true;
+  }
+
+ private:
+  AllocationSpace space_;
+};
+
 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
   PageIterator it(space);
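
The new visitor records slots without moving anything. Roughly: for an old-space object every outgoing pointer into an evacuation candidate is re-registered individually, while for a code object a single typed entry covering the whole object is recorded, since code embeds its pointers in the instruction stream. A simplified, self-contained sketch of that distinction (the types below are stand-ins, not V8's HeapObject/Page/RememberedSet API):

    #include <set>
    #include <vector>

    // Simplified stand-ins; the real code uses HeapObject, Page,
    // RememberedSet<OLD_TO_OLD> and RecordMigratedSlotVisitor instead.
    struct Object {
      std::vector<Object*> fields;           // outgoing pointer slots
      bool is_code = false;                  // true if it lives in CODE_SPACE
      bool on_evacuation_candidate = false;  // target page will be compacted
    };

    struct RememberedSet {
      std::set<Object**> untyped_slots;      // individual pointer fields
      std::set<Object*> typed_code_entries;  // "whole code object" entries
    };

    // Record-only pass over one live object on an aborted page: nothing is
    // moved, but every slot the pointer-update phase needs is (re-)registered.
    inline void RecordSlots(Object* object, RememberedSet* set) {
      if (!object->is_code) {
        // Old space: record each field that points at an evacuation candidate,
        // as the migration visitor would have done while moving the object.
        for (Object*& field : object->fields) {
          if (field != nullptr && field->on_evacuation_candidate) {
            set->untyped_slots.insert(&field);
          }
        }
      } else {
        // Code space: pointers are embedded in the instruction stream, so one
        // typed entry covering the whole object is recorded instead, analogous
        // to the RELOCATED_CODE_OBJECT entry above.
        set->typed_code_entries.insert(object);
      }
    }
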
@@ -3092,6 +3114,17 @@ bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
     DCHECK(chunk->IsEvacuationCandidate());
     DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
     success = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
+    if (!success) {
+      // Aborted compaction page. We can record slots here to have them
+      // processed in parallel later on.
+      EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
+      success = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
+      DCHECK(success);
+      USE(success);
+      // We need to return failure here to indicate that we want this page added
+      // to the sweeper.
+      return false;
+    }
   }
   return success;
 }
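
In isolation, the retry added to EvacuatePage is: try a full evacuation; if it aborts, walk the same live objects again with the record-only visitor while keeping the mark bits, and still report failure so the page is flagged as aborted and swept later rather than released. A compressed stand-alone sketch of that control flow, with hypothetical stand-ins for EvacuateSinglePage and the two visitors (the simulated allocation failure only exists to make the abort path exercisable):

    #include <cassert>
    #include <vector>

    // Hypothetical stand-ins mirroring the names in the diff; not V8's real API.
    struct HeapObject { bool migrated = false; };

    struct Page {
      std::vector<HeapObject> live_objects;
      bool compaction_was_aborted = false;
    };

    enum MarkbitMode { kClearMarkbits, kKeepMarking };

    // Walks every live object with |visitor| and bails out on the first failure.
    template <MarkbitMode mode, typename Visitor>
    bool EvacuateSinglePage(Page* page, Visitor* visitor) {
      for (HeapObject& object : page->live_objects) {
        if (!visitor->Visit(&object)) return false;
      }
      return true;
    }

    // Tries to move objects; fails when "allocating" a new home does not work.
    struct EvacuateOldSpaceVisitor {
      bool allocation_works = true;  // flip to false to simulate an aborted page
      bool Visit(HeapObject* object) {
        if (!allocation_works) return false;
        object->migrated = true;
        return true;
      }
    };

    // Only records slots; it never allocates, so it cannot fail.
    struct EvacuateRecordOnlyVisitor {
      int recorded_objects = 0;
      bool Visit(HeapObject*) { ++recorded_objects; return true; }
    };

    bool EvacuatePage(Page* page, EvacuateOldSpaceVisitor* old_space_visitor,
                      EvacuateRecordOnlyVisitor* record_visitor) {
      bool success = EvacuateSinglePage<kClearMarkbits>(page, old_space_visitor);
      if (!success) {
        // Aborted compaction page: re-walk it with marking kept intact, purely
        // to record slots for the parallel pointer-update phase.
        success = EvacuateSinglePage<kKeepMarking>(page, record_visitor);
        assert(success);
        // Returning failure tells the caller to flag the page as aborted and to
        // hand it to the sweeper later instead of releasing it.
        page->compaction_was_aborted = true;
        return false;
      }
      return true;
    }
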
@@ -3152,8 +3185,8 @@ class EvacuationJobTraits {
     return evacuator->EvacuatePage(chunk);
   }
 
-  static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success,
-                                       PerPageData data) {
+  static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
+                                       bool success, PerPageData data) {
     if (chunk->InNewSpace()) {
       DCHECK(success);
     } else {
@@ -3165,17 +3198,10 @@ class EvacuationJobTraits {
       } else {
         // We have partially compacted the page, i.e., some objects may have
         // moved, others are still in place.
-        // We need to:
-        // - Leave the evacuation candidate flag for later processing of slots
-        //   buffer entries.
-        // - Leave the slots buffer there for processing of entries added by
-        //   the write barrier.
-        // - Rescan the page as slot recording in the migration buffer only
-        //   happens upon moving (which we potentially didn't do).
-        // - Leave the page in the list of pages of a space since we could not
-        //   fully evacuate it.
-        DCHECK(p->IsEvacuationCandidate());
         p->SetFlag(Page::COMPACTION_WAS_ABORTED);
+        p->ClearEvacuationCandidate();
+        // Slots have already been recorded so we just need to add it to the
+        // sweeper.

Michael Lippautz
2016/04/14 07:50:11
We cannot add the page to the sweeper here because

         *data += 1;
       }
     }
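
Taken together with the EvacuatePage change, sequential finalization now distinguishes three outcomes per page: new-space pages must always succeed, fully evacuated candidates are unlinked, and aborted candidates are merely flagged and counted, since (per the review comment above) they cannot be handed to the sweeper at this point. A rough sketch of that decision, using stand-in types rather than V8's MemoryChunk/Page/PerPageData:

    #include <cassert>

    // Stand-ins for the per-page state touched in FinalizePageSequentially.
    struct Page {
      bool in_new_space = false;
      bool is_evacuation_candidate = false;
      bool compaction_was_aborted = false;
      bool unlinked = false;
    };

    // |aborted_pages| plays the role of the PerPageData counter in the diff.
    inline void FinalizePageSequentially(Page* page, bool success,
                                         int* aborted_pages) {
      if (page->in_new_space) {
        assert(success);  // new-space evacuation is not allowed to fail
        return;
      }
      if (success) {
        // Fully evacuated candidate: unlink it so it can be released once all
        // pointers have been updated.
        assert(page->is_evacuation_candidate);
        page->unlinked = true;
      } else {
        // Partially compacted page: some objects moved, others stayed. Slots
        // were already recorded by the record-only pass, so the page is only
        // flagged and counted here; it is handed to the sweeper later, after
        // the pointer-update phase.
        page->compaction_was_aborted = true;
        page->is_evacuation_candidate = false;
        ++(*aborted_pages);
      }
    }
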
@@ -3419,42 +3445,6 @@ void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
   swept_list_[space->identity()].Add(page);
 }
 
-void MarkCompactCollector::SweepAbortedPages() {
-  // Second pass on aborted pages.
-  for (Page* p : evacuation_candidates_) {
-    if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
-      p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
-      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
-      PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-      switch (space->identity()) {
-        case OLD_SPACE:
-          Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
-                            Sweeper::IGNORE_SKIP_LIST,
-                            Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr);
-          break;
-        case CODE_SPACE:
-          if (FLAG_zap_code_space) {
-            Sweeper::RawSweep<
-                Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
-                Sweeper::REBUILD_SKIP_LIST, Sweeper::ZAP_FREE_SPACE>(space, p,
-                                                                     nullptr);
-          } else {
-            Sweeper::RawSweep<
-                Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
-                Sweeper::REBUILD_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>(
-                space, p, nullptr);
-          }
-          break;
-        default:
-          UNREACHABLE();
-          break;
-      }
-      sweeper().AddSweptPageSafe(space, p);
-    }
-  }
-}
-
-
 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
   Heap::RelocationLock relocation_lock(heap());
@@ -3479,9 +3469,18 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
-    // After updating all pointers, we can finally sweep the aborted pages,
-    // effectively overriding any forward pointers.
-    SweepAbortedPages();
+
+    for (Page* p : evacuation_candidates_) {

Michael Lippautz
2016/04/14 07:50:11
Moving this part to MC_EVACUATE_CLEAN_UP as it has

+      // Important: skip list should be cleared only after roots were updated
+      // because root iteration traverses the stack and might have to find
+      // code objects from non-updated pc pointing into evacuation candidate.
+      SkipList* list = p->skip_list();
+      if (list != NULL) list->Clear();
+      if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+        sweeper().AddLatePage(p->owner()->identity(), p);
+        p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
+      }
+    }
 
     // EvacuateNewSpaceAndCandidates iterates over new space objects and for
     // ArrayBuffers either re-registers them as live or promotes them. This is
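
The aborted pages are finally handed over during MC_EVACUATE_CLEAN_UP, after all pointers (including roots) have been updated: skip lists are cleared only then, and each aborted page is queued on the sweeper via AddLatePage. A stand-alone sketch of that cleanup pass (Page, SkipList and Sweeper here are stand-ins, and the mutex is an assumption about why a separate "late page" entry point is needed when sweeping may run concurrently):

    #include <mutex>
    #include <utility>
    #include <vector>

    // Stand-ins: a page carrying a skip list and an aborted-compaction flag,
    // and a sweeper that accepts pages after sweeping has already been set up.
    struct SkipList {
      void Clear() {}
    };

    struct Page {
      SkipList* skip_list = nullptr;
      bool compaction_was_aborted = false;
      int space_identity = 0;  // e.g. OLD_SPACE or CODE_SPACE
    };

    class Sweeper {
     public:
      // "Late" pages arrive after the regular sweeping queues were filled and
      // possibly while sweeper tasks are running, hence the lock. This mirrors
      // the intent of AddLatePage in the diff; the locking is an assumption.
      void AddLatePage(int space_identity, Page* page) {
        std::lock_guard<std::mutex> guard(mutex_);
        late_pages_.emplace_back(space_identity, page);
      }

     private:
      std::mutex mutex_;
      std::vector<std::pair<int, Page*>> late_pages_;
    };

    // Cleanup pass over the evacuation candidates, run only after all pointers
    // (including the roots) have been updated.
    inline void CleanUpEvacuationCandidates(const std::vector<Page*>& candidates,
                                            Sweeper* sweeper) {
      for (Page* p : candidates) {
        // Skip lists must survive until root iteration is done: stack scanning
        // may still need them to find code objects from a not-yet-updated pc
        // pointing into an evacuation candidate.
        if (p->skip_list != nullptr) p->skip_list->Clear();
        if (p->compaction_was_aborted) {
          // Aborted pages were never released; queue them for late sweeping.
          sweeper->AddLatePage(p->space_identity, p);
          p->compaction_was_aborted = false;
        }
      }
    }
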
@@ -3637,25 +3636,6 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
   {
     TRACE_GC(heap()->tracer(),
-             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
-    for (Page* p : evacuation_candidates_) {
-      DCHECK(p->IsEvacuationCandidate());
-      // Important: skip list should be cleared only after roots were updated
-      // because root iteration traverses the stack and might have to find
-      // code objects from non-updated pc pointing into evacuation candidate.
-      SkipList* list = p->skip_list();
-      if (list != NULL) list->Clear();
-
-      // First pass on aborted pages, fixing up all live objects.
-      if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
-        p->ClearEvacuationCandidate();
-        VisitLiveObjectsBody(p, &updating_visitor);
-      }
-    }
-  }
-
-  {
-    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
     // Update pointers from external string table.
     heap_->UpdateReferencesInExternalStringTable(