Chromium Code Reviews

Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index e537689c4a5ca4ba35530fc8d81a55f3bfd37596..01abca195311ef85d9d1389e202d25d2d9e0cf44 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -60,6 +60,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
       marking_deque_memory_committed_(0),
       code_flusher_(nullptr),
       have_code_to_deoptimize_(false),
+      sweeping_list_shared_(nullptr),
       compacting_(false),
       sweeping_in_progress_(false),
       pending_sweeper_tasks_semaphore_(0),
@@ -481,6 +482,17 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
       heap_->mark_compact_collector()->SweepInParallel(
           heap_->paged_space(space_id), 0);
     }
+    std::vector<Page*>* shared_sweeping_list = nullptr;
+    {
+      base::LockGuard<base::Mutex> guard(
+          heap_->mark_compact_collector()->swept_pages_mutex());
+      shared_sweeping_list =
+          heap_->mark_compact_collector()->sweeping_list_shared_;
+    }
+    if (shared_sweeping_list != nullptr) {
+      heap_->mark_compact_collector()->SweepInParallel(*shared_sweeping_list,
+                                                       heap_->old_space(), 0);
+    }
     heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
   }
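Note: the task takes swept_pages_mutex() only long enough to snapshot the sweeping_list_shared_ pointer; the sweep itself runs without the list lock because each page is serialized through its own mutex (see SweepInParallel(Page*, PagedSpace*) below, which uses page->mutex()->TryLock()). A minimal standalone sketch of that snapshot-then-use pattern, with std::mutex and a stand-in Page type replacing the V8 internals:

    #include <mutex>
    #include <vector>

    struct Page {};  // stand-in for v8::internal::Page

    std::mutex shared_list_mutex;               // stands in for swept_pages_mutex()
    std::vector<Page*>* shared_list = nullptr;  // published by the main thread

    void SweeperTaskBody() {
      std::vector<Page*>* local = nullptr;
      {
        std::lock_guard<std::mutex> guard(shared_list_mutex);
        local = shared_list;  // snapshot the pointer under the lock
      }
      if (local != nullptr) {
        for (Page* p : *local) {
          // Sweep p; per-page locking makes this safe without the list lock.
          (void)p;
        }
      }
    }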
@@ -496,8 +508,6 @@ void MarkCompactCollector::StartSweeperThreads() {
       new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
       new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask);
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask);
 }
@@ -1765,6 +1775,33 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
   HashMap* local_pretenuring_feedback_;
 };
+class MarkCompactCollector::EvacuateNewSpacePageVisitor final
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
+
+  static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
+    page->heap()->new_space()->ReplaceWithEmptyPage(page);
Hannes Payer (out of office), 2016/04/08 10:42:40:
    Why don't we take care of the new space size at th
Michael Lippautz, 2016/04/08 11:30:00:
    As discussed offline: Let's keep it for now. We ca
+    Page* new_page = Page::Convert(page, owner);
+    new_page->SetFlag(Page::FAST_EVACUATION);
+  }
+
+  bool Visit(HeapObject* object) {
+    promoted_size_ += object->Size();
+    if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
+      object->GetHeap()->array_buffer_tracker()->Promote(
+          JSArrayBuffer::cast(object));
+    }
+    RecordMigratedSlotVisitor visitor;
+    object->IterateBodyFast(&visitor);
+    return true;
+  }
+
+  intptr_t promoted_size() { return promoted_size_; }
+
+ private:
+  intptr_t promoted_size_;
+};
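Note: unlike EvacuateNewSpaceVisitor, this visitor copies nothing. The page is retagged wholesale (Page::Convert plus the FAST_EVACUATION flag), so visiting a live object only accounts its size, promotes any backing array buffer, and re-records its outgoing slots. A reduced sketch of the promote-in-place idea, with stand-in types (illustration only, not the V8 API):

    #include <cstdint>

    struct HeapObject { int size; };  // stand-in

    // Visitor for a page promoted in place: objects do not move, so the
    // visit only tallies bytes (V8 additionally re-records slots).
    struct PromoteInPlaceVisitor {
      intptr_t promoted_size = 0;
      bool Visit(HeapObject* object) {
        promoted_size += object->size;  // account for the object; do not copy
        return true;                    // continue with the next live object
      }
    };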
 class MarkCompactCollector::EvacuateOldSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
@@ -2930,17 +2967,24 @@ void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
   newspace_evacuation_candidates_.Rewind(0);
 }
-
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
+  // NewSpacePages with more live bytes than this threshold qualify for fast
+  // evacuation.
+  static int FastEvacuationThreshold() {
+    return FLAG_page_evacuation_threshold * NewSpacePage::kAllocatableMemory /
+           100;
+  }
+
   explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
         compaction_spaces_(collector->heap()),
         local_pretenuring_feedback_(HashMap::PointersMatch,
                                     kInitialLocalPretenuringFeedbackCapacity),
-        new_space_visitor_(collector->heap(), &compaction_spaces_,
-                           &local_pretenuring_feedback_),
-        old_space_visitor_(collector->heap(), &compaction_spaces_),
+        evac_new_space_visitor_(collector->heap(), &compaction_spaces_,
+                                &local_pretenuring_feedback_),
+        evac_new_space_page_visitor_(),
+        evac_old_space_visitor_(collector->heap(), &compaction_spaces_),
         duration_(0.0),
         bytes_compacted_(0) {}
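Note: FastEvacuationThreshold() turns the percentage flag into an absolute live-byte count per page. A worked instance of the formula, assuming a flag value of 70 and roughly 500 KB of allocatable page memory (both numbers are illustrative assumptions, not taken from this CL):

    #include <cstdio>

    int main() {
      const int page_evacuation_threshold = 70;   // percent; assumed flag value
      const int kAllocatableMemory = 500 * 1024;  // bytes; assumed page payload
      // Same formula as Evacuator::FastEvacuationThreshold().
      const int threshold = page_evacuation_threshold * kAllocatableMemory / 100;
      std::printf("pages with more than %d live bytes qualify\n", threshold);
      return 0;  // prints 358400
    }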
@@ -2962,6 +3006,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
     bytes_compacted_ += bytes_compacted;
   }
+  template <IterationMode mode>
   inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
   MarkCompactCollector* collector_;
@@ -2971,32 +3016,36 @@ class MarkCompactCollector::Evacuator : public Malloced {
   HashMap local_pretenuring_feedback_;
   // Visitors for the corresponding spaces.
-  EvacuateNewSpaceVisitor new_space_visitor_;
-  EvacuateOldSpaceVisitor old_space_visitor_;
+  EvacuateNewSpaceVisitor evac_new_space_visitor_;
+  EvacuateNewSpacePageVisitor evac_new_space_page_visitor_;
+  EvacuateOldSpaceVisitor evac_old_space_visitor_;
   // Book keeping info.
   double duration_;
   intptr_t bytes_compacted_;
 };
+template <MarkCompactCollector::IterationMode mode>
 bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
     MemoryChunk* p, HeapObjectVisitor* visitor) {
   bool success = false;
-  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
+  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
+         p->IsFlagSet(Page::FAST_EVACUATION));
   int saved_live_bytes = p->LiveBytes();
   double evacuation_time;
   {
     AlwaysAllocateScope always_allocate(heap()->isolate());
     TimedScope timed_scope(&evacuation_time);
-    success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
+    success = collector_->VisitLiveObjects(p, visitor, mode);
   }
   if (FLAG_trace_evacuation) {
-    PrintIsolate(heap()->isolate(),
-                 "evacuation[%p]: page=%p new_space=%d executable=%d "
-                 "live_bytes=%d time=%f\n",
-                 this, p, p->InNewSpace(),
-                 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
-                 evacuation_time);
+    PrintIsolate(
+        heap()->isolate(),
+        "evacuation[%p]: page=%p page_evacuation=%d new_space=%d executable=%d "
+        "live_bytes=%d time=%f\n",
+        this, p, mode == kKeepMarking, p->InNewSpace(),
+        p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
+        evacuation_time);
   }
   if (success) {
     ReportCompactionProgress(evacuation_time, saved_live_bytes);
@@ -3009,13 +3058,20 @@ bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
   if (chunk->InNewSpace()) {
     DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
               NewSpacePage::kSweepingDone);
-    success = EvacuateSinglePage(chunk, &new_space_visitor_);
+    success =
+        EvacuateSinglePage<kClearMarkbits>(chunk, &evac_new_space_visitor_);
     DCHECK(success);
     USE(success);
   } else {
-    DCHECK(chunk->IsEvacuationCandidate());
     DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
-    success = EvacuateSinglePage(chunk, &old_space_visitor_);
+    if (chunk->IsFlagSet(MemoryChunk::FAST_EVACUATION)) {
+      success = EvacuateSinglePage<kKeepMarking>(chunk,
+                                                 &evac_new_space_page_visitor_);
+    } else {
+      DCHECK(chunk->IsEvacuationCandidate());
+      success =
+          EvacuateSinglePage<kClearMarkbits>(chunk, &evac_old_space_visitor_);
+    }
   }
   return success;
 }
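Note: the IterationMode template parameter is what separates page promotion from copying evacuation: promoted pages are visited with kKeepMarking so their mark bits survive as liveness information for the deferred sweep, while pages whose objects were copied out clear them. A compact sketch of the dispatch, using stand-in types (illustration only):

    enum IterationMode { kKeepMarking, kClearMarkbits };

    struct Chunk {  // stand-in for MemoryChunk
      bool in_new_space;
      bool fast_evacuation;
    };

    template <IterationMode mode>
    bool EvacuateOnePage(Chunk* p) {
      // kClearMarkbits: objects were copied away; old mark bits are stale.
      // kKeepMarking: objects stay put; mark bits feed the later sweep.
      (void)p;
      return true;
    }

    bool EvacuateOne(Chunk* p) {
      if (!p->in_new_space && p->fast_evacuation) {
        return EvacuateOnePage<kKeepMarking>(p);  // promoted in place
      }
      return EvacuateOnePage<kClearMarkbits>(p);  // objects copied out
    }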
@@ -3025,12 +3081,15 @@ void MarkCompactCollector::Evacuator::Finalize() {
   heap()->code_space()->MergeCompactionSpace(
       compaction_spaces_.Get(CODE_SPACE));
   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
-  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
+  heap()->IncrementPromotedObjectsSize(
+      evac_new_space_visitor_.promoted_size() +
+      evac_new_space_page_visitor_.promoted_size());
   heap()->IncrementSemiSpaceCopiedObjectSize(
-      new_space_visitor_.semispace_copied_size());
+      evac_new_space_visitor_.semispace_copied_size());
   heap()->IncrementYoungSurvivorsCounter(
-      new_space_visitor_.promoted_size() +
-      new_space_visitor_.semispace_copied_size());
+      evac_new_space_visitor_.promoted_size() +
+      evac_new_space_visitor_.semispace_copied_size() +
+      evac_new_space_page_visitor_.promoted_size());
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
 }
@@ -3080,6 +3139,8 @@ class EvacuationJobTraits {
                                          PerPageData data) {
     if (chunk->InNewSpace()) {
       DCHECK(success);
+    } else if (chunk->IsFlagSet(Page::FAST_EVACUATION)) {
+      // Nothing to do here, as the page is still owned by the compaction space.
     } else {
       Page* p = static_cast<Page*>(chunk);
       if (success) {
@@ -3118,6 +3179,11 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   }
   for (NewSpacePage* page : newspace_evacuation_candidates_) {
     live_bytes += page->LiveBytes();
+    if (!page->NeverEvacuate() &&
+        (page->LiveBytes() > Evacuator::FastEvacuationThreshold()) &&
+        page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
+      EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
+    }
     job.AddPage(page, &abandoned_pages);
   }
   DCHECK_GE(job.NumberOfPages(), 1);
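Note: a new-space page is promoted wholesale only when all three conditions hold: it is evacuable at all, it carries more live bytes than the threshold, and it lies below the age mark (its objects already survived a scavenge). The predicate restated as a standalone sketch, with stand-in fields for the page queries (illustration only):

    struct PageInfo {        // stand-ins for the NewSpacePage queries above
      bool never_evacuate;   // NeverEvacuate()
      int live_bytes;        // LiveBytes()
      bool below_age_mark;   // NEW_SPACE_BELOW_AGE_MARK
    };

    bool QualifiesForFastEvacuation(const PageInfo& page, int threshold) {
      // Dense, aged pages are cheaper to retag than to copy object by
      // object; sparse or freshly allocated pages still get copied.
      return !page.never_evacuate && page.live_bytes > threshold &&
             page.below_age_mark;
    }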
@@ -3141,6 +3207,45 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   }
   delete[] evacuators;
+  bool fast_evac_pages = false;
Hannes Payer (out of office), 2016/04/08 10:42:40:
    Can you factor this block of code out into a metho
Michael Lippautz, 2016/04/08 11:30:00:
    Done.
+  std::vector<Page*>* shared_sweep_list = nullptr;
+  for (MemoryChunk* chunk : newspace_evacuation_candidates_) {
+    if (chunk->IsFlagSet(Page::FAST_EVACUATION)) {
+      Page* page = reinterpret_cast<Page*>(chunk);
+      page->ClearFlag(Page::FAST_EVACUATION);
+      page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
+      PagedSpace* space = static_cast<PagedSpace*>(page->owner());
+      DCHECK_EQ(space, heap()->old_space());
+      int to_sweep = page->area_size() - page->LiveBytes();
+      space->accounting_stats_.ShrinkSpace(to_sweep);
+      space->UnlinkFreeListCategories(page);
Hannes Payer (out of office), 2016/04/08 10:42:40:
    New space pages never have free lists. Can you DCH
Michael Lippautz, 2016/04/08 11:30:00:
    Done.
+      page->ForAllFreeListCategories(
+          [](FreeListCategory* category) { category->Reset(); });
+      if (shared_sweep_list == nullptr) {
+        shared_sweep_list = new std::vector<Page*>();
+      }
+      shared_sweep_list->push_back(page);
+      fast_evac_pages = true;
+    }
+  }
+  if (fast_evac_pages) {
+    {
+      base::LockGuard<base::Mutex> guard(swept_pages_mutex());
+      sweeping_list_shared_ = shared_sweep_list;
+    }
+    heap()
+        ->external_string_table_
+        .CleanUp<Heap::ExternalStringTable::CleanupMode::kPromoteOnly>();
+  }
+  if (FLAG_concurrent_sweeping) {
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask);
+  } else {
+    if (fast_evac_pages) {
+      SweepInParallel(*sweeping_list_shared_, heap()->old_space(), 0);
+    }
+  }
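Note: the dead bytes on a promoted page are handed to the sweeper as reclaimable memory. The owning space's accounting is shrunk by area_size() - LiveBytes() up front, and the page is queued on sweeping_list_shared_ for either the background SweeperTask or, when concurrent sweeping is disabled, an immediate synchronous sweep. A worked instance of the accounting step, with assumed numbers:

    #include <cstdio>

    int main() {
      // Assumed example values, for illustration only.
      const int area_size = 500 * 1024;   // allocatable bytes on the page
      const int live_bytes = 420 * 1024;  // marked-live bytes after marking
      // Bytes the sweeper will turn back into free-list memory.
      const int to_sweep = area_size - live_bytes;
      std::printf("sweeper reclaims %d bytes\n", to_sweep);  // 81920
      return 0;
    }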
+
   if (FLAG_trace_evacuation) {
     PrintIsolate(
         isolate(),
@@ -3252,7 +3357,6 @@ static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }
-
 void MarkCompactCollector::InvalidateCode(Code* code) {
   if (heap_->incremental_marking()->IsCompacting() &&
       !ShouldSkipEvacuationSlotRecording(code)) {
@@ -3472,12 +3576,8 @@ class PointerUpdateJobTraits {
   static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
     MapWord map_word = object->map_word();
-    // Since we only filter invalid slots in old space, the store buffer can
-    // still contain stale pointers in large object and in map spaces. Ignore
-    // these pointers here.
-    DCHECK(map_word.IsForwardingAddress() ||
-           !object->GetHeap()->old_space()->Contains(
-               reinterpret_cast<Address>(address)));
+    // There could still be stale pointers in large object space, map space,
+    // and old space for pages that have been promoted.
     if (map_word.IsForwardingAddress()) {
       // Update the corresponding slot.
       *address = map_word.ToForwardingAddress();
@@ -3613,10 +3713,18 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
                                           int required_freed_bytes,
                                           int max_pages) {
+  return SweepInParallel(sweeping_list(space), space, required_freed_bytes,
+                         max_pages);
+}
+
+int MarkCompactCollector::SweepInParallel(std::vector<Page*>& pages,
+                                          PagedSpace* space,
+                                          int required_freed_bytes,
+                                          int max_pages) {
   int max_freed = 0;
   int max_freed_overall = 0;
   int page_count = 0;
-  for (Page* p : sweeping_list(space)) {
+  for (Page* p : pages) {
     max_freed = SweepInParallel(p, space);
     DCHECK(max_freed >= 0);
     if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
@@ -3631,7 +3739,6 @@ int MarkCompactCollector::SweepInParallel(PagedSpace* space,
   return max_freed_overall;
 }
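Note: the space-based SweepInParallel entry point now forwards to a list-based overload, so a single sweeping loop serves both a space's own sweeping list and the shared list of promoted pages. A self-contained sketch of the forwarding pattern (stand-in types, not the V8 signatures):

    #include <vector>

    struct Page {};
    struct Space { std::vector<Page*> sweeping_list; };

    int SweepOnePage(Page* page) { (void)page; return 0; }  // bytes freed; stub

    // List-based overload: sweeps an arbitrary list of pages.
    int SweepPages(const std::vector<Page*>& pages, int required_freed) {
      int max_freed_overall = 0;
      for (Page* p : pages) {
        int freed = SweepOnePage(p);
        if (freed > max_freed_overall) max_freed_overall = freed;
        if (required_freed > 0 && freed >= required_freed) break;  // early out
      }
      return max_freed_overall;
    }

    // Space-based entry point simply forwards the space's own list.
    int SweepPages(Space* space, int required_freed) {
      return SweepPages(space->sweeping_list, required_freed);
    }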
-
 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
   int max_freed = 0;
   if (page->mutex()->TryLock()) {
@@ -3776,6 +3883,11 @@ void MarkCompactCollector::ParallelSweepSpacesComplete() {
   sweeping_list(heap()->old_space()).clear();
   sweeping_list(heap()->code_space()).clear();
   sweeping_list(heap()->map_space()).clear();
+  if (sweeping_list_shared_ != nullptr) {
+    base::LockGuard<base::Mutex> guard(swept_pages_mutex());
+    delete sweeping_list_shared_;
+    sweeping_list_shared_ = nullptr;
+  }
 }
 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }