| Index: src/heap/mark-compact.cc |
| diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc |
| index 42d1b15ec127dd85de33bd6b3950c363239cc6ba..8dd065a251a6bf766753196e1d54f732a93c8709 100644 |
| --- a/src/heap/mark-compact.cc |
| +++ b/src/heap/mark-compact.cc |
| @@ -299,6 +299,28 @@ class FullEvacuationVerifier : public EvacuationVerifier { |
| } |
| }; |
| +class YoungGenerationEvacuationVerifier : public EvacuationVerifier { |
| + public: |
| + explicit YoungGenerationEvacuationVerifier(Heap* heap) |
| + : EvacuationVerifier(heap) {} |
| + |
| + void Run() override { |
| + VerifyRoots(VISIT_ALL_IN_SCAVENGE); |
| + VerifyEvacuation(heap_->new_space()); |
|
ulan
2017/04/26 09:51:28
Verification should be done for the whole heap.
Michael Lippautz
2017/05/02 11:22:00
Done.
|
| + } |
| + |
| + protected: |
| + void VerifyPointers(Object** start, Object** end) override { |
| + for (Object** current = start; current < end; current++) { |
| + if ((*current)->IsHeapObject()) { |
| + HeapObject* object = HeapObject::cast(*current); |
| + if (!heap_->InNewSpace(object)) return; |
|
ulan
2017/04/26 09:51:28
Let's instead do a check that object is not in from space.
Michael Lippautz
2017/05/02 11:22:00
Done.
|
| + CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); |
| + } |
| + } |
| + } |
| +}; |
| + |
| } // namespace |
| #endif // VERIFY_HEAP |
| @@ -1595,6 +1617,65 @@ class ExternalStringTableCleaner : public RootVisitor { |
| Heap* heap_; |
| }; |
| +// Helper class for pruning the string table. |
| +class YoungGenerationExternalStringTableCleaner : public RootVisitor { |
| + public: |
| + YoungGenerationExternalStringTableCleaner( |
| + const MinorMarkCompactCollector& collector) |
| + : heap_(collector.heap()), collector_(collector) {} |
| + |
| + void VisitRootPointers(Root root, Object** start, Object** end) override { |
| + DCHECK_EQ(root, Root::kExternalStringsTable); |
| + // Visit all HeapObject pointers in [start, end). |
| + for (Object** p = start; p < end; p++) { |
| + Object* o = *p; |
| + if (o->IsHeapObject()) { |
| + HeapObject* heap_object = HeapObject::cast(o); |
| + if (ObjectMarking::IsWhite(heap_object, |
| + collector_.marking_state(heap_object))) { |
| + if (o->IsExternalString()) { |
| + heap_->FinalizeExternalString(String::cast(*p)); |
| + } else { |
| + // The original external string may have been internalized. |
| + DCHECK(o->IsThinString()); |
| + } |
| + // Set the entry to the_hole_value (as deleted). |
| + *p = heap_->the_hole_value(); |
| + } |
| + } |
| + } |
| + } |
| + |
| + private: |
| + Heap* heap_; |
| + const MinorMarkCompactCollector& collector_; |
| +}; |
| + |
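Note on the cleaner above: in V8's tri-color marking, an object that is still white after marking has finished is unreachable, so a white external string's externally allocated character buffer can be released before the table slot is tombstoned with the hole value. A stripped-down sketch of that decision, with stand-in helpers that are illustrative and not the actual V8 API:

    // Illustration only: stand-ins for marking/finalization, not V8's API.
    class Object;                       // tagged value (declaration only)
    bool is_white(Object* o);           // assumed: unreached by marking
    bool is_external_string(Object* o); // assumed type check
    void finalize_external(Object* o);  // assumed: frees external chars
    extern Object* the_hole;            // assumed deleted-entry sentinel

    void CleanEntries(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (is_white(*p)) {             // unreachable => dead
          if (is_external_string(*p)) finalize_external(*p);
          *p = the_hole;                // tombstone the table slot
        }
      }
    }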
| +// Marked young generation objects and all old generation objects will be |
| +// retained. |
| +class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer { |
| + public: |
| + explicit MinorMarkCompactWeakObjectRetainer( |
| + const MinorMarkCompactCollector& collector) |
| + : collector_(collector) {} |
| + |
| + virtual Object* RetainAs(Object* object) { |
| + HeapObject* heap_object = HeapObject::cast(object); |
| + if (!collector_.heap()->InNewSpace(heap_object)) return object; |
| + |
| + DCHECK(!ObjectMarking::IsGrey(heap_object, |
| + MarkingState::External(heap_object))); |
|
ulan
2017/04/26 09:51:28
Did you mean collector_.marking_state(heap_object)?
Michael Lippautz
2017/05/02 11:22:00
Done.
|
| + if (ObjectMarking::IsBlack(heap_object, |
| + collector_.marking_state(heap_object))) { |
| + return object; |
| + } |
| + return nullptr; |
| + } |
| + |
| + private: |
| + const MinorMarkCompactCollector& collector_; |
| +}; |
| + |
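For context on RetainAs: the heap walks its weak object lists and calls RetainAs for every element; returning the object (or a forwarded copy) keeps the entry, returning nullptr clears it. A minimal sketch of that contract, close to but not guaranteed to match the exact V8 declaration:

    class Object;  // V8's tagged value type (declaration only, for the sketch)
    class WeakObjectRetainer {
     public:
      virtual ~WeakObjectRetainer() {}
      // Return the object to retain (possibly a relocated copy), or
      // nullptr to drop the weak-list entry.
      virtual Object* RetainAs(Object* object) = 0;
    };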
| // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects |
| // are retained. |
| class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { |
| @@ -1719,7 +1800,6 @@ class RecordMigratedSlotVisitor : public ObjectVisitor { |
| collector_->RecordRelocSlot(host, rinfo, cell); |
| } |
| - // Entries that will never move. |
| inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override { |
| DCHECK_EQ(host, rinfo->host()); |
| DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); |
| @@ -1750,21 +1830,114 @@ class RecordMigratedSlotVisitor : public ObjectVisitor { |
| MarkCompactCollector* collector_; |
| }; |
| +class YoungGenerationRecordMigratedSlotVisitor final |
| + : public RecordMigratedSlotVisitor { |
| + public: |
| + explicit YoungGenerationRecordMigratedSlotVisitor( |
| + MarkCompactCollector* collector) |
| + : RecordMigratedSlotVisitor(collector) {} |
| + |
| + inline void VisitCodeEntry(JSFunction* host, Address code_entry_slot) final { |
| + Address code_entry = Memory::Address_at(code_entry_slot); |
| + if (Page::FromAddress(code_entry)->IsEvacuationCandidate() && |
| + IsLive(host)) { |
| + RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot), |
| + nullptr, CODE_ENTRY_SLOT, |
| + code_entry_slot); |
| + } |
| + } |
| + |
| + void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } |
| + void VisitDebugTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } |
| + void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final { |
| + UNREACHABLE(); |
| + } |
| + void VisitCellPointer(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); } |
| + void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) final { |
| + UNREACHABLE(); |
| + } |
| + |
| + private: |
| + // Only record slots for host objects that are considered as live by the full |
| + // collector. |
| + inline bool IsLive(HeapObject* object) { |
| + return ObjectMarking::IsBlack(object, collector_->marking_state(object)); |
| + } |
| + |
| + inline void RecordMigratedSlot(HeapObject* host, Object* value, |
| + Address slot) final { |
| + if (value->IsHeapObject()) { |
| + Page* p = Page::FromAddress(reinterpret_cast<Address>(value)); |
| + if (p->InNewSpace()) { |
| + RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot); |
| + } else if (p->IsEvacuationCandidate() && IsLive(host)) { |
| + RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot); |
| + } |
| + } |
| + } |
| +}; |
| + |
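Background on the two remembered sets used above: OLD_TO_NEW records slots in old space whose values point into new space (rescanned by the next young-generation GC), while OLD_TO_OLD records slots pointing into evacuation candidate pages (rewritten when those pages are compacted). A simplified sketch of the recording decision, with assumed helper names standing in for V8's page-flag checks:

    #include <cstdint>
    using Address = uintptr_t;
    // Assumed helpers; real V8 consults page flags and RememberedSet<>.
    bool in_new_space(void* value);
    bool is_evacuation_candidate(void* value);
    void insert_old_to_new(Address slot);
    void insert_old_to_old(Address slot);

    void RecordMigratedSlot(void* value, Address slot, bool host_is_live) {
      if (in_new_space(value)) {
        insert_old_to_new(slot);   // fixed up by the next minor GC
      } else if (is_evacuation_candidate(value) && host_is_live) {
        insert_old_to_old(slot);   // fixed up when the page is compacted
      }
    }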
| class HeapObjectVisitor { |
| public: |
| virtual ~HeapObjectVisitor() {} |
| virtual bool Visit(HeapObject* object) = 0; |
| }; |
| +class MigrationObserver { |
| + public: |
| + explicit MigrationObserver(Heap* heap) : heap_(heap) {} |
| + |
| + virtual ~MigrationObserver() {} |
| + virtual inline void Move(HeapObject* src, HeapObject* dst) {} |
| + |
| + protected: |
| + Heap* heap_; |
| +}; |
| + |
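MigrationObserver is a hook: the evacuation visitors in the hunks below call migration_observer_->Move(src, dst) once per copied object, so a collector can piggyback bookkeeping on evacuation without touching the copy loop itself. A hypothetical subclass, purely for illustration and not part of this patch:

    // Hypothetical observer that counts migrated objects; sketch only.
    class CountingMigrationObserver : public MigrationObserver {
     public:
      explicit CountingMigrationObserver(Heap* heap)
          : MigrationObserver(heap) {}
      inline void Move(HeapObject* src, HeapObject* dst) override {
        moved_objects_++;  // e.g. feed a tracing counter or histogram
      }
     private:
      size_t moved_objects_ = 0;
    };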
| +class YoungGenerationMigrationObserver : public MigrationObserver { |
| + public: |
| + YoungGenerationMigrationObserver( |
| + Heap* heap, MarkCompactCollector* mark_compact_collector, |
| + std::vector<HeapObject*>* black_allocation_objects) |
| + : MigrationObserver(heap), |
| + mark_compact_collector_(mark_compact_collector), |
| + black_allocation_objects_(black_allocation_objects) {} |
| + |
| + inline void Move(HeapObject* src, HeapObject* dst) final { |
| + // Migrate color to old generation marking in case the object survived young |
| + // generation garbage collection. |
| + if (heap_->incremental_marking()->IsMarking()) { |
| + const MarkingState state = mark_compact_collector_->marking_state(dst); |
| + if (heap_->incremental_marking()->black_allocation() && |
| + ObjectMarking::IsBlack(dst, state)) { |
|
ulan
2017/04/26 09:51:28
if (ObjectMarking::IsBlack(dst, state)) {
DCHECK
Michael Lippautz
2017/05/02 11:22:00
Done.
|
| + base::LockGuard<base::Mutex> guard(&mutex_); |
| + black_allocation_objects_->push_back(dst); |
| + } |
| + |
| + // Transfer old generation marking state. |
| + if (!ObjectMarking::IsBlack(dst, state)) { |
| + IncrementalMarking::TransferColor<MarkBit::ATOMIC>(src, dst); |
| + } |
| + } |
| + } |
| + |
| + protected: |
| + base::Mutex mutex_; |
| + MarkCompactCollector* mark_compact_collector_; |
| + std::vector<HeapObject*>* black_allocation_objects_; |
| +}; |
| + |
| class EvacuateVisitorBase : public HeapObjectVisitor { |
| protected: |
| enum MigrationMode { kFast, kProfiled }; |
| EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces, |
| - RecordMigratedSlotVisitor* record_visitor) |
| + RecordMigratedSlotVisitor* record_visitor, |
| + MigrationObserver* migration_observer) |
| : heap_(heap), |
| compaction_spaces_(compaction_spaces), |
| record_visitor_(record_visitor), |
| + migration_observer_(migration_observer), |
| profiling_( |
| heap->isolate()->is_profiling() || |
| heap->isolate()->logger()->is_logging_code_events() || |
| @@ -1809,6 +1982,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor { |
| PROFILE(heap_->isolate(), |
| CodeMoveEvent(AbstractCode::cast(src), dst_addr)); |
| } |
| + migration_observer_->Move(src, dst); |
| dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_); |
| } else if (dest == CODE_SPACE) { |
| DCHECK_CODEOBJECT_SIZE(size, heap_->code_space()); |
| @@ -1818,12 +1992,13 @@ class EvacuateVisitorBase : public HeapObjectVisitor { |
| } |
| heap_->CopyBlock(dst_addr, src_addr, size); |
| Code::cast(dst)->Relocate(dst_addr - src_addr); |
| - RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); |
| + migration_observer_->Move(src, dst); |
| dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_); |
| } else { |
| DCHECK_OBJECT_SIZE(size); |
| DCHECK(dest == NEW_SPACE); |
| heap_->CopyBlock(dst_addr, src_addr, size); |
| + migration_observer_->Move(src, dst); |
| } |
| if (mode == kProfiled) { |
| heap_->OnMoveEvent(dst, src, size); |
| @@ -1855,6 +2030,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor { |
| Heap* heap_; |
| CompactionSpaceCollection* compaction_spaces_; |
| RecordMigratedSlotVisitor* record_visitor_; |
| + MigrationObserver* migration_observer_; |
| bool profiling_; |
| }; |
| @@ -1866,8 +2042,10 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase { |
| explicit EvacuateNewSpaceVisitor(Heap* heap, |
| CompactionSpaceCollection* compaction_spaces, |
| RecordMigratedSlotVisitor* record_visitor, |
| + MigrationObserver* migration_observer, |
| base::HashMap* local_pretenuring_feedback) |
| - : EvacuateVisitorBase(heap, compaction_spaces, record_visitor), |
| + : EvacuateVisitorBase(heap, compaction_spaces, record_visitor, |
| + migration_observer), |
| buffer_(LocalAllocationBuffer::InvalidBuffer()), |
| space_to_allocate_(NEW_SPACE), |
| promoted_size_(0), |
| @@ -2050,8 +2228,10 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase { |
| public: |
| EvacuateOldSpaceVisitor(Heap* heap, |
| CompactionSpaceCollection* compaction_spaces, |
| - RecordMigratedSlotVisitor* record_visitor) |
| - : EvacuateVisitorBase(heap, compaction_spaces, record_visitor) {} |
| + RecordMigratedSlotVisitor* record_visitor, |
| + MigrationObserver* migration_observer) |
| + : EvacuateVisitorBase(heap, compaction_spaces, record_visitor, |
| + migration_observer) {} |
| inline bool Visit(HeapObject* object) override { |
| CompactionSpace* target_space = compaction_spaces_->Get( |
| @@ -2411,7 +2591,7 @@ static bool IsUnmarkedObject(Heap* heap, Object** p) { |
| DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p)); |
| return heap->InNewSpace(*p) && |
| !ObjectMarking::IsBlack(HeapObject::cast(*p), |
| - MarkingState::Internal(HeapObject::cast(*p))); |
| + MarkingState::External(HeapObject::cast(*p))); |
| } |
| void MinorMarkCompactCollector::MarkLiveObjects() { |
| @@ -2463,8 +2643,8 @@ void MinorMarkCompactCollector::MarkLiveObjects() { |
| &IsUnmarkedObject); |
| isolate() |
| ->global_handles() |
| - ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>( |
| - &root_visitor); |
| + ->IterateNewSpaceWeakUnmodifiedRoots< |
| + GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&root_visitor); |
| ProcessMarkingDeque(); |
| } |
| @@ -2496,14 +2676,123 @@ void MinorMarkCompactCollector::EmptyMarkingDeque() { |
| } |
| void MinorMarkCompactCollector::CollectGarbage() { |
| - MarkLiveObjects(); |
| + heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted(); |
| + MarkLiveObjects(); |
| + ClearNonLiveReferences(); |
| #ifdef VERIFY_HEAP |
| if (FLAG_verify_heap) { |
| YoungGenerationMarkingVerifier verifier(heap()); |
| verifier.Run(); |
| } |
| #endif // VERIFY_HEAP |
| + |
| + std::vector<HeapObject*> black_allocation_objects; |
| + EvacuateNewSpace(&black_allocation_objects); |
| +#ifdef VERIFY_HEAP |
| + if (FLAG_verify_heap) { |
| + YoungGenerationEvacuationVerifier verifier(heap()); |
| + verifier.Run(); |
| + } |
| +#endif // VERIFY_HEAP |
| + |
| + heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge(); |
| + |
| + // Process black allocation objects after updating pointers as we otherwise |
| + // would end up with objects on the marking deque that potentially forward |
| + // to white objects. |
| + // TODO(mlippautz): Instead of processing them explicitly, we should just add |
| + // them to the marking deque for further processing. |
| + { |
| + TRACE_GC(heap()->tracer(), |
| + GCTracer::Scope::MINOR_MC_EVACUATE_PROCESS_BLACK_ALLOCATION); |
| + for (HeapObject* object : black_allocation_objects) { |
| + CHECK(ObjectMarking::IsBlack(object, MarkingState::Internal(object))); |
| + heap()->incremental_marking()->IterateBlackObject(object); |
| + } |
| + heap()->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer(); |
| + } |
| + |
| + { |
| + TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS); |
| + for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(), |
| + heap()->new_space()->FromSpaceEnd())) { |
| + marking_state(p).ClearLiveness(); |
| + } |
| + } |
| +} |
| + |
| +void MinorMarkCompactCollector::ClearNonLiveReferences() { |
| + TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR); |
| + |
| + { |
| + TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE); |
| + // Internalized strings are always stored in old space, so there is no need |
| + // to clean them here. |
| + YoungGenerationExternalStringTableCleaner external_visitor(*this); |
| + heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor); |
| + heap()->external_string_table_.CleanUpNewSpaceStrings(); |
| + } |
| + |
| + { |
| + TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS); |
| + // Process the weak references. |
| + MinorMarkCompactWeakObjectRetainer retainer(*this); |
| + heap()->ProcessYoungWeakReferences(&retainer); |
| + } |
| +} |
| + |
| +void MinorMarkCompactCollector::EvacuatePrologue() { |
| + NewSpace* new_space = heap()->new_space(); |
| + // Append the list of new space pages to be processed. |
| + for (Page* p : PageRange(new_space->bottom(), new_space->top())) { |
| + new_space_evacuation_pages_.Add(p); |
| + } |
| + new_space->Flip(); |
| + new_space->ResetAllocationInfo(); |
| +} |
| + |
| +void MinorMarkCompactCollector::EvacuateEpilogue() { |
| + heap()->new_space()->set_age_mark(heap()->new_space()->top()); |
| +} |
| + |
| +void MinorMarkCompactCollector::EvacuateNewSpace( |
| + std::vector<HeapObject*>* black_allocation_objects) { |
| + TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); |
| + Heap::RelocationLock relocation_lock(heap()); |
| + |
| + { |
| + TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE); |
| + EvacuatePrologue(); |
| + } |
| + |
| + { |
| + TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); |
| + EvacuatePagesInParallel(black_allocation_objects); |
| + } |
| + |
| + UpdatePointersAfterEvacuation(); |
| + |
| + { |
| + TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE); |
| + if (!heap()->new_space()->Rebalance()) { |
| + FatalProcessOutOfMemory("NewSpace::Rebalance"); |
| + } |
| + } |
| + |
| + // Give pages that are queued to be freed back to the OS. |
| + heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
| + |
| + { |
| + TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); |
| + // TODO(mlippautz): Implement page promotion. |
| + new_space_evacuation_pages_.Rewind(0); |
| + } |
| + |
| + { |
| + TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE); |
| + EvacuateEpilogue(); |
| + } |
| } |
| void MarkCompactCollector::MarkLiveObjects() { |
| @@ -3183,18 +3472,20 @@ class Evacuator : public Malloced { |
| return Page::kAllocatableMemory + kPointerSize; |
| } |
| - Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor) |
| + Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor, |
| + MigrationObserver* migration_observer) |
| : heap_(heap), |
| compaction_spaces_(heap_), |
| local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity), |
| new_space_visitor_(heap_, &compaction_spaces_, record_visitor, |
| - &local_pretenuring_feedback_), |
| + migration_observer, &local_pretenuring_feedback_), |
| new_to_new_page_visitor_(heap_, record_visitor, |
| &local_pretenuring_feedback_), |
| new_to_old_page_visitor_(heap_, record_visitor, |
| &local_pretenuring_feedback_), |
| - old_space_visitor_(heap_, &compaction_spaces_, record_visitor), |
| + old_space_visitor_(heap_, &compaction_spaces_, record_visitor, |
| + migration_observer), |
| duration_(0.0), |
| bytes_compacted_(0) {} |
| @@ -3290,8 +3581,10 @@ void Evacuator::Finalize() { |
| class FullEvacuator : public Evacuator { |
| public: |
| FullEvacuator(MarkCompactCollector* collector, |
| - RecordMigratedSlotVisitor* record_visitor) |
| - : Evacuator(collector->heap(), record_visitor), collector_(collector) {} |
| + RecordMigratedSlotVisitor* record_visitor, |
| + MigrationObserver* migration_observer) |
| + : Evacuator(collector->heap(), record_visitor, migration_observer), |
| + collector_(collector) {} |
| protected: |
| bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override; |
| @@ -3355,9 +3648,57 @@ bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) { |
| return success; |
| } |
| +class YoungGenerationEvacuator : public Evacuator { |
| + public: |
| + YoungGenerationEvacuator(MinorMarkCompactCollector* collector, |
| + RecordMigratedSlotVisitor* record_visitor, |
| + MigrationObserver* migration_observer) |
| + : Evacuator(collector->heap(), record_visitor, migration_observer), |
| + collector_(collector) {} |
| + |
| + protected: |
| + bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override; |
| + |
| + MinorMarkCompactCollector* collector_; |
| +}; |
| + |
| +bool YoungGenerationEvacuator::RawEvacuatePage(Page* page, |
| + intptr_t* live_bytes) { |
| + bool success = false; |
| + LiveObjectVisitor object_visitor; |
| + const MarkingState state = collector_->marking_state(page); |
| + *live_bytes = state.live_bytes(); |
| + switch (ComputeEvacuationMode(page)) { |
| + case kObjectsNewToOld: |
| + success = object_visitor.VisitBlackObjects( |
| + page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits); |
| + DCHECK(success); |
| + ArrayBufferTracker::ProcessBuffers( |
| + page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
| + break; |
| + case kPageNewToOld: |
| + // TODO(mlippautz): Implement page promotion. |
| + UNREACHABLE(); |
| + break; |
| + case kPageNewToNew: |
| + // TODO(mlippautz): Implement page promotion. |
| + UNREACHABLE(); |
| + break; |
| + case kObjectsOldToOld: |
| + UNREACHABLE(); |
| + break; |
| + } |
| + return success; |
| +} |
| + |
| class EvacuationJobTraits { |
| public: |
| - typedef int* PerPageData; // Pointer to number of aborted pages. |
| + struct PageData { |
| + int* abandoned_pages; // Pointer to number of aborted pages. |
| + MarkingState marking_state; |
| + }; |
| + |
| + typedef PageData PerPageData; |
| typedef Evacuator* PerTaskData; |
| static const bool NeedSequentialFinalization = true; |
| @@ -3390,7 +3731,7 @@ class EvacuationJobTraits { |
| p->ClearEvacuationCandidate(); |
| // Slots have already been recorded so we just need to add it to the |
| // sweeper, which will happen after updating pointers. |
| - *data += 1; |
| + *data.abandoned_pages += 1; |
| } |
| break; |
| default: |
| @@ -3399,6 +3740,51 @@ class EvacuationJobTraits { |
| } |
| }; |
| +template <class Evacuator, class Collector> |
| +void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks( |
| + Collector* collector, PageParallelJob<EvacuationJobTraits>* job, |
| + RecordMigratedSlotVisitor* record_visitor, MigrationObserver* observer, |
| + const intptr_t live_bytes, const int& abandoned_pages) { |
| + // Used for trace summary. |
| + double compaction_speed = 0; |
| + if (FLAG_trace_evacuation) { |
| + compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
| + } |
| + |
| + const int wanted_num_tasks = |
| + NumberOfParallelCompactionTasks(job->NumberOfPages(), live_bytes); |
| + Evacuator** evacuators = new Evacuator*[wanted_num_tasks]; |
| + for (int i = 0; i < wanted_num_tasks; i++) { |
| + evacuators[i] = new Evacuator(collector, record_visitor, observer); |
| + } |
| + job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); |
| + const Address top = heap()->new_space()->top(); |
| + for (int i = 0; i < wanted_num_tasks; i++) { |
| + evacuators[i]->Finalize(); |
| + // Try to find the last LAB that was used for new space allocation in |
| + // evacuation tasks. If it was adjacent to the current top, move top back. |
| + const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB(); |
| + if (info.limit() != nullptr && info.limit() == top) { |
| + DCHECK_NOT_NULL(info.top()); |
| + *heap()->new_space()->allocation_top_address() = info.top(); |
| + } |
| + delete evacuators[i]; |
| + } |
| + delete[] evacuators; |
| + |
| + if (FLAG_trace_evacuation) { |
| + PrintIsolate(isolate(), |
| + "%8.0f ms: evacuation-summary: parallel=%s pages=%d " |
| + "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS |
| + " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n", |
| + isolate()->time_millis_since_init(), |
| + FLAG_parallel_compaction ? "yes" : "no", job->NumberOfPages(), |
| + abandoned_pages, wanted_num_tasks, job->NumberOfTasks(), |
| + V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), |
| + live_bytes, compaction_speed); |
| + } |
| +} |
| + |
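The LAB rewind in the loop above deserves a worked example. Each evacuator allocates from a private new-space LAB; only if the last LAB's limit coincides with the space's current top can its unused tail be recovered by moving top backwards (any other LAB's tail is buried under later allocations and remains as filler). A self-contained sketch of the condition, with made-up addresses:

    #include <cstdint>
    struct Lab { uintptr_t top, limit; };  // unused tail is [top, limit)
    uintptr_t MaybeRewindTop(uintptr_t space_top, const Lab& lab) {
      // Rewind only when the LAB tail abuts the space top.
      return (lab.limit == space_top) ? lab.top : space_top;
    }
    // MaybeRewindTop(0x5000, {0x4800, 0x5000}) == 0x4800: 0x800 bytes back.
    // MaybeRewindTop(0x5000, {0x4000, 0x4800}) == 0x5000: tail not at top.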
| void MarkCompactCollector::EvacuatePagesInParallel() { |
| PageParallelJob<EvacuationJobTraits> job( |
| heap_, heap_->isolate()->cancelable_task_manager(), |
| @@ -3408,7 +3794,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() { |
| intptr_t live_bytes = 0; |
| for (Page* page : old_space_evacuation_pages_) { |
| live_bytes += MarkingState::Internal(page).live_bytes(); |
| - job.AddPage(page, &abandoned_pages); |
| + job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
| } |
| const bool reduce_memory = heap()->ShouldReduceMemory(); |
| @@ -3427,49 +3813,38 @@ void MarkCompactCollector::EvacuatePagesInParallel() { |
| } |
| } |
| - job.AddPage(page, &abandoned_pages); |
| + job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
| } |
| DCHECK_GE(job.NumberOfPages(), 1); |
| - // Used for trace summary. |
| - double compaction_speed = 0; |
| - if (FLAG_trace_evacuation) { |
| - compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
| - } |
| - |
| - const int wanted_num_tasks = |
| - NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes); |
| - FullEvacuator** evacuators = new FullEvacuator*[wanted_num_tasks]; |
| + MigrationObserver observer(heap()); |
| RecordMigratedSlotVisitor record_visitor(this); |
| - for (int i = 0; i < wanted_num_tasks; i++) { |
| - evacuators[i] = new FullEvacuator(this, &record_visitor); |
| - } |
| - job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); |
| - const Address top = heap()->new_space()->top(); |
| - for (int i = 0; i < wanted_num_tasks; i++) { |
| - evacuators[i]->Finalize(); |
| - // Try to find the last LAB that was used for new space allocation in |
| - // evacuation tasks. If it was adjacent to the current top, move top back. |
| - const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB(); |
| - if (info.limit() != nullptr && info.limit() == top) { |
| - DCHECK_NOT_NULL(info.top()); |
| - *heap()->new_space()->allocation_top_address() = info.top(); |
| - } |
| - delete evacuators[i]; |
| - } |
| - delete[] evacuators; |
| + CreateAndExecuteEvacuationTasks<FullEvacuator>( |
| + this, &job, &record_visitor, &observer, live_bytes, abandoned_pages); |
| +} |
| - if (FLAG_trace_evacuation) { |
| - PrintIsolate(isolate(), |
| - "%8.0f ms: evacuation-summary: parallel=%s pages=%d " |
| - "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS |
| - " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n", |
| - isolate()->time_millis_since_init(), |
| - FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(), |
| - abandoned_pages, wanted_num_tasks, job.NumberOfTasks(), |
| - V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), |
| - live_bytes, compaction_speed); |
| +void MinorMarkCompactCollector::EvacuatePagesInParallel( |
| + std::vector<HeapObject*>* black_allocation_objects) { |
| + PageParallelJob<EvacuationJobTraits> job( |
| + heap_, heap_->isolate()->cancelable_task_manager(), |
| + &page_parallel_job_semaphore_); |
| + int abandoned_pages = 0; |
| + intptr_t live_bytes = 0; |
| + |
| + for (Page* page : new_space_evacuation_pages_) { |
| + intptr_t live_bytes_on_page = marking_state(page).live_bytes(); |
| + live_bytes += live_bytes_on_page; |
| + // TODO(mlippautz): Implement page promotion. |
| + job.AddPage(page, {&abandoned_pages, marking_state(page)}); |
| } |
| + DCHECK_GE(job.NumberOfPages(), 1); |
| + |
| + YoungGenerationMigrationObserver observer( |
| + heap(), heap()->mark_compact_collector(), black_allocation_objects); |
| + YoungGenerationRecordMigratedSlotVisitor record_visitor( |
| + heap()->mark_compact_collector()); |
| + CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>( |
| + this, &job, &record_visitor, &observer, live_bytes, abandoned_pages); |
| } |
| class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
| @@ -3794,6 +4169,7 @@ class PointerUpdateJobTraits { |
| private: |
| static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { |
| + base::LockGuard<base::RecursiveMutex> guard(chunk->mutex()); |
| if (type == OLD_TO_NEW) { |
| RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { |
| return CheckAndUpdateOldToNewSlot(heap, slot); |
| @@ -3900,17 +4276,23 @@ void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) { |
| class ToSpacePointerUpdateJobTraits { |
| public: |
| - typedef std::pair<Address, Address> PerPageData; |
| + struct PageData { |
| + Address start; |
| + Address end; |
| + MarkingState marking_state; |
| + }; |
| + |
| + typedef PageData PerPageData; |
| typedef PointersUpdatingVisitor* PerTaskData; |
| static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, |
| - MemoryChunk* chunk, PerPageData limits) { |
| + MemoryChunk* chunk, PerPageData page_data) { |
| if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { |
| // New->new promoted pages contain garbage so they require iteration |
| // using markbits. |
| - ProcessPageInParallelVisitLive(heap, visitor, chunk, limits); |
| + ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data); |
| } else { |
| - ProcessPageInParallelVisitAll(heap, visitor, chunk, limits); |
| + ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data); |
| } |
| return true; |
| } |
| @@ -3922,8 +4304,8 @@ class ToSpacePointerUpdateJobTraits { |
| private: |
| static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, |
| MemoryChunk* chunk, |
| - PerPageData limits) { |
| - for (Address cur = limits.first; cur < limits.second;) { |
| + PerPageData page_data) { |
| + for (Address cur = page_data.start; cur < page_data.end;) { |
| HeapObject* object = HeapObject::FromAddress(cur); |
| Map* map = object->map(); |
| int size = object->SizeFromMap(map); |
| @@ -3934,8 +4316,8 @@ class ToSpacePointerUpdateJobTraits { |
| static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, |
| MemoryChunk* chunk, |
| - PerPageData limits) { |
| - LiveObjectIterator<kBlackObjects> it(chunk, MarkingState::Internal(chunk)); |
| + PerPageData page_data) { |
| + LiveObjectIterator<kBlackObjects> it(chunk, page_data.marking_state); |
| HeapObject* object = NULL; |
| while ((object = it.Next()) != NULL) { |
| Map* map = object->map(); |
| @@ -3945,7 +4327,10 @@ class ToSpacePointerUpdateJobTraits { |
| } |
| }; |
| -void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { |
| +template <class MarkingStateProvider> |
| +void UpdateToSpacePointersInParallel( |
| + Heap* heap, base::Semaphore* semaphore, |
| + const MarkingStateProvider& marking_state_provider) { |
| PageParallelJob<ToSpacePointerUpdateJobTraits> job( |
| heap, heap->isolate()->cancelable_task_manager(), semaphore); |
| Address space_start = heap->new_space()->bottom(); |
| @@ -3954,7 +4339,7 @@ void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { |
| Address start = |
| page->Contains(space_start) ? space_start : page->area_start(); |
| Address end = page->Contains(space_end) ? space_end : page->area_end(); |
| - job.AddPage(page, std::make_pair(start, end)); |
| + job.AddPage(page, {start, end, marking_state_provider.marking_state(page)}); |
| } |
| PointersUpdatingVisitor visitor; |
| int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; |
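Note that the MarkingStateProvider template parameter is compile-time duck typing: any type exposing marking_state(Page*) satisfies it, which is why both MarkCompactCollector and MinorMarkCompactCollector can pass *this at the call sites below. The implied shape, written out as an assumption rather than an actual V8 interface:

    // Assumed concept, for illustration only:
    struct MarkingStateProvider {
      MarkingState marking_state(Page* page) const;
    };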
| @@ -3968,7 +4353,8 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() { |
| { |
| TRACE_GC(heap()->tracer(), |
| GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); |
| - UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_); |
| + UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, |
| + *this); |
| // Update roots. |
| PointersUpdatingVisitor updating_visitor; |
| heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
| @@ -3994,6 +4380,37 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() { |
| } |
| } |
| +void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() { |
| + TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); |
| + |
| + PointersUpdatingVisitor updating_visitor; |
| + |
| + { |
| + TRACE_GC(heap()->tracer(), |
| + GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); |
| + UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_, |
| + *this); |
| + // TODO(mlippautz): Iteration mode is not optimal as we process all |
| + // global handles. Find a way to only process the ones related to new |
| + // space. |
| + heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
| + UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); |
| + } |
| + |
| + { |
| + TRACE_GC(heap()->tracer(), |
| + GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); |
| + |
| + EvacuationWeakObjectRetainer evacuation_object_retainer; |
| + heap()->ProcessWeakListRoots(&evacuation_object_retainer); |
| + |
| + // Update pointers from external string table. |
| + heap()->UpdateNewSpaceReferencesInExternalStringTable( |
| + &UpdateReferenceInExternalStringTableEntry); |
| + heap()->IterateEncounteredWeakCollections(&updating_visitor); |
| + heap()->set_encountered_weak_collections(Smi::kZero); |
| + } |
| +} |
| void MarkCompactCollector::ReleaseEvacuationCandidates() { |
| for (Page* p : old_space_evacuation_pages_) { |