Chromium Code Reviews

Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index fe886ac8136ebe99d1924a9b9250aa357038eadf..8d0416349ac6ad2645e65e17640e898d0f5daddd 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -210,15 +210,6 @@ class EvacuationVerifier : public ObjectVisitor {
  public:
   virtual void Run() = 0;
-  void VisitPointers(Object** start, Object** end) override {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
-      }
-    }
-  }
-
  protected:
   explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
@@ -277,6 +268,36 @@ class FullEvacuationVerifier : public EvacuationVerifier {
     VerifyEvacuation(heap_->code_space());
     VerifyEvacuation(heap_->map_space());
   }
+
+  void VisitPointers(Object** start, Object** end) override {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
+      }
+    }
+  }
+};
+
+class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
+ public:
+  explicit YoungGenerationEvacuationVerifier(Heap* heap)
+      : EvacuationVerifier(heap) {}
+
+  void Run() override {
+    VerifyRoots(VISIT_ALL_IN_SCAVENGE);
+    VerifyEvacuation(heap_->new_space());
+  }
+
+  void VisitPointers(Object** start, Object** end) override {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        if (!heap_->InNewSpace(object)) continue;
+        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
+      }
+    }
+  }
 };
 }  // namespace
@@ -1548,6 +1569,64 @@ class StringTableCleaner : public ObjectVisitor {
 typedef StringTableCleaner<false, true> InternalizedStringTableCleaner;
 typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
+// Helper class for pruning the external string table.
+class YoungGenerationExternalStringTableCleaner : public ObjectVisitor {
+ public:
+  YoungGenerationExternalStringTableCleaner(
+      const MinorMarkCompactCollector& collector)
+      : heap_(collector.heap()), collector_(collector) {}
+
+  void VisitPointers(Object** start, Object** end) override {
+    // Visit all HeapObject pointers in [start, end).
+    for (Object** p = start; p < end; p++) {
+      Object* o = *p;
+      if (o->IsHeapObject()) {
+        HeapObject* heap_object = HeapObject::cast(o);
+        if (ObjectMarking::IsWhite(heap_object,
+                                   collector_.marking_state(heap_object))) {
+          if (o->IsExternalString()) {
+            heap_->FinalizeExternalString(String::cast(*p));
+          } else {
+            // The original external string may have been internalized.
+            DCHECK(o->IsThinString());
+          }
+          // Set the entry to the_hole_value (as deleted).
+          *p = heap_->the_hole_value();
+        }
+      }
+    }
+  }
+
+ private:
+  Heap* heap_;
+  const MinorMarkCompactCollector& collector_;
+};
+
+// Marked young generation objects and all old generation objects will be
+// retained.
+class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  explicit MinorMarkCompactWeakObjectRetainer(
+      const MinorMarkCompactCollector& collector)
+      : collector_(collector) {}
+
+  virtual Object* RetainAs(Object* object) {
+    HeapObject* heap_object = HeapObject::cast(object);
+    if (!collector_.heap()->InNewSpace(heap_object)) return object;
+
+    DCHECK(!ObjectMarking::IsGrey(heap_object,
+                                  MarkingState::External(heap_object)));
+    if (ObjectMarking::IsBlack(heap_object,
+                               collector_.marking_state(heap_object))) {
+      return object;
+    }
+    return nullptr;
+  }
+
+ private:
+  const MinorMarkCompactCollector& collector_;
+};
+
 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
 // are retained.
 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
@@ -1606,10 +1685,23 @@ void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
   }
 }
-class RecordMigratedSlotVisitor final : public ObjectVisitor {
+class RecordMigratedSlotVisitor : public ObjectVisitor {
  public:
+  class HostScope {
+   public:
+    HostScope(RecordMigratedSlotVisitor* visitor, HeapObject* object)
+        : visitor_(visitor) {
+      DCHECK_NOT_NULL(object);
+      visitor_->set_host(object);
+    }
+    ~HostScope() { visitor_->set_host(nullptr); }
+
+   private:
+    RecordMigratedSlotVisitor* visitor_;
+  };
+
   explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
-      : collector_(collector) {}
+      : collector_(collector), host_(nullptr) {}
   inline void VisitPointer(Object** p) final {
     RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
@@ -1622,7 +1714,7 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
     }
   }
-  inline void VisitCodeEntry(Address code_entry_slot) final {
+  inline void VisitCodeEntry(Address code_entry_slot) override {
     Address code_entry = Memory::Address_at(code_entry_slot);
     if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
       RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
@@ -1631,7 +1723,7 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
     }
   }
-  inline void VisitCodeTarget(RelocInfo* rinfo) final {
+  inline void VisitCodeTarget(RelocInfo* rinfo) override {
     DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
     Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     Code* host = rinfo->host();
@@ -1641,7 +1733,7 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
     collector_->RecordRelocSlot(host, rinfo, target);
   }
-  inline void VisitDebugTarget(RelocInfo* rinfo) final {
+  inline void VisitDebugTarget(RelocInfo* rinfo) override {
     DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence());
     Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
@@ -1652,7 +1744,7 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
     collector_->RecordRelocSlot(host, rinfo, target);
   }
-  inline void VisitEmbeddedPointer(RelocInfo* rinfo) final {
+  inline void VisitEmbeddedPointer(RelocInfo* rinfo) override {
     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
     HeapObject* object = HeapObject::cast(rinfo->target_object());
     Code* host = rinfo->host();
@@ -1660,7 +1752,7 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
     collector_->RecordRelocSlot(host, rinfo, object);
   }
-  inline void VisitCell(RelocInfo* rinfo) final {
+  inline void VisitCell(RelocInfo* rinfo) override {
     DCHECK(rinfo->rmode() == RelocInfo::CELL);
     Cell* cell = rinfo->target_cell();
     Code* host = rinfo->host();
@@ -1671,7 +1763,7 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
   }
   // Entries that will never move.
-  inline void VisitCodeAgeSequence(RelocInfo* rinfo) final {
+  inline void VisitCodeAgeSequence(RelocInfo* rinfo) override {
     DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
     Code* stub = rinfo->code_age_stub();
     USE(stub);
@@ -1684,8 +1776,10 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
   inline void VisitRuntimeEntry(RelocInfo* rinfo) final {}
   inline void VisitInternalReference(RelocInfo* rinfo) final {}
- private:
-  inline void RecordMigratedSlot(Object* value, Address slot) {
+ protected:
+  void set_host(HeapObject* host) { host_ = host; }
+
+  inline virtual void RecordMigratedSlot(Object* value, Address slot) {
     if (value->IsHeapObject()) {
       Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
       if (p->InNewSpace()) {
@@ -1697,6 +1791,50 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
   }
   MarkCompactCollector* collector_;
+  HeapObject* host_;
+};
+
+class YoungGenerationRecordMigratedSlotVisitor final
+    : public RecordMigratedSlotVisitor {
+ public:
+  explicit YoungGenerationRecordMigratedSlotVisitor(
+      MarkCompactCollector* collector)
+      : RecordMigratedSlotVisitor(collector) {}
+
+  inline void VisitCodeEntry(Address code_entry_slot) final {
+    Address code_entry = Memory::Address_at(code_entry_slot);
+    if (Page::FromAddress(code_entry)->IsEvacuationCandidate() &&
+        IsHostObjectLive()) {
+      RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
+                                             nullptr, CODE_ENTRY_SLOT,
+                                             code_entry_slot);
+    }
+  }
+
+  inline void VisitCodeTarget(RelocInfo* rinfo) final { UNREACHABLE(); }
+  inline void VisitDebugTarget(RelocInfo* rinfo) final { UNREACHABLE(); }
+  inline void VisitEmbeddedPointer(RelocInfo* rinfo) final { UNREACHABLE(); }
+  inline void VisitCell(RelocInfo* rinfo) final { UNREACHABLE(); }
+  inline void VisitCodeAgeSequence(RelocInfo* rinfo) final { UNREACHABLE(); }
+
+ private:
+  // Only record slots for host objects that are considered live by the full
+  // collector.
+  inline bool IsHostObjectLive() {
+    DCHECK_NOT_NULL(host_);
+    return ObjectMarking::IsBlack(host_, collector_->marking_state(host_));
+  }
+
+  inline void RecordMigratedSlot(Object* value, Address slot) final {
+    if (value->IsHeapObject()) {
+      Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
+      if (p->InNewSpace()) {
+        RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+      } else if (p->IsEvacuationCandidate() && IsHostObjectLive()) {
+        RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
+      }
+    }
+  }
 };
 class HeapObjectVisitor {
@@ -1705,15 +1843,61 @@ class HeapObjectVisitor {
   virtual bool Visit(HeapObject* object) = 0;
 };
+class MigrationObserver {
+ public:
+  explicit MigrationObserver(Heap* heap) : heap_(heap) {}
+
+  virtual ~MigrationObserver() {}
+  virtual inline void Move(HeapObject* src, HeapObject* dst) {}
+
+ protected:
+  Heap* heap_;
+};
+
+class YoungGenerationMigrationObserver : public MigrationObserver {
+ public:
+  YoungGenerationMigrationObserver(
+      Heap* heap, MarkCompactCollector* mark_compact_collector,
+      std::vector<HeapObject*>* black_allocation_objects)
+      : MigrationObserver(heap),
+        mark_compact_collector_(mark_compact_collector),
+        black_allocation_objects_(black_allocation_objects) {}
+
+  inline void Move(HeapObject* src, HeapObject* dst) final {
+    // Migrate color to old generation marking in case the object survived
+    // young generation garbage collection.
+    if (heap_->incremental_marking()->IsMarking()) {
+      const MarkingState state = mark_compact_collector_->marking_state(dst);
+      if (heap_->incremental_marking()->black_allocation() &&
+          ObjectMarking::IsBlack(dst, state)) {
+        base::LockGuard<base::Mutex> guard(&mutex_);
+        black_allocation_objects_->push_back(dst);
+      }
+
+      // Transfer old generation marking state.
+      if (!ObjectMarking::IsBlack(dst, state)) {
+        IncrementalMarking::TransferColor<MarkBit::ATOMIC>(src, dst);
+      }
+    }
+  }
+
+ protected:
+  base::Mutex mutex_;
+  MarkCompactCollector* mark_compact_collector_;
+  std::vector<HeapObject*>* black_allocation_objects_;
+};
+
 class EvacuateVisitorBase : public HeapObjectVisitor {
  protected:
   enum MigrationMode { kFast, kProfiled };
   EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
-                      RecordMigratedSlotVisitor* record_visitor)
+                      RecordMigratedSlotVisitor* record_visitor,
+                      MigrationObserver* migration_observer)
       : heap_(heap),
         compaction_spaces_(compaction_spaces),
         record_visitor_(record_visitor),
+        migration_observer_(migration_observer),
         profiling_(
             heap->isolate()->is_profiling() ||
             heap->isolate()->logger()->is_logging_code_events() ||
@@ -1758,7 +1942,12 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
       PROFILE(heap_->isolate(),
               CodeMoveEvent(AbstractCode::cast(src), dst_addr));
     }
-    dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_);
+    migration_observer_->Move(src, dst);
+    {
+      RecordMigratedSlotVisitor::HostScope host_scope(record_visitor_, dst);
Michael Lippautz (2017/04/21 07:05:52):
> This hack and all its consequences (stateful visit
Hannes Payer (out of office) (2017/04/21 14:46:26):
> Nice!
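Aside (not part of the CL): the "hack" under discussion is the stateful host_ field that HostScope installs and clears via RAII. A minimal self-contained sketch of the idiom follows; the simplified types and names are assumptions for illustration, not V8's real signatures:

    // Sketch: RAII helper that pins a "current host" on a visitor so that
    // nested slot-recording callbacks can consult the host's liveness.
    struct HeapObject {};

    class Visitor {
     public:
      class HostScope {
       public:
        HostScope(Visitor* visitor, HeapObject* object) : visitor_(visitor) {
          visitor_->host_ = object;  // install state on construction
        }
        ~HostScope() { visitor_->host_ = nullptr; }  // cleared on any exit

       private:
        Visitor* visitor_;
      };

      HeapObject* host() const { return host_; }

     private:
      HeapObject* host_ = nullptr;
    };

The cost the reviewers point at: the visitor now carries mutable state, so a single instance cannot safely be shared by concurrent evacuation tasks; accordingly, the CL allocates one slot recorder per evacuator task (see CreateAndExecuteEvacuationTasks further down).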
+      dst->IterateBodyFast(dst->map()->instance_type(), size,
+                           record_visitor_);
+    }
   } else if (dest == CODE_SPACE) {
     DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
     if (mode == kProfiled) {
@@ -1767,12 +1956,17 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
     }
     heap_->CopyBlock(dst_addr, src_addr, size);
     Code::cast(dst)->Relocate(dst_addr - src_addr);
-    RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
-    dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_);
+    migration_observer_->Move(src, dst);
Michael Lippautz (2017/04/21 07:05:52):
> I plan to make the use of MigrationObserver templa
Hannes Payer (out of office) (2017/04/21 14:46:26):
> During evacuation you mean, right?
Michael Lippautz (2017/04/24 13:15:08):
> Yes.
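Aside (not part of the CL): a hedged sketch of the templatization mlippautz mentions. If the observer becomes a template parameter, Move() is dispatched statically during evacuation, so the no-op base observer can inline to nothing while a stateful observer keeps its behavior. Types and names below are illustrative assumptions, not V8's real API:

    struct HeapObject {};

    // No-op observer: with static dispatch the call folds away entirely.
    struct NoOpObserver {
      void Move(HeapObject* src, HeapObject* dst) {}
    };

    // A stateful observer keeps its behavior, still without a virtual call.
    struct CountingObserver {
      int moved = 0;
      void Move(HeapObject* src, HeapObject* dst) { ++moved; }
    };

    template <typename Observer>
    inline void MigrateObject(Observer* observer, HeapObject* src,
                              HeapObject* dst) {
      // ... copy the payload and fix up slots ...
      observer->Move(src, dst);  // resolved at compile time per instantiation
    }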
+    {
+      RecordMigratedSlotVisitor::HostScope host_scope(record_visitor_, dst);
+      dst->IterateBodyFast(dst->map()->instance_type(), size,
+                           record_visitor_);
+    }
   } else {
     DCHECK_OBJECT_SIZE(size);
     DCHECK(dest == NEW_SPACE);
     heap_->CopyBlock(dst_addr, src_addr, size);
+    migration_observer_->Move(src, dst);
   }
   if (mode == kProfiled) {
     heap_->OnMoveEvent(dst, src, size);
@@ -1804,6 +1998,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
   Heap* heap_;
   CompactionSpaceCollection* compaction_spaces_;
   RecordMigratedSlotVisitor* record_visitor_;
+  MigrationObserver* migration_observer_;
   bool profiling_;
 };
@@ -1815,8 +2010,10 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    CompactionSpaceCollection* compaction_spaces,
                                    RecordMigratedSlotVisitor* record_visitor,
+                                   MigrationObserver* migration_observer,
                                    base::HashMap* local_pretenuring_feedback)
-      : EvacuateVisitorBase(heap, compaction_spaces, record_visitor),
+      : EvacuateVisitorBase(heap, compaction_spaces, record_visitor,
+                            migration_observer),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         space_to_allocate_(NEW_SPACE),
         promoted_size_(0),
@@ -1999,8 +2196,10 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
                           CompactionSpaceCollection* compaction_spaces,
-                          RecordMigratedSlotVisitor* record_visitor)
-      : EvacuateVisitorBase(heap, compaction_spaces, record_visitor) {}
+                          RecordMigratedSlotVisitor* record_visitor,
+                          MigrationObserver* migration_observer)
+      : EvacuateVisitorBase(heap, compaction_spaces, record_visitor,
+                            migration_observer) {}
   inline bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
@@ -2359,7 +2558,7 @@ static bool IsUnmarkedObject(Heap* heap, Object** p) {
   DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
   return heap->InNewSpace(*p) &&
          !ObjectMarking::IsBlack(HeapObject::cast(*p),
-                                 MarkingState::Internal(HeapObject::cast(*p)));
+                                 MarkingState::External(HeapObject::cast(*p)));
 }
 void MinorMarkCompactCollector::MarkLiveObjects() {
@@ -2411,8 +2610,8 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
         &IsUnmarkedObject);
     isolate()
         ->global_handles()
-        ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>(
-            &root_visitor);
+        ->IterateNewSpaceWeakUnmodifiedRoots<
+            GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&root_visitor);
     ProcessMarkingDeque();
   }
@@ -2444,14 +2643,123 @@ void MinorMarkCompactCollector::EmptyMarkingDeque() {
 }
 void MinorMarkCompactCollector::CollectGarbage() {
-  MarkLiveObjects();
+  heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+  MarkLiveObjects();
+  ClearNonLiveReferences();
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     YoungGenerationMarkingVerifier verifier(heap());
     verifier.Run();
   }
 #endif  // VERIFY_HEAP
+
+  std::vector<HeapObject*> black_allocation_objects;
+  EvacuateNewSpace(&black_allocation_objects);
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    YoungGenerationEvacuationVerifier verifier(heap());
+    verifier.Run();
+  }
+#endif  // VERIFY_HEAP
+
+  heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge();
+
+  // Process black allocation objects after updating pointers as we otherwise
+  // would end up with objects on the marking deque that potentially forward
+  // to white objects.
+  // TODO(mlippautz): Instead of processing them explicitly, we should just add
+  // them to the marking deque for further processing.
+  {
+    TRACE_GC(heap()->tracer(),
+             GCTracer::Scope::MINOR_MC_EVACUATE_PROCESS_BLACK_ALLOCATION);
+    for (HeapObject* object : black_allocation_objects) {
+      CHECK(ObjectMarking::IsBlack(object, MarkingState::Internal(object)));
+      heap()->incremental_marking()->IterateBlackObject(object);
+    }
+    heap()->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+  }
+
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_LIVENESS);
+    for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
+                             heap()->new_space()->FromSpaceEnd())) {
+      marking_state(p).ClearLiveness();
+    }
+  }
+}
+
+void MinorMarkCompactCollector::ClearNonLiveReferences() {
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
+
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
+    // Internalized strings are always stored in old space, so there is no need
+    // to clean them here.
+    YoungGenerationExternalStringTableCleaner external_visitor(*this);
+    heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
+    heap()->external_string_table_.CleanUpNewSpaceStrings();
+  }
+
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
+    // Process the weak references.
+    MinorMarkCompactWeakObjectRetainer retainer(*this);
+    heap()->ProcessYoungWeakReferences(&retainer);
+  }
+}
+
+void MinorMarkCompactCollector::EvacuatePrologue() {
+  NewSpace* new_space = heap()->new_space();
+  // Append the list of new space pages to be processed.
+  for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
+    new_space_evacuation_pages_.Add(p);
+  }
+  new_space->Flip();
+  new_space->ResetAllocationInfo();
+}
+
+void MinorMarkCompactCollector::EvacuateEpilogue() {
+  heap()->new_space()->set_age_mark(heap()->new_space()->top());
+}
+
+void MinorMarkCompactCollector::EvacuateNewSpace(
+    std::vector<HeapObject*>* black_allocation_objects) {
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
+  Heap::RelocationLock relocation_lock(heap());
+
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
+    EvacuatePrologue();
+  }
+
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
+    EvacuatePagesInParallel(black_allocation_objects);
+  }
+
+  UpdatePointersAfterEvacuation();
+
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
+    if (!heap()->new_space()->Rebalance()) {
+      FatalProcessOutOfMemory("NewSpace::Rebalance");
+    }
+  }
+
+  // Give pages that are queued to be freed back to the OS.
+  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
+    // TODO(mlippautz): Implement page promotion.
+    new_space_evacuation_pages_.Rewind(0);
+  }
+
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
+    EvacuateEpilogue();
+  }
 }
 void MarkCompactCollector::MarkLiveObjects() {
@@ -3123,24 +3431,27 @@ class Evacuator : public Malloced {
     return Page::kAllocatableMemory + kPointerSize;
   }
-  Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
+  Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
+            MigrationObserver* migration_observer)
       : heap_(heap),
         compaction_spaces_(heap_),
        local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(heap_, &compaction_spaces_, record_visitor,
-                           &local_pretenuring_feedback_),
+                           migration_observer, &local_pretenuring_feedback_),
         new_to_new_page_visitor_(heap_, record_visitor,
                                  &local_pretenuring_feedback_),
         new_to_old_page_visitor_(heap_, record_visitor,
                                  &local_pretenuring_feedback_),
-        old_space_visitor_(heap_, &compaction_spaces_, record_visitor),
+        old_space_visitor_(heap_, &compaction_spaces_, record_visitor,
+                           migration_observer),
         duration_(0.0),
         bytes_compacted_(0) {}
   virtual ~Evacuator() {}
-  virtual bool EvacuatePage(Page* page, const MarkingState& state) = 0;
+  bool EvacuatePage(Page* page, const MarkingState& state);
+  virtual bool EvacuatePageImpl(Page* page, const MarkingState& state) = 0;
   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
@@ -3178,6 +3489,34 @@ class Evacuator : public Malloced {
   intptr_t bytes_compacted_;
 };
+bool Evacuator::EvacuatePage(Page* page, const MarkingState& state) {
+  bool success = false;
+  DCHECK(page->SweepingDone());
+  intptr_t saved_live_bytes = state.live_bytes();
+  double evacuation_time = 0.0;
+  {
+    AlwaysAllocateScope always_allocate(heap()->isolate());
+    TimedScope timed_scope(&evacuation_time);
+    success = EvacuatePageImpl(page, state);
+  }
+  ReportCompactionProgress(evacuation_time, saved_live_bytes);
+  if (FLAG_trace_evacuation) {
+    PrintIsolate(
+        heap()->isolate(),
+        "evacuation[%p]: page=%p new_space=%d "
+        "page_evacuation=%d executable=%d contains_age_mark=%d "
+        "live_bytes=%" V8PRIdPTR " time=%f page_promotion_qualifies=%d\n",
+        static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
+        page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+            page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+        page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+        page->Contains(heap()->new_space()->age_mark()), saved_live_bytes,
+        evacuation_time,
+        saved_live_bytes > Evacuator::PageEvacuationThreshold());
+  }
+  return success;
+}
+
 void Evacuator::Finalize() {
   heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
   heap()->code_space()->MergeCompactionSpace(
@@ -3198,130 +3537,122 @@ void Evacuator::Finalize() {
 class FullEvacuator : public Evacuator {
  public:
-  FullEvacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
-      : Evacuator(heap, record_visitor) {}
+  FullEvacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
+                MigrationObserver* migration_observer)
+      : Evacuator(heap, record_visitor, migration_observer) {}
-  bool EvacuatePage(Page* page, const MarkingState& state) override;
+  bool EvacuatePageImpl(Page* page, const MarkingState& state) override;
 };
-bool FullEvacuator::EvacuatePage(Page* page, const MarkingState& state) {
+bool FullEvacuator::EvacuatePageImpl(Page* page, const MarkingState& state) {
   bool success = false;
-  DCHECK(page->SweepingDone());
-  intptr_t saved_live_bytes = state.live_bytes();
-  double evacuation_time = 0.0;
-  {
-    AlwaysAllocateScope always_allocate(heap()->isolate());
-    TimedScope timed_scope(&evacuation_time);
-    LiveObjectVisitor object_visitor;
-    switch (ComputeEvacuationMode(page)) {
-      case kObjectsNewToOld:
-        success =
-            object_visitor.VisitBlackObjects(page, state, &new_space_visitor_,
-                                             LiveObjectVisitor::kClearMarkbits);
+  LiveObjectVisitor object_visitor;
+  switch (ComputeEvacuationMode(page)) {
+    case kObjectsNewToOld:
+      success = object_visitor.VisitBlackObjects(
+          page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
+      DCHECK(success);
+      ArrayBufferTracker::ProcessBuffers(
+          page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+      break;
+    case kPageNewToOld:
+      success = object_visitor.VisitBlackObjects(
+          page, state, &new_to_old_page_visitor_,
+          LiveObjectVisitor::kKeepMarking);
+      DCHECK(success);
+      new_to_old_page_visitor_.account_moved_bytes(
+          MarkingState::Internal(page).live_bytes());
+      // ArrayBufferTracker will be updated during sweeping.
+      break;
+    case kPageNewToNew:
+      success = object_visitor.VisitBlackObjects(
+          page, state, &new_to_new_page_visitor_,
+          LiveObjectVisitor::kKeepMarking);
+      DCHECK(success);
+      new_to_new_page_visitor_.account_moved_bytes(
+          MarkingState::Internal(page).live_bytes());
+      // ArrayBufferTracker will be updated during sweeping.
+      break;
+    case kObjectsOldToOld:
+      success = object_visitor.VisitBlackObjects(
+          page, state, &old_space_visitor_, LiveObjectVisitor::kClearMarkbits);
+      if (!success) {
+        // Aborted compaction page. We have to record slots here, since we
+        // might not have recorded them in the first place.
+        // Note: We mark the page as aborted here to be able to record slots
+        // for code objects in |RecordMigratedSlotVisitor|.
+        page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+        EvacuateRecordOnlyVisitor record_visitor(heap());
+        success = object_visitor.VisitBlackObjects(
+            page, state, &record_visitor, LiveObjectVisitor::kKeepMarking);
+        ArrayBufferTracker::ProcessBuffers(
+            page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
         DCHECK(success);
+        // We need to return failure here to indicate that we want this page
+        // added to the sweeper.
+        success = false;
+      } else {
         ArrayBufferTracker::ProcessBuffers(
             page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
-        break;
-      case kPageNewToOld:
-        success = object_visitor.VisitBlackObjects(
-            page, state, &new_to_old_page_visitor_,
-            LiveObjectVisitor::kKeepMarking);
-        DCHECK(success);
-        new_to_old_page_visitor_.account_moved_bytes(
-            MarkingState::Internal(page).live_bytes());
-        // ArrayBufferTracker will be updated during sweeping.
-        break;
-      case kPageNewToNew:
-        success = object_visitor.VisitBlackObjects(
-            page, state, &new_to_new_page_visitor_,
-            LiveObjectVisitor::kKeepMarking);
-        DCHECK(success);
-        new_to_new_page_visitor_.account_moved_bytes(
-            MarkingState::Internal(page).live_bytes());
-        // ArrayBufferTracker will be updated during sweeping.
-        break;
-      case kObjectsOldToOld:
-        success =
-            object_visitor.VisitBlackObjects(page, state, &old_space_visitor_,
-                                             LiveObjectVisitor::kClearMarkbits);
-        if (!success) {
-          // Aborted compaction page. We have to record slots here, since we
-          // might not have recorded them in first place.
-          // Note: We mark the page as aborted here to be able to record slots
-          // for code objects in |RecordMigratedSlotVisitor|.
-          page->SetFlag(Page::COMPACTION_WAS_ABORTED);
-          EvacuateRecordOnlyVisitor record_visitor(heap());
-          success = object_visitor.VisitBlackObjects(
-              page, state, &record_visitor, LiveObjectVisitor::kKeepMarking);
-          ArrayBufferTracker::ProcessBuffers(
-              page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
-          DCHECK(success);
-          // We need to return failure here to indicate that we want this page
-          // added to the sweeper.
-          success = false;
-        } else {
-          ArrayBufferTracker::ProcessBuffers(
-              page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
-        }
-        break;
-    }
-  }
-  ReportCompactionProgress(evacuation_time, saved_live_bytes);
-  if (FLAG_trace_evacuation) {
-    PrintIsolate(heap()->isolate(),
-                 "evacuation[%p]: page=%p new_space=%d "
-                 "page_evacuation=%d executable=%d contains_age_mark=%d "
-                 "live_bytes=%" V8PRIdPTR " time=%f\n",
-                 static_cast<void*>(this), static_cast<void*>(page),
-                 page->InNewSpace(),
-                 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
-                     page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
-                 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
-                 page->Contains(heap()->new_space()->age_mark()),
-                 saved_live_bytes, evacuation_time);
+      }
+      break;
   }
   return success;
 }
-int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
-                                                          intptr_t live_bytes) {
-  if (!FLAG_parallel_compaction) return 1;
-  // Compute the number of needed tasks based on a target compaction time, the
-  // profiled compaction speed and marked live memory.
-  //
-  // The number of parallel compaction tasks is limited by:
-  // - #evacuation pages
-  // - #cores
-  const double kTargetCompactionTimeInMs = .5;
+class YoungGenerationEvacuator : public Evacuator {
+ public:
+  YoungGenerationEvacuator(Heap* heap,
+                           RecordMigratedSlotVisitor* record_visitor,
+                           MigrationObserver* migration_observer)
+      : Evacuator(heap, record_visitor, migration_observer) {}
-  double compaction_speed =
-      heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+  bool EvacuatePageImpl(Page* page, const MarkingState& state) override;
+};
-  const int available_cores = Max(
-      1, static_cast<int>(
-             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
-  int tasks;
-  if (compaction_speed > 0) {
-    tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
-                                 kTargetCompactionTimeInMs);
-  } else {
-    tasks = pages;
+bool YoungGenerationEvacuator::EvacuatePageImpl(Page* page,
+                                                const MarkingState& state) {
+  bool success = false;
+  LiveObjectVisitor object_visitor;
+  switch (ComputeEvacuationMode(page)) {
+    case kObjectsNewToOld:
+      success = object_visitor.VisitBlackObjects(
+          page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
+      DCHECK(success);
+      ArrayBufferTracker::ProcessBuffers(
+          page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+      break;
+    case kPageNewToOld:
+      // TODO(mlippautz): Implement page promotion.
+      UNREACHABLE();
+      break;
+    case kPageNewToNew:
+      // TODO(mlippautz): Implement page promotion.
+      UNREACHABLE();
+      break;
+    case kObjectsOldToOld:
+      UNREACHABLE();
+      break;
   }
-  const int tasks_capped_pages = Min(pages, tasks);
-  return Min(available_cores, tasks_capped_pages);
+  return success;
 }
 class EvacuationJobTraits {
  public:
-  typedef int* PerPageData;  // Pointer to number of aborted pages.
+  struct PageData {
+    int* abandoned_pages;  // Pointer to number of aborted pages.
+    MarkingState marking_state;
+  };
+
+  typedef PageData PerPageData;
   typedef Evacuator* PerTaskData;
   static const bool NeedSequentialFinalization = true;
   static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
-                                    MemoryChunk* chunk, PerPageData) {
+                                    MemoryChunk* chunk, PerPageData data) {
     return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk),
-                                   MarkingState::Internal(chunk));
+                                   data.marking_state);
   }
   static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
@@ -3347,7 +3678,7 @@ class EvacuationJobTraits {
         p->ClearEvacuationCandidate();
         // Slots have already been recorded so we just need to add it to the
         // sweeper, which will happen after updating pointers.
-        *data += 1;
+        *data.abandoned_pages += 1;
       }
       break;
     default:
@@ -3356,6 +3687,90 @@ class EvacuationJobTraits {
   }
 };
+namespace {
+
+// The number of parallel compaction tasks, including the main thread.
+int NumberOfParallelCompactionTasks(Heap* heap, int pages,
+                                    intptr_t live_bytes) {
+  if (!FLAG_parallel_compaction) return 1;
+  // Compute the number of needed tasks based on a target compaction time, the
+  // profiled compaction speed and marked live memory.
+  //
+  // The number of parallel compaction tasks is limited by:
+  // - #evacuation pages
+  // - #cores
+  const double kTargetCompactionTimeInMs = .5;
+
+  const double compaction_speed =
+      heap->tracer()->CompactionSpeedInBytesPerMillisecond();
+
+  const int available_cores = Max(
+      1, static_cast<int>(
+             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
+  int tasks;
+  if (compaction_speed > 0) {
+    tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
+                                 kTargetCompactionTimeInMs);
+  } else {
+    tasks = pages;
+  }
+  const int tasks_capped_pages = Min(pages, tasks);
+  return Min(available_cores, tasks_capped_pages);
+}
+
+template <class E, class SV>
+void CreateAndExecuteEvacuationTasks(Heap* heap,
+                                     PageParallelJob<EvacuationJobTraits>* job,
+                                     RecordMigratedSlotVisitor* record_visitor,
+                                     MigrationObserver* observer,
+                                     const intptr_t live_bytes,
+                                     const int& abandoned_pages) {
+  // Used for trace summary.
+  double compaction_speed = 0;
+  if (FLAG_trace_evacuation) {
+    compaction_speed = heap->tracer()->CompactionSpeedInBytesPerMillisecond();
+  }
+
+  const int wanted_num_tasks =
+      NumberOfParallelCompactionTasks(heap, job->NumberOfPages(), live_bytes);
+  E** evacuators = new E*[wanted_num_tasks];
+  SV** slots_recorders = new SV*[wanted_num_tasks];
+  for (int i = 0; i < wanted_num_tasks; i++) {
+    slots_recorders[i] = new SV(heap->mark_compact_collector());
+    evacuators[i] = new E(heap, slots_recorders[i], observer);
+  }
+  job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
+  const Address top = heap->new_space()->top();
+  for (int i = 0; i < wanted_num_tasks; i++) {
+    evacuators[i]->Finalize();
+    // Try to find the last LAB that was used for new space allocation in
+    // evacuation tasks. If it was adjacent to the current top, move top back.
+    const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB();
+    if (info.limit() != nullptr && info.limit() == top) {
+      DCHECK_NOT_NULL(info.top());
+      *heap->new_space()->allocation_top_address() = info.top();
+    }
+    delete evacuators[i];
+    delete slots_recorders[i];
+  }
+  delete[] evacuators;
+  delete[] slots_recorders;
+
+  if (FLAG_trace_evacuation) {
+    PrintIsolate(heap->isolate(),
+                 "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
+                 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
+                 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
+                 heap->isolate()->time_millis_since_init(),
+                 FLAG_parallel_compaction ? "yes" : "no", job->NumberOfPages(),
+                 abandoned_pages, wanted_num_tasks, job->NumberOfTasks(),
+                 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
+                 live_bytes, compaction_speed);
+  }
+}
+
+}  // namespace
+
 void MarkCompactCollector::EvacuatePagesInParallel() {
   PageParallelJob<EvacuationJobTraits> job(
       heap_, heap_->isolate()->cancelable_task_manager(),
@@ -3365,7 +3780,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   intptr_t live_bytes = 0;
   for (Page* page : old_space_evacuation_pages_) {
     live_bytes += MarkingState::Internal(page).live_bytes();
-    job.AddPage(page, &abandoned_pages);
+    job.AddPage(page, {&abandoned_pages, marking_state(page)});
   }
   const bool reduce_memory = heap()->ShouldReduceMemory();
@@ -3384,49 +3799,37 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
     }
   }
-    job.AddPage(page, &abandoned_pages);
+    job.AddPage(page, {&abandoned_pages, marking_state(page)});
   }
   DCHECK_GE(job.NumberOfPages(), 1);
-  // Used for trace summary.
-  double compaction_speed = 0;
-  if (FLAG_trace_evacuation) {
-    compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
-  }
+  MigrationObserver observer(heap());
+  CreateAndExecuteEvacuationTasks<FullEvacuator, RecordMigratedSlotVisitor>(
+      heap(), &job, nullptr, &observer, live_bytes, abandoned_pages);
+}
-  const int wanted_num_tasks =
-      NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
-  FullEvacuator** evacuators = new FullEvacuator*[wanted_num_tasks];
-  RecordMigratedSlotVisitor record_visitor(this);
-  for (int i = 0; i < wanted_num_tasks; i++) {
-    evacuators[i] = new FullEvacuator(heap(), &record_visitor);
-  }
-  job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
-  const Address top = heap()->new_space()->top();
-  for (int i = 0; i < wanted_num_tasks; i++) {
-    evacuators[i]->Finalize();
-    // Try to find the last LAB that was used for new space allocation in
-    // evacuation tasks. If it was adjacent to the current top, move top back.
-    const AllocationInfo info = evacuators[i]->CloseNewSpaceLAB();
-    if (info.limit() != nullptr && info.limit() == top) {
-      DCHECK_NOT_NULL(info.top());
-      *heap()->new_space()->allocation_top_address() = info.top();
-    }
-    delete evacuators[i];
-  }
-  delete[] evacuators;
+void MinorMarkCompactCollector::EvacuatePagesInParallel(
+    std::vector<HeapObject*>* black_allocation_objects) {
+  PageParallelJob<EvacuationJobTraits> job(
+      heap_, heap_->isolate()->cancelable_task_manager(),
+      &page_parallel_job_semaphore_);
-  if (FLAG_trace_evacuation) {
-    PrintIsolate(isolate(),
-                 "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
-                 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
-                 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
-                 isolate()->time_millis_since_init(),
-                 FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
-                 abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
-                 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
-                 live_bytes, compaction_speed);
+  int abandoned_pages = 0;
+  intptr_t live_bytes = 0;
+
+  for (Page* page : new_space_evacuation_pages_) {
+    intptr_t live_bytes_on_page = marking_state(page).live_bytes();
+    live_bytes += live_bytes_on_page;
+    // TODO(mlippautz): Implement page promotion.
+    job.AddPage(page, {&abandoned_pages, marking_state(page)});
   }
+  DCHECK_GE(job.NumberOfPages(), 1);
+
+  YoungGenerationMigrationObserver observer(
+      heap(), heap()->mark_compact_collector(), black_allocation_objects);
+  CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator,
Michael Lippautz (2017/04/21 07:05:52):
> The idea is to have a stateless YoungGenerationRec
Hannes Payer (out of office) (2017/04/21 14:46:26):
> Acknowledged.
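Aside (not part of the CL): one reading of that "stateless" plan, offered as an assumption, is to thread the host through the recording interface instead of storing it on the visitor, so a recorder holds no per-iteration state and can be shared across tasks without the set_host()/HostScope bookkeeping. Illustrative names only:

    struct HeapObject {};
    typedef unsigned char* Address;

    class StatelessSlotRecorder {
     public:
      // The host is an explicit argument rather than visitor state, so there
      // is nothing to install or clear and no data race between tasks.
      void RecordMigratedSlot(HeapObject* host, HeapObject* value,
                              Address slot) {
        // ... consult the host's mark bit, insert into the remembered set ...
      }
    };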
+                                  YoungGenerationRecordMigratedSlotVisitor>(
+      heap(), &job, nullptr, &observer, live_bytes, abandoned_pages);
 }
 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
@@ -3751,6 +4154,7 @@ class PointerUpdateJobTraits {
  private:
   static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
+    base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
     if (type == OLD_TO_NEW) {
       RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
         return CheckAndUpdateOldToNewSlot(heap, slot);
@@ -3857,17 +4261,23 @@ void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
 class ToSpacePointerUpdateJobTraits {
  public:
-  typedef std::pair<Address, Address> PerPageData;
+  struct PageData {
+    Address start;
+    Address end;
+    MarkingState marking_state;
+  };
+
+  typedef PageData PerPageData;
   typedef PointersUpdatingVisitor* PerTaskData;
   static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
-                                    MemoryChunk* chunk, PerPageData limits) {
+                                    MemoryChunk* chunk, PerPageData page_data) {
     if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
       // New->new promoted pages contain garbage so they require iteration
       // using markbits.
-      ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
+      ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data);
     } else {
-      ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
+      ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data);
     }
     return true;
   }
@@ -3879,8 +4289,8 @@ class ToSpacePointerUpdateJobTraits {
  private:
   static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
                                             MemoryChunk* chunk,
-                                            PerPageData limits) {
-    for (Address cur = limits.first; cur < limits.second;) {
+                                            PerPageData page_data) {
+    for (Address cur = page_data.start; cur < page_data.end;) {
       HeapObject* object = HeapObject::FromAddress(cur);
       Map* map = object->map();
       int size = object->SizeFromMap(map);
@@ -3891,8 +4301,8 @@ class ToSpacePointerUpdateJobTraits {
   static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
                                              MemoryChunk* chunk,
-                                             PerPageData limits) {
-    LiveObjectIterator<kBlackObjects> it(chunk, MarkingState::Internal(chunk));
+                                             PerPageData page_data) {
+    LiveObjectIterator<kBlackObjects> it(chunk, page_data.marking_state);
     HeapObject* object = NULL;
     while ((object = it.Next()) != NULL) {
       Map* map = object->map();
@@ -3902,7 +4312,10 @@ class ToSpacePointerUpdateJobTraits {
   }
 };
-void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
+template <class MarkingStateProvider>
+void UpdateToSpacePointersInParallel(
+    Heap* heap, base::Semaphore* semaphore,
+    const MarkingStateProvider& marking_state_provider) {
   PageParallelJob<ToSpacePointerUpdateJobTraits> job(
       heap, heap->isolate()->cancelable_task_manager(), semaphore);
   Address space_start = heap->new_space()->bottom();
@@ -3911,7 +4324,7 @@ void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
     Address start =
         page->Contains(space_start) ? space_start : page->area_start();
     Address end = page->Contains(space_end) ? space_end : page->area_end();
-    job.AddPage(page, std::make_pair(start, end));
+    job.AddPage(page, {start, end, marking_state_provider.marking_state(page)});
   }
   PointersUpdatingVisitor visitor;
   int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
@@ -3926,7 +4339,8 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
   {
     TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
-    UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_);
+    UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
+                                    *this);
     // Update roots.
     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
     UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
@@ -3951,6 +4365,37 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
   }
 }
+void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
+
+  PointersUpdatingVisitor updating_visitor;
+
+  {
+    TRACE_GC(heap()->tracer(),
+             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
+    UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
+                                    *this);
+    // TODO(mlippautz): Iteration mode is not optimal as we process all
+    // global handles. Find a way to only process the ones related to new
+    // space.
+    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+    UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
+  }
+
+  {
+    TRACE_GC(heap()->tracer(),
+             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
+
+    EvacuationWeakObjectRetainer evacuation_object_retainer;
+    heap()->ProcessWeakListRoots(&evacuation_object_retainer);
+
+    // Update pointers from external string table.
+    heap()->UpdateNewSpaceReferencesInExternalStringTable(
+        &UpdateReferenceInExternalStringTableEntry);
+    heap()->VisitEncounteredWeakCollections(&updating_visitor);
+    heap()->set_encountered_weak_collections(Smi::kZero);
+  }
+}
 void MarkCompactCollector::ReleaseEvacuationCandidates() {
   for (Page* p : old_space_evacuation_pages_) {