Chromium Code Reviews

Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 65bfdd92d871a262e517452c8c14fe0989482d9b..6387ce6c2c6efdad00b5a28fa013be7585fcaf6d 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -322,7 +322,7 @@ void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
                            GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
   int number_of_pages = evacuation_candidates_.length();
   for (int i = 0; i < number_of_pages; i++) {
-    Page* p = evacuation_candidates_[i];
+    MemoryChunk* p = evacuation_candidates_[i];
    SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
  }
 }
@@ -833,7 +833,7 @@ void MarkCompactCollector::AbortCompaction() {
   if (compacting_) {
     int npages = evacuation_candidates_.length();
     for (int i = 0; i < npages; i++) {
-      Page* p = evacuation_candidates_[i];
+      MemoryChunk* p = evacuation_candidates_[i];
       slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
       p->ClearEvacuationCandidate();
       p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
@@ -1551,8 +1551,11 @@ class MarkCompactCollector::HeapObjectVisitor {
 class MarkCompactCollector::EvacuateVisitorBase
     : public MarkCompactCollector::HeapObjectVisitor {
  public:
-  EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
-      : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
+  EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer,
+                      CompactionSpaceCollection* compaction_spaces)
+      : heap_(heap),
+        evacuation_slots_buffer_(evacuation_slots_buffer),
+        compaction_spaces_(compaction_spaces) {}

   bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
                          HeapObject** target_object) {
@@ -1562,7 +1565,7 @@ class MarkCompactCollector::EvacuateVisitorBase
     if (allocation.To(target_object)) {
       heap_->mark_compact_collector()->MigrateObject(
           *target_object, object, size, target_space->identity(),
-          evacuation_slots_buffer_);
+          evacuation_slots_buffer_, compaction_spaces_->local_store_buffer());
       return true;
     }
     return false;
@@ -1571,6 +1574,7 @@ class MarkCompactCollector::EvacuateVisitorBase
  protected:
   Heap* heap_;
   SlotsBuffer** evacuation_slots_buffer_;
+  CompactionSpaceCollection* compaction_spaces_;
 };
@@ -1582,20 +1586,22 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    SlotsBuffer** evacuation_slots_buffer,
-                                   HashMap* local_pretenuring_feedback)
-      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+                                   CompactionSpaceCollection* compaction_spaces)
+      : EvacuateVisitorBase(heap, evacuation_slots_buffer, compaction_spaces),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         space_to_allocate_(NEW_SPACE),
         promoted_size_(0),
         semispace_copied_size_(0),
-        local_pretenuring_feedback_(local_pretenuring_feedback) {}
+        local_pretenuring_feedback_(
+            compaction_spaces->local_pretenuring_feedback()) {}

   bool Visit(HeapObject* object) override {
     heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
     int size = object->Size();
     HeapObject* target_object = nullptr;
     if (heap_->ShouldBePromoted(object->address(), size) &&
-        TryEvacuateObject(heap_->old_space(), object, &target_object)) {
+        TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
+                          &target_object)) {
       // If we end up needing more special cases, we should factor this out.
       if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
         heap_->array_buffer_tracker()->Promote(
@@ -1608,7 +1614,8 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
     AllocationSpace space = AllocateTargetObject(object, &target);
     heap_->mark_compact_collector()->MigrateObject(
         HeapObject::cast(target), object, size, space,
-        (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
+        (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_,
+        compaction_spaces_->local_store_buffer());
     if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
       heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
     }
@@ -1680,8 +1687,8 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
   inline AllocationResult AllocateInOldSpace(int size_in_bytes,
                                              AllocationAlignment alignment) {
-    AllocationResult allocation =
-        heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
+    AllocationResult allocation = compaction_spaces_->Get(OLD_SPACE)
+                                      ->AllocateRaw(size_in_bytes, alignment);
     if (allocation.IsRetry()) {
       FatalProcessOutOfMemory(
           "MarkCompactCollector: semi-space copy, fallback in old gen\n");
@@ -1728,8 +1735,7 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
   EvacuateOldSpaceVisitor(Heap* heap,
                           CompactionSpaceCollection* compaction_spaces,
                           SlotsBuffer** evacuation_slots_buffer)
-      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
-        compaction_spaces_(compaction_spaces) {}
+      : EvacuateVisitorBase(heap, evacuation_slots_buffer, compaction_spaces) {}

   bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
@@ -1743,7 +1749,6 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
   }

  private:
-  CompactionSpaceCollection* compaction_spaces_;
 };
@@ -2553,12 +2558,13 @@ void MarkCompactCollector::AbortTransitionArrays() {
 void MarkCompactCollector::RecordMigratedSlot(
-    Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
+    Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer,
+    LocalStoreBuffer* local_store_buffer) {
   // When parallel compaction is in progress, store and slots buffer entries
   // require synchronization.
   if (heap_->InNewSpace(value)) {
     if (compaction_in_progress_) {
-      heap_->store_buffer()->MarkSynchronized(slot);
+      local_store_buffer->Record(slot);
     } else {
       heap_->store_buffer()->Mark(slot);
     }
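
The change above is the heart of this CL: during parallel compaction, each task records old-to-new slots into its own LocalStoreBuffer instead of taking a synchronized path into the shared store buffer; the main thread later drains the local buffers via local_store_buffer()->Process(heap()->store_buffer()) (see EvacuatePagesInParallel below). A minimal standalone sketch of that pattern, assuming simplified stand-in types (the real LocalStoreBuffer is defined elsewhere and is not part of this diff):

    #include <cstdint>
    #include <vector>

    using Address = std::uintptr_t;

    // Stand-in for the shared buffer; Mark() is safe only on the main thread.
    class StoreBuffer {
     public:
      void Mark(Address slot) { slots_.push_back(slot); }

     private:
      std::vector<Address> slots_;
    };

    // One instance per compaction task: Record() needs no synchronization
    // because the buffer is task-local.
    class LocalStoreBuffer {
     public:
      void Record(Address slot) { slots_.push_back(slot); }

      // Drained sequentially on the main thread after the tasks have joined.
      void Process(StoreBuffer* store_buffer) {
        for (Address slot : slots_) store_buffer->Mark(slot);
        slots_.clear();
      }

     private:
      std::vector<Address> slots_;
    };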
@@ -2640,19 +2646,23 @@ void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
 class RecordMigratedSlotVisitor final : public ObjectVisitor {
  public:
   RecordMigratedSlotVisitor(MarkCompactCollector* collector,
-                            SlotsBuffer** evacuation_slots_buffer)
+                            SlotsBuffer** evacuation_slots_buffer,
+                            LocalStoreBuffer* local_store_buffer)
       : collector_(collector),
-        evacuation_slots_buffer_(evacuation_slots_buffer) {}
+        evacuation_slots_buffer_(evacuation_slots_buffer),
+        local_store_buffer_(local_store_buffer) {}

   V8_INLINE void VisitPointer(Object** p) override {
     collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
-                                   evacuation_slots_buffer_);
+                                   evacuation_slots_buffer_,
+                                   local_store_buffer_);
   }

   V8_INLINE void VisitPointers(Object** start, Object** end) override {
     while (start < end) {
       collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
-                                     evacuation_slots_buffer_);
+                                     evacuation_slots_buffer_,
+                                     local_store_buffer_);
       ++start;
     }
   }
@@ -2668,6 +2678,7 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
  private:
   MarkCompactCollector* collector_;
   SlotsBuffer** evacuation_slots_buffer_;
+  LocalStoreBuffer* local_store_buffer_;
 };
@@ -2685,9 +2696,10 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
 // pointer iteration. This is an issue if the store buffer overflows and we
 // have to scan the entire old space, including dead objects, looking for
 // pointers to new space.
-void MarkCompactCollector::MigrateObject(
-    HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
-    SlotsBuffer** evacuation_slots_buffer) {
+void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
+                                         int size, AllocationSpace dest,
+                                         SlotsBuffer** evacuation_slots_buffer,
+                                         LocalStoreBuffer* local_store_buffer) {
   Address dst_addr = dst->address();
   Address src_addr = src->address();
   DCHECK(heap()->AllowedToBeMigrated(src, dest));
@@ -2698,7 +2710,8 @@ void MarkCompactCollector::MigrateObject(
     DCHECK(IsAligned(size, kPointerSize));
     heap()->MoveBlock(dst->address(), src->address(), size);
-    RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
+    RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer,
+                                      local_store_buffer);
     dst->IterateBody(&visitor);
   } else if (dest == CODE_SPACE) {
     DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
@@ -3060,54 +3073,25 @@ void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
 void MarkCompactCollector::EvacuateNewSpacePrologue() {
-  // There are soft limits in the allocation code, designed trigger a mark
-  // sweep collection by failing allocations. But since we are already in
-  // a mark-sweep allocation, there is no sense in trying to trigger one.
-  AlwaysAllocateScope scope(isolate());
-
   NewSpace* new_space = heap()->new_space();
-
-  // Store allocation range before flipping semispaces.
-  Address from_bottom = new_space->bottom();
-  Address from_top = new_space->top();
-
-  // Flip the semispaces. After flipping, to space is empty, from space has
-  // live objects.
-  new_space->Flip();
-  new_space->ResetAllocationInfo();
-
-  newspace_evacuation_candidates_.Clear();
-  NewSpacePageIterator it(from_bottom, from_top);
+  NewSpacePageIterator it(new_space->bottom(), new_space->top());
+  // Append the list of new space pages to be processed.
   while (it.has_next()) {
-    newspace_evacuation_candidates_.Add(it.next());
+    evacuation_candidates_.Add(it.next());

ulan 2016/01/15 10:44:43
As discussed offline, it would be cleaner to have

Michael Lippautz 2016/01/15 13:09:52
Done.

   }
+  new_space->Flip();
+  new_space->ResetAllocationInfo();
 }

-HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
-  HashMap* local_pretenuring_feedback = new HashMap(
-      HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
-  EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
-                                            local_pretenuring_feedback);
-  // First pass: traverse all objects in inactive semispace, remove marks,
-  // migrate live objects and write forwarding addresses. This stage puts
-  // new entries in the store buffer and may cause some pages to be marked
-  // scan-on-scavenge.
-  for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
-    NewSpacePage* p =
-        reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
-    bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
-    USE(ok);
-    DCHECK(ok);
+void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
+  // NewSpace pages have been appended to this list. We remove them by
+  // iterating over the list from the end.
+  MemoryChunk* p = nullptr;
+  while (evacuation_candidates_.length() > 0 &&
+         ((p = evacuation_candidates_.last()) != nullptr) && p->InNewSpace()) {
+    evacuation_candidates_.Remove(evacuation_candidates_.length() - 1);
   }
-  heap_->IncrementPromotedObjectsSize(
-      static_cast<int>(new_space_visitor.promoted_size()));
-  heap_->IncrementSemiSpaceCopiedObjectSize(
-      static_cast<int>(new_space_visitor.semispace_copied_size()));
-  heap_->IncrementYoungSurvivorsCounter(
-      static_cast<int>(new_space_visitor.promoted_size()) +
-      static_cast<int>(new_space_visitor.semispace_copied_size()));
-  return local_pretenuring_feedback;
 }
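
Because EvacuateNewSpacePrologue() appends new-space pages at the tail of evacuation_candidates_ and the list is not reordered in between, the epilogue can strip them by popping from the back until it hits an old-space candidate. The same invariant with a std::vector stand-in (hypothetical simplified type):

    #include <vector>

    struct MemoryChunk {
      bool in_new_space;
      bool InNewSpace() const { return in_new_space; }
    };

    // Pop trailing new-space chunks; old-space candidates stay in place.
    void EvacuateNewSpaceEpilogue(std::vector<MemoryChunk*>* candidates) {
      while (!candidates->empty() && candidates->back()->InNewSpace()) {
        candidates->pop_back();
      }
    }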
@@ -3126,39 +3110,38 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks() {
   // The number of parallel compaction tasks is limited by:
   // - #evacuation pages
   // - (#cores - 1)
-  // - a hard limit
   const double kTargetCompactionTimeInMs = 1;
-  const int kMaxCompactionTasks = 8;

Michael Lippautz 2016/01/14 19:51:55
I removed the hard limit here. We are still capped

   intptr_t compaction_speed =
       heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
-  if (compaction_speed == 0) return 1;

   intptr_t live_bytes = 0;
-  for (Page* page : evacuation_candidates_) {
-    live_bytes += page->LiveBytes();
+  for (MemoryChunk* chunk : evacuation_candidates_) {
+    live_bytes += chunk->LiveBytes();
   }

   const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
-  const int tasks =
-      1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
-                           kTargetCompactionTimeInMs);
+  int tasks;
+  if (compaction_speed > 0) {
+    tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
+                                 compaction_speed / kTargetCompactionTimeInMs);
+  } else {
+    tasks = evacuation_candidates_.length();
+  }
   const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
   const int tasks_capped_cores = Min(cores, tasks_capped_pages);
-  const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores);
-  return tasks_capped_hard;
+  return tasks_capped_cores;
 }
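
A worked example of the heuristic with made-up numbers: 8 MB of live bytes on the candidate pages, a measured compaction speed of 2 MB/ms, and the 1 ms target give tasks = 1 + 8/2/1 = 5, after which the caps by candidate pages and by cores - 1 apply. The same computation as a standalone sketch (the free-function signature and the std::min/std::max substitutions are assumptions; V8 uses its own Min/Max helpers):

    #include <algorithm>
    #include <cstdint>

    int NumberOfParallelCompactionTasks(std::intptr_t live_bytes,
                                        std::intptr_t compaction_speed,  // bytes/ms
                                        int candidate_pages, int processors) {
      const double kTargetCompactionTimeInMs = 1;
      const int cores = std::max(1, processors - 1);
      int tasks;
      if (compaction_speed > 0) {
        tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
                                     compaction_speed / kTargetCompactionTimeInMs);
      } else {
        // No speed estimate yet (e.g. the first GC): one task per candidate page.
        tasks = candidate_pages;
      }
      const int tasks_capped_pages = std::min(candidate_pages, tasks);
      return std::min(cores, tasks_capped_pages);  // the hard cap of 8 is gone
    }

    // 8 MB live, 2 MB/ms, 16 candidate pages, 8 processors:
    //   tasks = 1 + 8388608 / 2097152.0 / 1.0 = 5
    //   -> min(7, min(16, 5)) = 5 tasks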

 void MarkCompactCollector::EvacuatePagesInParallel() {
-  const int num_pages = evacuation_candidates_.length();
-  if (num_pages == 0) return;
+  DCHECK_GE(evacuation_candidates_.length(), 1);

   // Used for trace summary.
   intptr_t live_bytes = 0;
   intptr_t compaction_speed = 0;
   if (FLAG_trace_fragmentation) {
-    for (Page* page : evacuation_candidates_) {
+    for (MemoryChunk* page : evacuation_candidates_) {
       live_bytes += page->LiveBytes();
     }
     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
@@ -3186,7 +3169,8 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   double compaction_duration = 0.0;
   intptr_t compacted_memory = 0;
-  // Merge back memory (compacted and unused) from compaction spaces.
+  // Merge back memory (compacted and unused) from compaction spaces and update
+  // pretenuring feedback.
   for (int i = 0; i < num_tasks; i++) {
     heap()->old_space()->MergeCompactionSpace(
         compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
@@ -3194,6 +3178,10 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
         compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
     compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
     compaction_duration += compaction_spaces_for_tasks[i]->duration();
+    heap()->MergeAllocationSitePretenuringFeedback(
+        *compaction_spaces_for_tasks[i]->local_pretenuring_feedback());
+    compaction_spaces_for_tasks[i]->local_store_buffer()->Process(
+        heap()->store_buffer());
     delete compaction_spaces_for_tasks[i];
   }
   delete[] compaction_spaces_for_tasks;
@@ -3201,8 +3189,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   // Finalize sequentially.
   int abandoned_pages = 0;
-  for (int i = 0; i < num_pages; i++) {
-    Page* p = evacuation_candidates_[i];
+  for (MemoryChunk* p : evacuation_candidates_) {
     switch (p->parallel_compaction_state().Value()) {
       case MemoryChunk::ParallelCompactingState::kCompactingAborted:
         // We have partially compacted the page, i.e., some objects may have
@@ -3221,21 +3208,24 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
         // after reusing the memory. Note that all existing store buffer
         // entries of such pages are filtered before rescanning.
         DCHECK(p->IsEvacuationCandidate());
+        DCHECK(!p->InNewSpace());
         p->SetFlag(Page::COMPACTION_WAS_ABORTED);
         p->set_scan_on_scavenge(true);
         abandoned_pages++;
         break;
       case MemoryChunk::kCompactingFinalize:
-        DCHECK(p->IsEvacuationCandidate());
-        p->SetWasSwept();
-        p->Unlink();
+        DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
+        if (!p->InNewSpace()) {
+          reinterpret_cast<Page*>(p)->SetWasSwept();
+          p->Unlink();
+        }
         break;
       case MemoryChunk::kCompactingDone:
         DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
         DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
         break;
       default:
-        // We should not observe kCompactingInProgress, or kCompactingDone.
+        // MemoryChunk::kCompactingInProgress.
         UNREACHABLE();
     }
     p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
@@ -3246,7 +3236,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
         "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
         "d compaction_speed=%" V8_PTR_PREFIX "d\n",
         isolate()->time_millis_since_init(), FLAG_parallel_compaction,
-        num_pages, abandoned_pages, num_tasks,
+        evacuation_candidates_.length(), abandoned_pages, num_tasks,
         base::SysInfo::NumberOfProcessors(), live_bytes,
         compaction_speed);
   }
@@ -3287,28 +3277,46 @@ void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
 void MarkCompactCollector::EvacuatePages(
     CompactionSpaceCollection* compaction_spaces,
     SlotsBuffer** evacuation_slots_buffer) {
-  EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
-                                  evacuation_slots_buffer);
-  for (int i = 0; i < evacuation_candidates_.length(); i++) {
-    Page* p = evacuation_candidates_[i];
+  EvacuateOldSpaceVisitor old_space_visitor(heap(), compaction_spaces,
+                                            evacuation_slots_buffer);
+  EvacuateNewSpaceVisitor new_space_visitor(heap(), evacuation_slots_buffer,
+                                            compaction_spaces);
+  // We run through the list in reverse order to process newspace pages first,
+  // effectively reducing the number of old-to-new references and thus the
+  // load on the store buffer. Note that processing is still interleaved.
+  MemoryChunk* p = nullptr;
+  for (int i = evacuation_candidates_.length() - 1; i >= 0; --i) {
+    p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
-           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-    DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
-           MemoryChunk::kSweepingDone);
+           p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || p->InNewSpace());
+    DCHECK_EQ(static_cast<int>(p->parallel_sweeping_state().Value()),
+              MemoryChunk::kSweepingDone);
     if (p->parallel_compaction_state().TrySetValue(
             MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
-      if (p->IsEvacuationCandidate()) {
+      if (p->IsEvacuationCandidate() || p->InNewSpace()) {
         DCHECK_EQ(p->parallel_compaction_state().Value(),
                   MemoryChunk::kCompactingInProgress);
-        double start = heap()->MonotonicallyIncreasingTimeInMs();
-        intptr_t live_bytes = p->LiveBytes();
-        AlwaysAllocateScope always_allocate(isolate());
-        if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
+        int saved_live_bytes = p->LiveBytes();
+        double evacuation_time;
+        bool success;
+        {
+          AlwaysAllocateScope always_allocate(isolate());
+          TimedScope timed_scope(heap(), &evacuation_time);
+          success =
+              p->InNewSpace()
+                  ? VisitLiveObjects(p, &new_space_visitor, kClearMarkbits)
+                  : VisitLiveObjects(p, &old_space_visitor, kClearMarkbits);
+        }
+        // New space evacuation bails out to a regular semispace copy in OOM
+        // cases. A failing semispace copy fails hard, before reaching this
+        // point.
+        DCHECK(!p->InNewSpace() || success);
+        if (success) {
+          compaction_spaces->ReportCompactionProgress(evacuation_time,
+                                                      saved_live_bytes);
           p->ResetLiveBytes();
           p->parallel_compaction_state().SetValue(
               MemoryChunk::kCompactingFinalize);
-          compaction_spaces->ReportCompactionProgress(
-              heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
         } else {
           p->parallel_compaction_state().SetValue(
               MemoryChunk::kCompactingAborted);
@@ -3320,6 +3328,13 @@ void MarkCompactCollector::EvacuatePages(
       }
     }
   }
+
+  heap()->IncrementPromotedObjectsSize(new_space_visitor.promoted_size());
+  heap()->IncrementSemiSpaceCopiedObjectSize(
+      new_space_visitor.semispace_copied_size());
+  heap()->IncrementYoungSurvivorsCounter(
+      new_space_visitor.promoted_size() +
+      new_space_visitor.semispace_copied_size());
 }
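
All compaction tasks walk the same shared candidate list; exclusive ownership of a page comes from the compare-and-swap in TrySetValue (kCompactingDone to kCompactingInProgress), so each page is evacuated by exactly one task. A minimal sketch of that claiming pattern using std::atomic (hypothetical simplified types; V8's AtomicValue wraps the same idea):

    #include <atomic>
    #include <vector>

    enum CompactingState {
      kCompactingDone,
      kCompactingInProgress,
      kCompactingFinalize,
      kCompactingAborted,
    };

    struct Chunk {
      std::atomic<CompactingState> state{kCompactingDone};
    };

    // Run concurrently by every task; the CAS guarantees single ownership.
    void EvacuatePages(std::vector<Chunk*>& candidates) {
      for (int i = static_cast<int>(candidates.size()) - 1; i >= 0; --i) {
        Chunk* p = candidates[i];
        CompactingState expected = kCompactingDone;
        if (p->state.compare_exchange_strong(expected, kCompactingInProgress)) {
          // ... evacuate the page, then publish the result:
          p->state.store(kCompactingFinalize);
        }
        // Otherwise another task already claimed the page; skip it.
      }
    }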
@@ -3474,7 +3489,7 @@ void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
   // that is located in an unmovable page.
   int npages = evacuation_candidates_.length();
   for (int i = 0; i < npages; i++) {
-    Page* p = evacuation_candidates_[i];
+    MemoryChunk* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
     if (p->IsEvacuationCandidate()) {
@@ -3555,7 +3570,7 @@ void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
 void MarkCompactCollector::SweepAbortedPages() {
   // Second pass on aborted pages.
   for (int i = 0; i < evacuation_candidates_.length(); i++) {
-    Page* p = evacuation_candidates_[i];
+    Page* p = reinterpret_cast<Page*>(evacuation_candidates_[i]);
     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
       p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
       PagedSpace* space = static_cast<PagedSpace*>(p->owner());
@@ -3586,26 +3601,15 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
   Heap::RelocationLock relocation_lock(heap());

-  HashMap* local_pretenuring_feedback = nullptr;
   {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
     EvacuationScope evacuation_scope(this);
-    EvacuateNewSpacePrologue();
-    local_pretenuring_feedback = EvacuateNewSpaceInParallel();
-    heap_->new_space()->set_age_mark(heap_->new_space()->top());
-  }
-
-  {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_EVACUATE_CANDIDATES);
-    EvacuationScope evacuation_scope(this);
+    EvacuateNewSpacePrologue();
     EvacuatePagesInParallel();
-  }
-
-  {
-    heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
-    delete local_pretenuring_feedback;
+    EvacuateNewSpaceEpilogue();
+    heap()->new_space()->set_age_mark(heap()->new_space()->top());
   }

   UpdatePointersAfterEvacuation();
@@ -3688,7 +3692,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
         heap()->tracer(),
         GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
     for (int i = 0; i < npages; i++) {
-      Page* p = evacuation_candidates_[i];
+      Page* p = reinterpret_cast<Page*>(evacuation_candidates_[i]);
       DCHECK(p->IsEvacuationCandidate() ||
              p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
@@ -3764,7 +3768,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
   int npages = evacuation_candidates_.length();
   for (int i = 0; i < npages; i++) {
-    Page* p = evacuation_candidates_[i];
+    Page* p = reinterpret_cast<Page*>(evacuation_candidates_[i]);
     if (!p->IsEvacuationCandidate()) continue;
     p->Unlink();
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
@@ -3776,7 +3780,7 @@ void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
 void MarkCompactCollector::ReleaseEvacuationCandidates() {
   int npages = evacuation_candidates_.length();
   for (int i = 0; i < npages; i++) {
-    Page* p = evacuation_candidates_[i];
+    Page* p = reinterpret_cast<Page*>(evacuation_candidates_[i]);
     if (!p->IsEvacuationCandidate()) continue;
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
     space->Free(p->area_start(), p->area_size());