Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 884a8abc285cc183e349a474ee6bbf215c6bad15..980e4b3d373aeba23d242eec949edd1001a912b4 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -236,6 +236,11 @@ static void VerifyEvacuation(Heap* heap) {
 void MarkCompactCollector::SetUp() {
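+  // The mark bit patterns below are invariants of the marking scheme; verify
+  // them once at set-up time rather than on every evacuated page.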
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
   free_list_old_space_.Reset(new FreeList(heap_->old_space()));
   free_list_code_space_.Reset(new FreeList(heap_->code_space()));
   free_list_map_space_.Reset(new FreeList(heap_->map_space()));
@@ -1499,69 +1504,119 @@ void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
 }
-int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
-    NewSpace* new_space, NewSpacePage* p) {
-  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+class MarkCompactCollector::HeapObjectVisitor {
+ public:
+  virtual ~HeapObjectVisitor() {}
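+  // Visits the given object. Returns false to signal that iteration over the
+  // page should be aborted.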
+  virtual bool Visit(HeapObject* object) = 0;
+};
-  MarkBit::CellType* cells = p->markbits()->cells();
-  int survivors_size = 0;
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
+class MarkCompactCollector::EvacuateNewSpaceVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {}
-    MarkBit::CellType current_cell = *cell;
-    if (current_cell == 0) continue;
+  virtual bool Visit(HeapObject* object) {
+    Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+    int size = object->Size();
-    int offset = 0;
-    while (current_cell != 0) {
-      int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
-      current_cell >>= trailing_zeros;
-      offset += trailing_zeros;
-      Address address = cell_base + offset * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(address);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+    // TODO(hpayer): Refactor EvacuateObject and call this function instead.
+    if (heap_->ShouldBePromoted(object->address(), size) &&
+        heap_->mark_compact_collector()->TryPromoteObject(object, size)) {
+      return true;
+    }
-      int size = object->Size();
-      survivors_size += size;
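+    // The object was not promoted (or promotion failed); fall back to a
+    // semi-space copy within new space.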
+    AllocationAlignment alignment = object->RequiredAlignment();
+    AllocationResult allocation =
+        heap_->new_space()->AllocateRaw(size, alignment);
+    if (allocation.IsRetry()) {
+      if (!heap_->new_space()->AddFreshPage()) {
+        // Shouldn't happen. We are sweeping linearly, and to-space
+        // has the same number of pages as from-space, so there is
+        // always room unless we are in an OOM situation.
+        FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
+      }
+      allocation = heap_->new_space()->AllocateRaw(size, alignment);
+      DCHECK(!allocation.IsRetry());
+    }
+    Object* target = allocation.ToObjectChecked();
+
+    heap_->mark_compact_collector()->MigrateObject(
+        HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
+    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+    }
+    heap_->IncrementSemiSpaceCopiedObjectSize(size);
+    return true;
+  }
-      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+ private:
+  Heap* heap_;
+};
-      offset += 2;
-      current_cell >>= 2;
-      // TODO(hpayer): Refactor EvacuateObject and call this function instead.
-      if (heap()->ShouldBePromoted(object->address(), size) &&
-          TryPromoteObject(object, size)) {
-        continue;
-      }
+class MarkCompactCollector::EvacuateOldSpaceVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateOldSpaceVisitor(Heap* heap,
+                          CompactionSpaceCollection* compaction_spaces,
+                          SlotsBuffer** evacuation_slots_buffer)
+      : heap_(heap),
+        compaction_spaces_(compaction_spaces),
+        evacuation_slots_buffer_(evacuation_slots_buffer) {}
-      AllocationAlignment alignment = object->RequiredAlignment();
-      AllocationResult allocation = new_space->AllocateRaw(size, alignment);
-      if (allocation.IsRetry()) {
-        if (!new_space->AddFreshPage()) {
-          // Shouldn't happen. We are sweeping linearly, and to-space
-          // has the same number of pages as from-space, so there is
-          // always room unless we are in an OOM situation.
-          FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
-        }
-        allocation = new_space->AllocateRaw(size, alignment);
-        DCHECK(!allocation.IsRetry());
-      }
-      Object* target = allocation.ToObjectChecked();
+  virtual bool Visit(HeapObject* object) {
+    int size = object->Size();
+    AllocationAlignment alignment = object->RequiredAlignment();
+    HeapObject* target_object = nullptr;
+    AllocationSpace id =
+        Page::FromAddress(object->address())->owner()->identity();
+    AllocationResult allocation =
+        compaction_spaces_->Get(id)->AllocateRaw(size, alignment);
+    if (!allocation.To(&target_object)) {
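+      // Signal the caller to abort compaction of this page; mark bits of
+      // objects that were already migrated are reset by the caller.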
+      return false;
+    }
+    heap_->mark_compact_collector()->MigrateObject(
+        target_object, object, size, id, evacuation_slots_buffer_);
+    DCHECK(object->map_word().IsForwardingAddress());
+    return true;
+  }
+
+ private:
+  Heap* heap_;
+  CompactionSpaceCollection* compaction_spaces_;
+  SlotsBuffer** evacuation_slots_buffer_;
+};
+
+
+bool MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page,
+                                                    HeapObjectVisitor* visitor,
+                                                    IterationMode mode) {
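+  // Objects span at least two words, so a single 32-bit mark-bit cell can
+  // cover at most 16 object starts.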
+  Address offsets[16];
+  for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    if (*cell == 0) continue;
-      MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
-      if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-        heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+    int live_objects = MarkWordToObjectStarts(*cell, cell_base, offsets);
+    for (int i = 0; i < live_objects; i++) {
+      HeapObject* object = HeapObject::FromAddress(offsets[i]);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+      if (!visitor->Visit(object)) {
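+        // Aborting the iteration. If we are clearing mark bits, make sure
+        // that the bits of objects that have already been visited are reset
+        // as well.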
+        if ((mode == kClearMarkbits) && (i > 0)) {
+          page->markbits()->ClearRange(
+              page->AddressToMarkbitIndex(page->area_start()),
+              page->AddressToMarkbitIndex(offsets[i]));
+        }
+        return false;
       }
-      heap()->IncrementSemiSpaceCopiedObjectSize(size);
     }
-    *cells = 0;
+    if (mode == kClearMarkbits) {
+      *cell = 0;
+    }
   }
-  return survivors_size;
+  return true;
 }
@@ -3082,9 +3137,13 @@ void MarkCompactCollector::EvacuateNewSpace() {
   // new entries in the store buffer and may cause some pages to be marked
   // scan-on-scavenge.
   NewSpacePageIterator it(from_bottom, from_top);
+  EvacuateNewSpaceVisitor new_space_visitor(heap());
   while (it.has_next()) {
     NewSpacePage* p = it.next();
-    survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
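+    // LiveBytes() was accumulated during marking and equals the summed size
+    // of the black objects the visitor evacuates below.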
+    survivors_size += p->LiveBytes();
+    bool ok = IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits);
+    USE(ok);
+    DCHECK(ok);
   }
   heap_->IncrementYoungSurvivorsCounter(survivors_size);
@@ -3099,50 +3158,6 @@ void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
 }
-bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
-    Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
-  AlwaysAllocateScope always_allocate(isolate());
-  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-
-  Address starts[16];
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    if (*cell == 0) continue;
-
-    int live_objects = MarkWordToObjectStarts(*cell, cell_base, starts);
-    for (int i = 0; i < live_objects; i++) {
-      HeapObject* object = HeapObject::FromAddress(starts[i]);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-      int size = object->Size();
-      AllocationAlignment alignment = object->RequiredAlignment();
-      HeapObject* target_object = nullptr;
-      AllocationResult allocation = target_space->AllocateRaw(size, alignment);
-      if (!allocation.To(&target_object)) {
-        // We need to abort compaction for this page. Make sure that we reset
-        // the mark bits for objects that have already been migrated.
-        if (i > 0) {
-          p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()),
-                                    p->AddressToMarkbitIndex(starts[i]));
-        }
-        return false;
-      }
-
-      MigrateObject(target_object, object, size, target_space->identity(),
-                    evacuation_slots_buffer);
-      DCHECK(object->map_word().IsForwardingAddress());
-    }
-
-    // Clear marking bits for current cell.
-    *cell = 0;
-  }
-  p->ResetLiveBytes();
-  return true;
-}
-
-
 int MarkCompactCollector::NumberOfParallelCompactionTasks() {
   if (!FLAG_parallel_compaction) return 1;
   // Compute the number of needed tasks based on a target compaction time, the
@@ -3307,6 +3322,8 @@ void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
 void MarkCompactCollector::EvacuatePages(
     CompactionSpaceCollection* compaction_spaces,
     SlotsBuffer** evacuation_slots_buffer) {
+  EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
+                                  evacuation_slots_buffer);
   for (int i = 0; i < evacuation_candidates_.length(); i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
@@ -3320,9 +3337,8 @@ void MarkCompactCollector::EvacuatePages(
                   MemoryChunk::kCompactingInProgress);
         double start = heap()->MonotonicallyIncreasingTimeInMs();
         intptr_t live_bytes = p->LiveBytes();
-        if (EvacuateLiveObjectsFromPage(
-                p, compaction_spaces->Get(p->owner()->identity()),
-                evacuation_slots_buffer)) {
+        if (IterateLiveObjectsOnPage(p, &visitor, kClearMarkbits)) {
+          p->ResetLiveBytes();
           p->parallel_compaction_state().SetValue(
               MemoryChunk::kCompactingFinalize);
           compaction_spaces->ReportCompactionProgress(