Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 980e4b3d373aeba23d242eec949edd1001a912b4..884a8abc285cc183e349a474ee6bbf215c6bad15 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -236,11 +236,6 @@
 void MarkCompactCollector::SetUp() {
-  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
   free_list_old_space_.Reset(new FreeList(heap_->old_space()));
   free_list_code_space_.Reset(new FreeList(heap_->code_space()));
   free_list_map_space_.Reset(new FreeList(heap_->map_space()));
@@ -1504,119 +1499,69 @@
 }
-class MarkCompactCollector::HeapObjectVisitor {
- public:
-  virtual ~HeapObjectVisitor() {}
-  virtual bool Visit(HeapObject* object) = 0;
-};
-
-
-class MarkCompactCollector::EvacuateNewSpaceVisitor
-    : public MarkCompactCollector::HeapObjectVisitor {
- public:
-  explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {}
-
-  virtual bool Visit(HeapObject* object) {
-    Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
-    int size = object->Size();
-
-    // TODO(hpayer): Refactor EvacuateObject and call this function instead.
-    if (heap_->ShouldBePromoted(object->address(), size) &&
-        heap_->mark_compact_collector()->TryPromoteObject(object, size)) {
-      return true;
-    }
-
-    AllocationAlignment alignment = object->RequiredAlignment();
-    AllocationResult allocation =
-        heap_->new_space()->AllocateRaw(size, alignment);
-    if (allocation.IsRetry()) {
-      if (!heap_->new_space()->AddFreshPage()) {
-        // Shouldn't happen. We are sweeping linearly, and to-space
-        // has the same number of pages as from-space, so there is
-        // always room unless we are in an OOM situation.
-        FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
-      }
-      allocation = heap_->new_space()->AllocateRaw(size, alignment);
-      DCHECK(!allocation.IsRetry());
-    }
-    Object* target = allocation.ToObjectChecked();
-
-    heap_->mark_compact_collector()->MigrateObject(
-        HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
-    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
-    }
-    heap_->IncrementSemiSpaceCopiedObjectSize(size);
-    return true;
-  }
-
- private:
-  Heap* heap_;
-};
-
-
-class MarkCompactCollector::EvacuateOldSpaceVisitor
-    : public MarkCompactCollector::HeapObjectVisitor {
- public:
-  EvacuateOldSpaceVisitor(Heap* heap,
-                          CompactionSpaceCollection* compaction_spaces,
-                          SlotsBuffer** evacuation_slots_buffer)
-      : heap_(heap),
-        compaction_spaces_(compaction_spaces),
-        evacuation_slots_buffer_(evacuation_slots_buffer) {}
-
-  virtual bool Visit(HeapObject* object) {
-    int size = object->Size();
-    AllocationAlignment alignment = object->RequiredAlignment();
-    HeapObject* target_object = nullptr;
-    AllocationSpace id =
-        Page::FromAddress(object->address())->owner()->identity();
-    AllocationResult allocation =
-        compaction_spaces_->Get(id)->AllocateRaw(size, alignment);
-    if (!allocation.To(&target_object)) {
-      return false;
-    }
-    heap_->mark_compact_collector()->MigrateObject(
-        target_object, object, size, id, evacuation_slots_buffer_);
-    DCHECK(object->map_word().IsForwardingAddress());
-    return true;
-  }
-
- private:
-  Heap* heap_;
-  CompactionSpaceCollection* compaction_spaces_;
-  SlotsBuffer** evacuation_slots_buffer_;
-};
-
-
-bool MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page,
-                                                    HeapObjectVisitor* visitor,
-                                                    IterationMode mode) {
-  Address offsets[16];
-  for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
+int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
+    NewSpace* new_space, NewSpacePage* p) {
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  MarkBit::CellType* cells = p->markbits()->cells();
+  int survivors_size = 0;
+
+  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
     Address cell_base = it.CurrentCellBase();
     MarkBit::CellType* cell = it.CurrentCell();
-    if (*cell == 0) continue;
-
-    int live_objects = MarkWordToObjectStarts(*cell, cell_base, offsets);
-    for (int i = 0; i < live_objects; i++) {
-      HeapObject* object = HeapObject::FromAddress(offsets[i]);
+    MarkBit::CellType current_cell = *cell;
+    if (current_cell == 0) continue;
+
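+    // Scan the mark bits of this cell: CountTrailingZeros32 jumps to the next
+    // set bit, i.e. the word offset of the next black object's start.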
+    int offset = 0;
+    while (current_cell != 0) {
+      int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
+      current_cell >>= trailing_zeros;
+      offset += trailing_zeros;
+      Address address = cell_base + offset * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(address);
       DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-      if (!visitor->Visit(object)) {
-        if ((mode == kClearMarkbits) && (i > 0)) {
-          page->markbits()->ClearRange(
-              page->AddressToMarkbitIndex(page->area_start()),
-              page->AddressToMarkbitIndex(offsets[i]));
+
+      int size = object->Size();
+      survivors_size += size;
+
+      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+
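+      // A black object is encoded as the mark-bit pair "10", so skip both of
+      // its bits before scanning for the next object in this cell.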
+      offset += 2;
+      current_cell >>= 2;
+
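+      // Objects that are old enough are promoted straight into old space;
+      // everything else falls through to the semi-space copy below.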
+      // TODO(hpayer): Refactor EvacuateObject and call this function instead.
+      if (heap()->ShouldBePromoted(object->address(), size) &&
+          TryPromoteObject(object, size)) {
+        continue;
+      }
+
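+      // Semi-space copy: allocate a slot in to-space and migrate the object.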
+      AllocationAlignment alignment = object->RequiredAlignment();
+      AllocationResult allocation = new_space->AllocateRaw(size, alignment);
+      if (allocation.IsRetry()) {
+        if (!new_space->AddFreshPage()) {
+          // Shouldn't happen. We are sweeping linearly, and to-space
+          // has the same number of pages as from-space, so there is
+          // always room unless we are in an OOM situation.
+          FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
         }
-        return false;
-      }
-    }
-    if (mode == kClearMarkbits) {
-      *cell = 0;
-    }
-  }
-  return true;
+        allocation = new_space->AllocateRaw(size, alignment);
+        DCHECK(!allocation.IsRetry());
+      }
+      Object* target = allocation.ToObjectChecked();
+
+      MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
+      if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+        heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+      }
+      heap()->IncrementSemiSpaceCopiedObjectSize(size);
+    }
+    *cells = 0;
+  }
+  return survivors_size;
 }
@@ -3137,13 +3082,9 @@
   // new entries in the store buffer and may cause some pages to be marked
   // scan-on-scavenge.
   NewSpacePageIterator it(from_bottom, from_top);
-  EvacuateNewSpaceVisitor new_space_visitor(heap());
   while (it.has_next()) {
     NewSpacePage* p = it.next();
-    survivors_size += p->LiveBytes();
-    bool ok = IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits);
-    USE(ok);
-    DCHECK(ok);
+    survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
   }
   heap_->IncrementYoungSurvivorsCounter(survivors_size);
@@ -3155,6 +3096,50 @@
     SlotsBuffer* evacuation_slots_buffer) {
   base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
   evacuation_slots_buffers_.Add(evacuation_slots_buffer);
+}
+
+
+bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
+    Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
+  AlwaysAllocateScope always_allocate(isolate());
+  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
+
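+  // A 32-bit mark cell covers 32 words and an object is at least two words
+  // long, so one cell can hold at most 16 object starts.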
+  Address starts[16];
+  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    if (*cell == 0) continue;
+
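+    // Decode the mark-bit cell into the start addresses of its live objects.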
+    int live_objects = MarkWordToObjectStarts(*cell, cell_base, starts);
+    for (int i = 0; i < live_objects; i++) {
+      HeapObject* object = HeapObject::FromAddress(starts[i]);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+      int size = object->Size();
+      AllocationAlignment alignment = object->RequiredAlignment();
+      HeapObject* target_object = nullptr;
+      AllocationResult allocation = target_space->AllocateRaw(size, alignment);
+      if (!allocation.To(&target_object)) {
+        // We need to abort compaction for this page. Make sure that we reset
+        // the mark bits for objects that have already been migrated.
+        if (i > 0) {
+          p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()),
+                                    p->AddressToMarkbitIndex(starts[i]));
+        }
+        return false;
+      }
+
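+      // MigrateObject copies the object into the compaction space, installs a
+      // forwarding address in the old map word, and records slots to update.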
+      MigrateObject(target_object, object, size, target_space->identity(),
+                    evacuation_slots_buffer);
+      DCHECK(object->map_word().IsForwardingAddress());
+    }
+
+    // Clear marking bits for current cell.
+    *cell = 0;
+  }
+  p->ResetLiveBytes();
+  return true;
 }
@@ -3322,8 +3307,6 @@
 void MarkCompactCollector::EvacuatePages(
     CompactionSpaceCollection* compaction_spaces,
     SlotsBuffer** evacuation_slots_buffer) {
-  EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
-                                  evacuation_slots_buffer);
   for (int i = 0; i < evacuation_candidates_.length(); i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
@@ -3337,8 +3320,9 @@
                   MemoryChunk::kCompactingInProgress);
         double start = heap()->MonotonicallyIncreasingTimeInMs();
         intptr_t live_bytes = p->LiveBytes();
-        if (IterateLiveObjectsOnPage(p, &visitor, kClearMarkbits)) {
-          p->ResetLiveBytes();
+        if (EvacuateLiveObjectsFromPage(
+                p, compaction_spaces->Get(p->owner()->identity()),
+                evacuation_slots_buffer)) {
           p->parallel_compaction_state().SetValue(
               MemoryChunk::kCompactingFinalize);
           compaction_spaces->ReportCompactionProgress(