Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 7b3c2a8c6eb75140aa1783353d3ae8b945a35c17..803a7aef7ed531dc2b2a36e2f5381014a6357560 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -236,6 +236,11 @@ static void VerifyEvacuation(Heap* heap) {
 void MarkCompactCollector::SetUp() {
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
   free_list_old_space_.Reset(new FreeList(heap_->old_space()));
   free_list_code_space_.Reset(new FreeList(heap_->code_space()));
   free_list_map_space_.Reset(new FreeList(heap_->map_space()));
@@ -1544,69 +1549,125 @@ void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
 }
-int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
-    NewSpace* new_space, NewSpacePage* p) {
-  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+class MarkCompactCollector::HeapObjectVisitor {
+ public:
+  virtual ~HeapObjectVisitor() {}
+  virtual bool Visit(HeapObject* object) = 0;
+};
-  MarkBit::CellType* cells = p->markbits()->cells();
-  int survivors_size = 0;
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
+class MarkCompactCollector::EvacuateNewSpaceVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {}
-    MarkBit::CellType current_cell = *cell;
-    if (current_cell == 0) continue;
+  virtual bool Visit(HeapObject* object) {
+    Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+    int size = object->Size();
-    int offset = 0;
-    while (current_cell != 0) {
-      int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
-      current_cell >>= trailing_zeros;
-      offset += trailing_zeros;
-      Address address = cell_base + offset * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(address);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+    // TODO(hpayer): Refactor EvacuateObject and call this function instead.
+    if (heap_->ShouldBePromoted(object->address(), size) &&
+        heap_->mark_compact_collector()->TryPromoteObject(object, size)) {
+      return true;
+    }
-      int size = object->Size();
-      survivors_size += size;
+    AllocationAlignment alignment = object->RequiredAlignment();
+    AllocationResult allocation =
+        heap_->new_space()->AllocateRaw(size, alignment);
+    if (allocation.IsRetry()) {
+      if (!heap_->new_space()->AddFreshPage()) {
+        // Shouldn't happen. We are sweeping linearly, and to-space
+        // has the same number of pages as from-space, so there is
+        // always room unless we are in an OOM situation.
+        FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
+      }
+      allocation = heap_->new_space()->AllocateRaw(size, alignment);
+      DCHECK(!allocation.IsRetry());
+    }
+    Object* target = allocation.ToObjectChecked();
-      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+    heap_->mark_compact_collector()->MigrateObject(
+        HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
+    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+    }
+    heap_->IncrementSemiSpaceCopiedObjectSize(size);
+    return true;
+  }
-      offset += 2;
-      current_cell >>= 2;
+ private:
+  Heap* heap_;
+};
-      // TODO(hpayer): Refactor EvacuateObject and call this function instead.
-      if (heap()->ShouldBePromoted(object->address(), size) &&
-          TryPromoteObject(object, size)) {
-        continue;
-      }
-      AllocationAlignment alignment = object->RequiredAlignment();
-      AllocationResult allocation = new_space->AllocateRaw(size, alignment);
-      if (allocation.IsRetry()) {
-        if (!new_space->AddFreshPage()) {
-          // Shouldn't happen. We are sweeping linearly, and to-space
-          // has the same number of pages as from-space, so there is
-          // always room unless we are in an OOM situation.
-          FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
-        }
-        allocation = new_space->AllocateRaw(size, alignment);
-        DCHECK(!allocation.IsRetry());
-      }
-      Object* target = allocation.ToObjectChecked();
+class MarkCompactCollector::EvacuateOldSpaceVisitor
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateOldSpaceVisitor(Heap* heap,
+                          CompactionSpaceCollection* compaction_spaces,
+                          SlotsBuffer** evacuation_slots_buffer)
+      : heap_(heap),
+        compaction_spaces_(compaction_spaces),
+        evacuation_slots_buffer_(evacuation_slots_buffer),
+        aborted_(false) {}
+
+  virtual bool Visit(HeapObject* object) {
+    int size = object->Size();
+    AllocationAlignment alignment = object->RequiredAlignment();
+    HeapObject* target_object = nullptr;
+    AllocationSpace id =
+        Page::FromAddress(object->address())->owner()->identity();
+    AllocationResult allocation =
+        compaction_spaces_->Get(id)->AllocateRaw(size, alignment);
+    if (!allocation.To(&target_object)) {
+      aborted_ = true;
+      return false;
+    }
+    heap_->mark_compact_collector()->MigrateObject(
+        target_object, object, size, id, evacuation_slots_buffer_);
+    DCHECK(object->map_word().IsForwardingAddress());
+    return true;
+  }
+
+  bool aborted() { return aborted_; }
+  void reset_aborted() { aborted_ = false; }
-      MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
-      if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-        heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+ private:
+  Heap* heap_;
+  CompactionSpaceCollection* compaction_spaces_;
+  SlotsBuffer** evacuation_slots_buffer_;
+  bool aborted_;
+};
+
+
+void MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page,
+                                                    HeapObjectVisitor* visitor,
+                                                    IterationMode mode) {

    Hannes Payer (out of office), 2015/11/24 16:07:35:
    If this method returns a bool indicating if all ob…

    Michael Lippautz, 2015/11/24 19:19:50:
    Done. Also added a comment in the .h file and a fu…

+  int offsets[16];
+  for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    if (*cell == 0) continue;
+
+    int live_objects = MarkWordToObjectStarts(*cell, offsets);
+    for (int i = 0; i < live_objects; i++) {
+      Address object_addr = cell_base + offsets[i] * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(object_addr);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+      if (!visitor->Visit(object)) {
+        if ((mode == kClearMarkbits) && (i > 0)) {
+          page->markbits()->ClearRange(
+              page->AddressToMarkbitIndex(page->area_start()),
+              page->AddressToMarkbitIndex(object_addr));
+        }
+        return;
       }
-      heap()->IncrementSemiSpaceCopiedObjectSize(size);
     }
-    *cells = 0;
+    if (mode == kClearMarkbits) {
+      *cell = 0;
+    }
   }
-  return survivors_size;
 }
@@ -3087,9 +3148,11 @@ void MarkCompactCollector::EvacuateNewSpace() {
   // new entries in the store buffer and may cause some pages to be marked
   // scan-on-scavenge.
   NewSpacePageIterator it(from_bottom, from_top);
+  EvacuateNewSpaceVisitor new_space_visitor(heap());
   while (it.has_next()) {
     NewSpacePage* p = it.next();
-    survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
+    survivors_size += p->LiveBytes();
+    IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits);
   }
   heap_->IncrementYoungSurvivorsCounter(survivors_size);
@@ -3104,51 +3167,6 @@ void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
 }
-bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
-    Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
-  AlwaysAllocateScope always_allocate(isolate());
-  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-
-  int offsets[16];
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    if (*cell == 0) continue;
-
-    int live_objects = MarkWordToObjectStarts(*cell, offsets);
-    for (int i = 0; i < live_objects; i++) {
-      Address object_addr = cell_base + offsets[i] * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(object_addr);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-      int size = object->Size();
-      AllocationAlignment alignment = object->RequiredAlignment();
-      HeapObject* target_object = nullptr;
-      AllocationResult allocation = target_space->AllocateRaw(size, alignment);
-      if (!allocation.To(&target_object)) {
-        // We need to abort compaction for this page. Make sure that we reset
-        // the mark bits for objects that have already been migrated.
-        if (i > 0) {
-          p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()),
-                                    p->AddressToMarkbitIndex(object_addr));
-        }
-        return false;
-      }
-
-      MigrateObject(target_object, object, size, target_space->identity(),
-                    evacuation_slots_buffer);
-      DCHECK(object->map_word().IsForwardingAddress());
-    }
-
-    // Clear marking bits for current cell.
-    *cell = 0;
-  }
-  p->ResetLiveBytes();
-  return true;
-}
-
-
 int MarkCompactCollector::NumberOfParallelCompactionTasks() {
   if (!FLAG_parallel_compaction) return 1;
   // Compute the number of needed tasks based on a target compaction time, the
@@ -3313,6 +3331,8 @@ void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
 void MarkCompactCollector::EvacuatePages(
     CompactionSpaceCollection* compaction_spaces,
     SlotsBuffer** evacuation_slots_buffer) {
+  EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
+                                  evacuation_slots_buffer);
   for (int i = 0; i < evacuation_candidates_.length(); i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
@@ -3326,9 +3346,9 @@ void MarkCompactCollector::EvacuatePages(
                   MemoryChunk::kCompactingInProgress);
         double start = heap()->MonotonicallyIncreasingTimeInMs();
         intptr_t live_bytes = p->LiveBytes();
-        if (EvacuateLiveObjectsFromPage(
-                p, compaction_spaces->Get(p->owner()->identity()),
-                evacuation_slots_buffer)) {
+        IterateLiveObjectsOnPage(p, &visitor, kClearMarkbits);
+        if (!visitor.aborted()) {
+          p->ResetLiveBytes();
           p->parallel_compaction_state().SetValue(
               MemoryChunk::kCompactingFinalize);
           compaction_spaces->ReportCompactionProgress(
@@ -3336,6 +3356,7 @@
         } else {
           p->parallel_compaction_state().SetValue(
               MemoryChunk::kCompactingAborted);
+          visitor.reset_aborted();
         }
       } else {
         // There could be popular pages in the list of evacuation candidates
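
For readers skimming the patch, here is a minimal, self-contained sketch of the shape of the refactoring. All names below (ObjectVisitor, IterateLiveObjects, BudgetedVisitor, FakeObject) are hypothetical stand-ins, not the actual V8 classes: the per-page live-object walk becomes one shared iteration routine, and the per-object evacuation policy is injected through a visitor whose Visit() returns false to signal an abort for the current page.

// Hypothetical illustration of the visitor-based iteration pattern; the real
// V8 types and mark-bit machinery are replaced by trivial stand-ins.
#include <cstdio>
#include <vector>

struct FakeObject {
  int size;
};

// Stand-in for MarkCompactCollector::HeapObjectVisitor.
class ObjectVisitor {
 public:
  virtual ~ObjectVisitor() = default;
  // Returning false stops the walk, mirroring an aborted evacuation.
  virtual bool Visit(FakeObject* object) = 0;
};

// Stand-in for IterateLiveObjectsOnPage: dispatches every "live" object on a
// page to the visitor and reports whether the walk ran to completion.
bool IterateLiveObjects(std::vector<FakeObject>& page, ObjectVisitor* visitor) {
  for (FakeObject& object : page) {
    if (!visitor->Visit(&object)) return false;
  }
  return true;
}

// Stand-in for the evacuation visitors: accumulates visited bytes and
// "aborts" once a budget is exhausted, loosely analogous to a failed
// allocation in EvacuateOldSpaceVisitor.
class BudgetedVisitor : public ObjectVisitor {
 public:
  explicit BudgetedVisitor(int budget) : budget_(budget) {}
  bool Visit(FakeObject* object) override {
    if (visited_size_ + object->size > budget_) {
      aborted_ = true;
      return false;
    }
    visited_size_ += object->size;
    return true;
  }
  bool aborted() const { return aborted_; }
  int visited_size() const { return visited_size_; }

 private:
  int budget_;
  int visited_size_ = 0;
  bool aborted_ = false;
};

int main() {
  std::vector<FakeObject> page = {{16}, {32}, {64}};
  BudgetedVisitor visitor(/*budget=*/100);
  bool completed = IterateLiveObjects(page, &visitor);
  std::printf("completed=%d aborted=%d visited=%d\n", completed,
              visitor.aborted(), visitor.visited_size());
  return 0;
}

The actual patch follows the same split: EvacuateNewSpaceVisitor and EvacuateOldSpaceVisitor carry the space-specific allocation and migration logic, while IterateLiveObjectsOnPage owns the mark-bit walk and the kClearMarkbits bookkeeping.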