Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 42cb71e8e182f30913ebff21b44ab84a601f47bd..c66cd0c13ac7607bc2939c29b931bb0aeca7708a 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -1533,18 +1533,51 @@ class MarkCompactCollector::HeapObjectVisitor {
 };
 
 
-class MarkCompactCollector::EvacuateNewSpaceVisitor
+class MarkCompactCollector::EvacuateVisitorBase
     : public MarkCompactCollector::HeapObjectVisitor {
  public:
-  explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {}
+  EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
+      : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
 
-  virtual bool Visit(HeapObject* object) {
-    Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
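+  // Attempts to allocate space for {object} in {target_space} and, on
+  // success, migrates the object there and records its slots in the
+  // evacuation slots buffer. Returns whether the evacuation succeeded.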
+  bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
+                         HeapObject** target_object) {
     int size = object->Size();
+    AllocationAlignment alignment = object->RequiredAlignment();
+    AllocationResult allocation = target_space->AllocateRaw(size, alignment);
+    if (allocation.To(target_object)) {
+      heap_->mark_compact_collector()->MigrateObject(
+          *target_object, object, size, target_space->identity(),
+          evacuation_slots_buffer_);
+      return true;
+    }
+    return false;
+  }
 
-    // TODO(hpayer): Refactor EvacuateObject and call this function instead.
+ protected:
+  Heap* heap_;
+  SlotsBuffer** evacuation_slots_buffer_;
+};
+
+
+class MarkCompactCollector::EvacuateNewSpaceVisitor
+    : public MarkCompactCollector::EvacuateVisitorBase {
+ public:
+  explicit EvacuateNewSpaceVisitor(Heap* heap,
+                                   SlotsBuffer** evacuation_slots_buffer)
+      : EvacuateVisitorBase(heap, evacuation_slots_buffer) {}
+
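+  // Tries to promote the object into old space; objects that are not
+  // promoted fall through to the semispace copy path below.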
+  bool Visit(HeapObject* object) override {
+    Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+    int size = object->Size();
+    HeapObject* target_object = nullptr;
     if (heap_->ShouldBePromoted(object->address(), size) &&
-        heap_->mark_compact_collector()->TryPromoteObject(object, size)) {
+        TryEvacuateObject(heap_->old_space(), object, &target_object)) {
+      // If we end up needing more special cases, we should factor this out.
+      if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
+        heap_->array_buffer_tracker()->Promote(
+            JSArrayBuffer::cast(target_object));
+      }
+      heap_->IncrementPromotedObjectsSize(size);
       return true;
     }
 
@@ -1571,77 +1604,34 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor
     heap_->IncrementSemiSpaceCopiedObjectSize(size);
     return true;
   }
-
- private:
-  Heap* heap_;
 };
 
 
 class MarkCompactCollector::EvacuateOldSpaceVisitor
-    : public MarkCompactCollector::HeapObjectVisitor {
+    : public MarkCompactCollector::EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
                           CompactionSpaceCollection* compaction_spaces,
                           SlotsBuffer** evacuation_slots_buffer)
-      : heap_(heap),
-        compaction_spaces_(compaction_spaces),
-        evacuation_slots_buffer_(evacuation_slots_buffer) {}
+      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+        compaction_spaces_(compaction_spaces) {}
 
-  virtual bool Visit(HeapObject* object) {
-    int size = object->Size();
-    AllocationAlignment alignment = object->RequiredAlignment();
+  bool Visit(HeapObject* object) override {
+    CompactionSpace* target_space = compaction_spaces_->Get(
+        Page::FromAddress(object->address())->owner()->identity());
     HeapObject* target_object = nullptr;
-    AllocationSpace id =
-        Page::FromAddress(object->address())->owner()->identity();
-    AllocationResult allocation =
-        compaction_spaces_->Get(id)->AllocateRaw(size, alignment);
-    if (!allocation.To(&target_object)) {
-      return false;
+    if (TryEvacuateObject(target_space, object, &target_object)) {
+      DCHECK(object->map_word().IsForwardingAddress());
+      return true;
     }
-    heap_->mark_compact_collector()->MigrateObject(
-        target_object, object, size, id, evacuation_slots_buffer_);
-    DCHECK(object->map_word().IsForwardingAddress());
-    return true;
+    return false;
   }
 
  private:
-  Heap* heap_;
   CompactionSpaceCollection* compaction_spaces_;
-  SlotsBuffer** evacuation_slots_buffer_;
 };
 
 
-bool MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page,
-                                                    HeapObjectVisitor* visitor,
-                                                    IterationMode mode) {
-  Address offsets[16];
-  for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    if (*cell == 0) continue;
-
-    int live_objects = MarkWordToObjectStarts(*cell, cell_base, offsets);
-    for (int i = 0; i < live_objects; i++) {
-      HeapObject* object = HeapObject::FromAddress(offsets[i]);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-      if (!visitor->Visit(object)) {
-        if ((mode == kClearMarkbits) && (i > 0)) {
-          page->markbits()->ClearRange(
-              page->AddressToMarkbitIndex(page->area_start()),
-              page->AddressToMarkbitIndex(offsets[i]));
-        }
-        return false;
-      }
-    }
-    if (mode == kClearMarkbits) {
-      *cell = 0;
-    }
-  }
-  return true;
-}
-
-
 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
   PageIterator it(space);
   while (it.has_next()) {
@@ -2994,28 +2984,6 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
 }
 
 
-bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
-                                            int object_size) {
-  OldSpace* old_space = heap()->old_space();
-
-  HeapObject* target = nullptr;
-  AllocationAlignment alignment = object->RequiredAlignment();
-  AllocationResult allocation = old_space->AllocateRaw(object_size, alignment);
-  if (allocation.To(&target)) {
-    MigrateObject(target, object, object_size, old_space->identity(),
-                  &migration_slots_buffer_);
-    // If we end up needing more special cases, we should factor this out.
-    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-      heap()->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
-    }
-    heap()->IncrementPromotedObjectsSize(object_size);
-    return true;
-  }
-
-  return false;
-}
-
-
 bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
                                                HeapObject** out_object) {
   Space* owner = p->owner();
@@ -3188,11 +3156,11 @@ void MarkCompactCollector::EvacuateNewSpace() {
   // new entries in the store buffer and may cause some pages to be marked
   // scan-on-scavenge.
   NewSpacePageIterator it(from_bottom, from_top);
-  EvacuateNewSpaceVisitor new_space_visitor(heap());
+  EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_);
   while (it.has_next()) {
     NewSpacePage* p = it.next();
     survivors_size += p->LiveBytes();
-    bool ok = IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits);
+    bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
     USE(ok);
     DCHECK(ok);
   }
@@ -3394,7 +3362,7 @@ void MarkCompactCollector::EvacuatePages(
     double start = heap()->MonotonicallyIncreasingTimeInMs();
     intptr_t live_bytes = p->LiveBytes();
     AlwaysAllocateScope always_allocate(isolate());
-    if (IterateLiveObjectsOnPage(p, &visitor, kClearMarkbits)) {
+    if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
       p->ResetLiveBytes();
       p->parallel_compaction_state().SetValue(
           MemoryChunk::kCompactingFinalize);
@@ -3581,9 +3549,37 @@ void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
 }
 
 
-void MarkCompactCollector::VisitLiveObjects(Page* page,
-                                            ObjectVisitor* visitor) {
-  // First pass on aborted pages.
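+// Visits the live objects on {page} with {visitor}. Aborts and returns false
+// as soon as the visitor fails on an object; in kClearMarkbits mode the mark
+// bits of all visited objects are cleared along the way.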
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
+                                            HeapObjectVisitor* visitor,
+                                            IterationMode mode) {
+  Address offsets[16];
+  for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+    if (*cell == 0) continue;
+    int live_objects = MarkWordToObjectStarts(*cell, cell_base, offsets);
+    for (int i = 0; i < live_objects; i++) {
+      HeapObject* object = HeapObject::FromAddress(offsets[i]);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+      if (!visitor->Visit(object)) {
+        if ((mode == kClearMarkbits) && (i > 0)) {
+          page->markbits()->ClearRange(
+              page->AddressToMarkbitIndex(page->area_start()),
+              page->AddressToMarkbitIndex(offsets[i]));
+        }
+        return false;
+      }
+    }
+    if (mode == kClearMarkbits) {
+      *cell = 0;
+    }
+  }
+  return true;
+}
+
+
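+// Visits the bodies of all live objects on {page} with {visitor}; used for
+// the first pass over aborted evacuation candidate pages.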
+void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
+                                                ObjectVisitor* visitor) {
   Address starts[16];
   for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
     Address cell_base = it.CurrentCellBase();
@@ -3728,7 +3724,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     // First pass on aborted pages, fixing up all live objects.
     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
       p->ClearEvacuationCandidate();
-      VisitLiveObjects(p, &updating_visitor);
+      VisitLiveObjectsBody(p, &updating_visitor);
     }
   }
 