OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 1515 matching lines...)
1526 } | 1526 } |
1527 | 1527 |
1528 | 1528 |
1529 class MarkCompactCollector::HeapObjectVisitor { | 1529 class MarkCompactCollector::HeapObjectVisitor { |
1530 public: | 1530 public: |
1531 virtual ~HeapObjectVisitor() {} | 1531 virtual ~HeapObjectVisitor() {} |
1532 virtual bool Visit(HeapObject* object) = 0; | 1532 virtual bool Visit(HeapObject* object) = 0; |
1533 }; | 1533 }; |
1534 | 1534 |
1535 | 1535 |
| 1536 class MarkCompactCollector::EvacuateVisitorBase |
| 1537 : public MarkCompactCollector::HeapObjectVisitor { |
| 1538 public: |
| 1539 EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer) |
| 1540 : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {} |
| 1541 |
| 1542 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object, |
| 1543 HeapObject** target_object) { |
| 1544 int size = object->Size(); |
| 1545 AllocationAlignment alignment = object->RequiredAlignment(); |
| 1546 AllocationResult allocation = target_space->AllocateRaw(size, alignment); |
| 1547 if (allocation.To(target_object)) { |
| 1548 heap_->mark_compact_collector()->MigrateObject( |
| 1549 *target_object, object, size, target_space->identity(), |
| 1550 evacuation_slots_buffer_); |
| 1551 return true; |
| 1552 } |
| 1553 return false; |
| 1554 } |
| 1555 |
| 1556 protected: |
| 1557 Heap* heap_; |
| 1558 SlotsBuffer** evacuation_slots_buffer_; |
| 1559 }; |
| 1560 |
| 1561 |
1536 class MarkCompactCollector::EvacuateNewSpaceVisitor | 1562 class MarkCompactCollector::EvacuateNewSpaceVisitor |
1537 : public MarkCompactCollector::HeapObjectVisitor { | 1563 : public MarkCompactCollector::EvacuateVisitorBase { |
1538 public: | 1564 public: |
1539 explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {} | 1565 explicit EvacuateNewSpaceVisitor(Heap* heap, |
| 1566 SlotsBuffer** evacuation_slots_buffer) |
| 1567 : EvacuateVisitorBase(heap, evacuation_slots_buffer) {} |
1540 | 1568 |
1541 virtual bool Visit(HeapObject* object) { | 1569 bool Visit(HeapObject* object) override { |
1542 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT); | 1570 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT); |
1543 int size = object->Size(); | 1571 int size = object->Size(); |
1544 | 1572 HeapObject* target_object = nullptr; |
1545 // TODO(hpayer): Refactor EvacuateObject and call this function instead. | |
1546 if (heap_->ShouldBePromoted(object->address(), size) && | 1573 if (heap_->ShouldBePromoted(object->address(), size) && |
1547 heap_->mark_compact_collector()->TryPromoteObject(object, size)) { | 1574 TryEvacuateObject(heap_->old_space(), object, &target_object)) { |
| 1575 // If we end up needing more special cases, we should factor this out. |
| 1576 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { |
| 1577 heap_->array_buffer_tracker()->Promote( |
| 1578 JSArrayBuffer::cast(target_object)); |
| 1579 } |
| 1580 heap_->IncrementPromotedObjectsSize(size); |
1548 return true; | 1581 return true; |
1549 } | 1582 } |
1550 | 1583 |
1551 AllocationAlignment alignment = object->RequiredAlignment(); | 1584 AllocationAlignment alignment = object->RequiredAlignment(); |
1552 AllocationResult allocation = | 1585 AllocationResult allocation = |
1553 heap_->new_space()->AllocateRaw(size, alignment); | 1586 heap_->new_space()->AllocateRaw(size, alignment); |
1554 if (allocation.IsRetry()) { | 1587 if (allocation.IsRetry()) { |
1555 if (!heap_->new_space()->AddFreshPage()) { | 1588 if (!heap_->new_space()->AddFreshPage()) { |
1556 // Shouldn't happen. We are sweeping linearly, and to-space | 1589 // Shouldn't happen. We are sweeping linearly, and to-space |
1557 // has the same number of pages as from-space, so there is | 1590 // has the same number of pages as from-space, so there is |
1558 // always room unless we are in an OOM situation. | 1591 // always room unless we are in an OOM situation. |
1559 FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n"); | 1592 FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n"); |
1560 } | 1593 } |
1561 allocation = heap_->new_space()->AllocateRaw(size, alignment); | 1594 allocation = heap_->new_space()->AllocateRaw(size, alignment); |
1562 DCHECK(!allocation.IsRetry()); | 1595 DCHECK(!allocation.IsRetry()); |
1563 } | 1596 } |
1564 Object* target = allocation.ToObjectChecked(); | 1597 Object* target = allocation.ToObjectChecked(); |
1565 | 1598 |
1566 heap_->mark_compact_collector()->MigrateObject( | 1599 heap_->mark_compact_collector()->MigrateObject( |
1567 HeapObject::cast(target), object, size, NEW_SPACE, nullptr); | 1600 HeapObject::cast(target), object, size, NEW_SPACE, nullptr); |
1568 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { | 1601 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { |
1569 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); | 1602 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); |
1570 } | 1603 } |
1571 heap_->IncrementSemiSpaceCopiedObjectSize(size); | 1604 heap_->IncrementSemiSpaceCopiedObjectSize(size); |
1572 return true; | 1605 return true; |
1573 } | 1606 } |
1574 | |
1575 private: | |
1576 Heap* heap_; | |
1577 }; | 1607 }; |
1578 | 1608 |
1579 | 1609 |
1580 class MarkCompactCollector::EvacuateOldSpaceVisitor | 1610 class MarkCompactCollector::EvacuateOldSpaceVisitor |
1581 : public MarkCompactCollector::HeapObjectVisitor { | 1611 : public MarkCompactCollector::EvacuateVisitorBase { |
1582 public: | 1612 public: |
1583 EvacuateOldSpaceVisitor(Heap* heap, | 1613 EvacuateOldSpaceVisitor(Heap* heap, |
1584 CompactionSpaceCollection* compaction_spaces, | 1614 CompactionSpaceCollection* compaction_spaces, |
1585 SlotsBuffer** evacuation_slots_buffer) | 1615 SlotsBuffer** evacuation_slots_buffer) |
1586 : heap_(heap), | 1616 : EvacuateVisitorBase(heap, evacuation_slots_buffer), |
1587 compaction_spaces_(compaction_spaces), | 1617 compaction_spaces_(compaction_spaces) {} |
1588 evacuation_slots_buffer_(evacuation_slots_buffer) {} | |
1589 | 1618 |
1590 virtual bool Visit(HeapObject* object) { | 1619 bool Visit(HeapObject* object) override { |
1591 int size = object->Size(); | 1620 CompactionSpace* target_space = compaction_spaces_->Get( |
1592 AllocationAlignment alignment = object->RequiredAlignment(); | 1621 Page::FromAddress(object->address())->owner()->identity()); |
1593 HeapObject* target_object = nullptr; | 1622 HeapObject* target_object = nullptr; |
1594 AllocationSpace id = | 1623 if (TryEvacuateObject(target_space, object, &target_object)) { |
1595 Page::FromAddress(object->address())->owner()->identity(); | 1624 DCHECK(object->map_word().IsForwardingAddress()); |
1596 AllocationResult allocation = | 1625 return true; |
1597 compaction_spaces_->Get(id)->AllocateRaw(size, alignment); | |
1598 if (!allocation.To(&target_object)) { | |
1599 return false; | |
1600 } | 1626 } |
1601 heap_->mark_compact_collector()->MigrateObject( | 1627 return false; |
1602 target_object, object, size, id, evacuation_slots_buffer_); | |
1603 DCHECK(object->map_word().IsForwardingAddress()); | |
1604 return true; | |
1605 } | 1628 } |
1606 | 1629 |
1607 private: | 1630 private: |
1608 Heap* heap_; | |
1609 CompactionSpaceCollection* compaction_spaces_; | 1631 CompactionSpaceCollection* compaction_spaces_; |
1610 SlotsBuffer** evacuation_slots_buffer_; | |
1611 }; | 1632 }; |
1612 | 1633 |
1613 | 1634 |
1614 bool MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page, | |
1615 HeapObjectVisitor* visitor, | |
1616 IterationMode mode) { | |
1617 Address offsets[16]; | |
1618 for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) { | |
1619 Address cell_base = it.CurrentCellBase(); | |
1620 MarkBit::CellType* cell = it.CurrentCell(); | |
1621 | |
1622 if (*cell == 0) continue; | |
1623 | |
1624 int live_objects = MarkWordToObjectStarts(*cell, cell_base, offsets); | |
1625 for (int i = 0; i < live_objects; i++) { | |
1626 HeapObject* object = HeapObject::FromAddress(offsets[i]); | |
1627 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | |
1628 if (!visitor->Visit(object)) { | |
1629 if ((mode == kClearMarkbits) && (i > 0)) { | |
1630 page->markbits()->ClearRange( | |
1631 page->AddressToMarkbitIndex(page->area_start()), | |
1632 page->AddressToMarkbitIndex(offsets[i])); | |
1633 } | |
1634 return false; | |
1635 } | |
1636 } | |
1637 if (mode == kClearMarkbits) { | |
1638 *cell = 0; | |
1639 } | |
1640 } | |
1641 return true; | |
1642 } | |
1643 | |
1644 | |
1645 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { | 1635 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { |
1646 PageIterator it(space); | 1636 PageIterator it(space); |
1647 while (it.has_next()) { | 1637 while (it.has_next()) { |
1648 Page* p = it.next(); | 1638 Page* p = it.next(); |
1649 DiscoverGreyObjectsOnPage(p); | 1639 DiscoverGreyObjectsOnPage(p); |
1650 if (marking_deque()->IsFull()) return; | 1640 if (marking_deque()->IsFull()) return; |
1651 } | 1641 } |
1652 } | 1642 } |
1653 | 1643 |
1654 | 1644 |
(...skipping 1332 matching lines...)
2987 MapWord map_word = HeapObject::cast(*p)->map_word(); | 2977 MapWord map_word = HeapObject::cast(*p)->map_word(); |
2988 | 2978 |
2989 if (map_word.IsForwardingAddress()) { | 2979 if (map_word.IsForwardingAddress()) { |
2990 return String::cast(map_word.ToForwardingAddress()); | 2980 return String::cast(map_word.ToForwardingAddress()); |
2991 } | 2981 } |
2992 | 2982 |
2993 return String::cast(*p); | 2983 return String::cast(*p); |
2994 } | 2984 } |
2995 | 2985 |
2996 | 2986 |
2997 bool MarkCompactCollector::TryPromoteObject(HeapObject* object, | |
2998 int object_size) { | |
2999 OldSpace* old_space = heap()->old_space(); | |
3000 | |
3001 HeapObject* target = nullptr; | |
3002 AllocationAlignment alignment = object->RequiredAlignment(); | |
3003 AllocationResult allocation = old_space->AllocateRaw(object_size, alignment); | |
3004 if (allocation.To(&target)) { | |
3005 MigrateObject(target, object, object_size, old_space->identity(), | |
3006 &migration_slots_buffer_); | |
3007 // If we end up needing more special cases, we should factor this out. | |
3008 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { | |
3009 heap()->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target)); | |
3010 } | |
3011 heap()->IncrementPromotedObjectsSize(object_size); | |
3012 return true; | |
3013 } | |
3014 | |
3015 return false; | |
3016 } | |
3017 | |
3018 | |
3019 bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot, | 2987 bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot, |
3020 HeapObject** out_object) { | 2988 HeapObject** out_object) { |
3021 Space* owner = p->owner(); | 2989 Space* owner = p->owner(); |
3022 if (owner == heap_->lo_space() || owner == NULL) { | 2990 if (owner == heap_->lo_space() || owner == NULL) { |
3023 Object* large_object = heap_->lo_space()->FindObject(slot); | 2991 Object* large_object = heap_->lo_space()->FindObject(slot); |
3024 // This object has to exist, otherwise we would not have recorded a slot | 2992 // This object has to exist, otherwise we would not have recorded a slot |
3025 // for it. | 2993 // for it. |
3026 CHECK(large_object->IsHeapObject()); | 2994 CHECK(large_object->IsHeapObject()); |
3027 HeapObject* large_heap_object = HeapObject::cast(large_object); | 2995 HeapObject* large_heap_object = HeapObject::cast(large_object); |
3028 if (IsMarked(large_heap_object)) { | 2996 if (IsMarked(large_heap_object)) { |
(...skipping 152 matching lines...)
3181 new_space->Flip(); | 3149 new_space->Flip(); |
3182 new_space->ResetAllocationInfo(); | 3150 new_space->ResetAllocationInfo(); |
3183 | 3151 |
3184 int survivors_size = 0; | 3152 int survivors_size = 0; |
3185 | 3153 |
3186 // First pass: traverse all objects in inactive semispace, remove marks, | 3154 // First pass: traverse all objects in inactive semispace, remove marks, |
3187 // migrate live objects and write forwarding addresses. This stage puts | 3155 // migrate live objects and write forwarding addresses. This stage puts |
3188 // new entries in the store buffer and may cause some pages to be marked | 3156 // new entries in the store buffer and may cause some pages to be marked |
3189 // scan-on-scavenge. | 3157 // scan-on-scavenge. |
3190 NewSpacePageIterator it(from_bottom, from_top); | 3158 NewSpacePageIterator it(from_bottom, from_top); |
3191 EvacuateNewSpaceVisitor new_space_visitor(heap()); | 3159 EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_); |
3192 while (it.has_next()) { | 3160 while (it.has_next()) { |
3193 NewSpacePage* p = it.next(); | 3161 NewSpacePage* p = it.next(); |
3194 survivors_size += p->LiveBytes(); | 3162 survivors_size += p->LiveBytes(); |
3195 bool ok = IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits); | 3163 bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits); |
3196 USE(ok); | 3164 USE(ok); |
3197 DCHECK(ok); | 3165 DCHECK(ok); |
3198 } | 3166 } |
3199 | 3167 |
3200 heap_->IncrementYoungSurvivorsCounter(survivors_size); | 3168 heap_->IncrementYoungSurvivorsCounter(survivors_size); |
3201 new_space->set_age_mark(new_space->top()); | 3169 new_space->set_age_mark(new_space->top()); |
3202 } | 3170 } |
3203 | 3171 |
3204 | 3172 |
3205 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( | 3173 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( |
(...skipping 181 matching lines...)
3387 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) == | 3355 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) == |
3388 MemoryChunk::kSweepingDone); | 3356 MemoryChunk::kSweepingDone); |
3389 if (p->parallel_compaction_state().TrySetValue( | 3357 if (p->parallel_compaction_state().TrySetValue( |
3390 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { | 3358 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { |
3391 if (p->IsEvacuationCandidate()) { | 3359 if (p->IsEvacuationCandidate()) { |
3392 DCHECK_EQ(p->parallel_compaction_state().Value(), | 3360 DCHECK_EQ(p->parallel_compaction_state().Value(), |
3393 MemoryChunk::kCompactingInProgress); | 3361 MemoryChunk::kCompactingInProgress); |
3394 double start = heap()->MonotonicallyIncreasingTimeInMs(); | 3362 double start = heap()->MonotonicallyIncreasingTimeInMs(); |
3395 intptr_t live_bytes = p->LiveBytes(); | 3363 intptr_t live_bytes = p->LiveBytes(); |
3396 AlwaysAllocateScope always_allocate(isolate()); | 3364 AlwaysAllocateScope always_allocate(isolate()); |
3397 if (IterateLiveObjectsOnPage(p, &visitor, kClearMarkbits)) { | 3365 if (VisitLiveObjects(p, &visitor, kClearMarkbits)) { |
3398 p->ResetLiveBytes(); | 3366 p->ResetLiveBytes(); |
3399 p->parallel_compaction_state().SetValue( | 3367 p->parallel_compaction_state().SetValue( |
3400 MemoryChunk::kCompactingFinalize); | 3368 MemoryChunk::kCompactingFinalize); |
3401 compaction_spaces->ReportCompactionProgress( | 3369 compaction_spaces->ReportCompactionProgress( |
3402 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes); | 3370 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes); |
3403 } else { | 3371 } else { |
3404 p->parallel_compaction_state().SetValue( | 3372 p->parallel_compaction_state().SetValue( |
3405 MemoryChunk::kCompactingAborted); | 3373 MemoryChunk::kCompactingAborted); |
3406 } | 3374 } |
3407 } else { | 3375 } else { |
(...skipping 166 matching lines...)
3574 DCHECK(p->IsEvacuationCandidate() || | 3542 DCHECK(p->IsEvacuationCandidate() || |
3575 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3543 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
3576 if (p->IsEvacuationCandidate()) { | 3544 if (p->IsEvacuationCandidate()) { |
3577 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot, | 3545 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot, |
3578 end_slot); | 3546 end_slot); |
3579 } | 3547 } |
3580 } | 3548 } |
3581 } | 3549 } |
3582 | 3550 |
3583 | 3551 |
3584 void MarkCompactCollector::VisitLiveObjects(Page* page, | 3552 bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, |
3585 ObjectVisitor* visitor) { | 3553 HeapObjectVisitor* visitor, |
3586 // First pass on aborted pages. | 3554 IterationMode mode) { |
| 3555 Address offsets[16]; |
| 3556 for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) { |
| 3557 Address cell_base = it.CurrentCellBase(); |
| 3558 MarkBit::CellType* cell = it.CurrentCell(); |
| 3559 if (*cell == 0) continue; |
| 3560 int live_objects = MarkWordToObjectStarts(*cell, cell_base, offsets); |
| 3561 for (int i = 0; i < live_objects; i++) { |
| 3562 HeapObject* object = HeapObject::FromAddress(offsets[i]); |
| 3563 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 3564 if (!visitor->Visit(object)) { |
| 3565 if ((mode == kClearMarkbits) && (i > 0)) { |
| 3566 page->markbits()->ClearRange( |
| 3567 page->AddressToMarkbitIndex(page->area_start()), |
| 3568 page->AddressToMarkbitIndex(offsets[i])); |
| 3569 } |
| 3570 return false; |
| 3571 } |
| 3572 } |
| 3573 if (mode == kClearMarkbits) { |
| 3574 *cell = 0; |
| 3575 } |
| 3576 } |
| 3577 return true; |
| 3578 } |
| 3579 |
| 3580 |
| 3581 void MarkCompactCollector::VisitLiveObjectsBody(Page* page, |
| 3582 ObjectVisitor* visitor) { |
3587 Address starts[16]; | 3583 Address starts[16]; |
3588 for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) { | 3584 for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) { |
3589 Address cell_base = it.CurrentCellBase(); | 3585 Address cell_base = it.CurrentCellBase(); |
3590 MarkBit::CellType* cell = it.CurrentCell(); | 3586 MarkBit::CellType* cell = it.CurrentCell(); |
3591 if (*cell == 0) continue; | 3587 if (*cell == 0) continue; |
3592 int live_objects = MarkWordToObjectStarts(*cell, cell_base, starts); | 3588 int live_objects = MarkWordToObjectStarts(*cell, cell_base, starts); |
3593 for (int i = 0; i < live_objects; i++) { | 3589 for (int i = 0; i < live_objects; i++) { |
3594 HeapObject* live_object = HeapObject::FromAddress(starts[i]); | 3590 HeapObject* live_object = HeapObject::FromAddress(starts[i]); |
3595 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object))); | 3591 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object))); |
3596 Map* map = live_object->synchronized_map(); | 3592 Map* map = live_object->synchronized_map(); |
(...skipping 124 matching lines...)
3721 | 3717 |
3722 // Important: skip list should be cleared only after roots were updated | 3718 // Important: skip list should be cleared only after roots were updated |
3723 // because root iteration traverses the stack and might have to find | 3719 // because root iteration traverses the stack and might have to find |
3724 // code objects from non-updated pc pointing into evacuation candidate. | 3720 // code objects from non-updated pc pointing into evacuation candidate. |
3725 SkipList* list = p->skip_list(); | 3721 SkipList* list = p->skip_list(); |
3726 if (list != NULL) list->Clear(); | 3722 if (list != NULL) list->Clear(); |
3727 | 3723 |
3728 // First pass on aborted pages, fixing up all live objects. | 3724 // First pass on aborted pages, fixing up all live objects. |
3729 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 3725 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { |
3730 p->ClearEvacuationCandidate(); | 3726 p->ClearEvacuationCandidate(); |
3731 VisitLiveObjects(p, &updating_visitor); | 3727 VisitLiveObjectsBody(p, &updating_visitor); |
3732 } | 3728 } |
3733 } | 3729 } |
3734 | 3730 |
3735 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { | 3731 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
3736 if (FLAG_gc_verbose) { | 3732 if (FLAG_gc_verbose) { |
3737 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", | 3733 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", |
3738 reinterpret_cast<intptr_t>(p)); | 3734 reinterpret_cast<intptr_t>(p)); |
3739 } | 3735 } |
3740 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3736 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
3741 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | 3737 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); |
(...skipping 415 matching lines...)
4157 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4153 MarkBit mark_bit = Marking::MarkBitFrom(host); |
4158 if (Marking::IsBlack(mark_bit)) { | 4154 if (Marking::IsBlack(mark_bit)) { |
4159 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 4155 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
4160 RecordRelocSlot(&rinfo, target); | 4156 RecordRelocSlot(&rinfo, target); |
4161 } | 4157 } |
4162 } | 4158 } |
4163 } | 4159 } |
4164 | 4160 |
4165 } // namespace internal | 4161 } // namespace internal |
4166 } // namespace v8 | 4162 } // namespace v8 |
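
A minimal, self-contained sketch of the bail-out visitor pattern this patch converges on (plain C++ standing in for the V8 types; `ObjectVisitor`, `CopyingVisitor`, and `WalkLiveObjects` are illustrative stand-ins, not V8 APIs): the driver stops iterating as soon as `Visit` returns false, mirroring how the new `VisitLiveObjects` reports an aborted page evacuation when `TryEvacuateObject` fails to allocate in the target space.

```cpp
// Standalone illustration only: models the contract of
// MarkCompactCollector::VisitLiveObjects (visit every live object,
// stop and report failure on the first refusal).
#include <cstdio>
#include <vector>

struct Object { int size; };

// Analogue of MarkCompactCollector::HeapObjectVisitor.
class ObjectVisitor {
 public:
  virtual ~ObjectVisitor() = default;
  virtual bool Visit(Object* object) = 0;  // false == abort the walk
};

// Analogue of EvacuateVisitorBase::TryEvacuateObject: copying succeeds
// only while the target "space" still has room.
class CopyingVisitor : public ObjectVisitor {
 public:
  explicit CopyingVisitor(int capacity) : capacity_(capacity) {}
  bool Visit(Object* object) override {
    if (used_ + object->size > capacity_) return false;  // allocation failed
    used_ += object->size;
    return true;
  }
  int used() const { return used_; }

 private:
  int capacity_;
  int used_ = 0;
};

// Analogue of VisitLiveObjects: returns false if any visit fails.
bool WalkLiveObjects(std::vector<Object>& page, ObjectVisitor* visitor) {
  for (Object& o : page) {
    if (!visitor->Visit(&o)) return false;
  }
  return true;
}

int main() {
  std::vector<Object> page = {{8}, {16}, {32}};
  CopyingVisitor visitor(24);  // too small: evacuation of this page aborts
  bool ok = WalkLiveObjects(page, &visitor);
  std::printf("evacuated=%s copied_bytes=%d\n", ok ? "yes" : "no",
              visitor.used());
  return 0;
}
```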