Chromium Code Reviews
Unified diff: src/heap/mark-compact.cc

Issue 1494533002: Reland of [heap] Unify evacuating an object for new and old generation. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/mark-compact.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(...skipping 1538 matching lines...)
 }


 class MarkCompactCollector::HeapObjectVisitor {
  public:
   virtual ~HeapObjectVisitor() {}
   virtual bool Visit(HeapObject* object) = 0;
 };


+class MarkCompactCollector::EvacuateVisitorBase
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
+      : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
+
+  bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
+                         HeapObject** target_object) {
+    int size = object->Size();
+    AllocationAlignment alignment = object->RequiredAlignment();
+    AllocationResult allocation = target_space->AllocateRaw(size, alignment);
+    if (allocation.To(target_object)) {
+      heap_->mark_compact_collector()->MigrateObject(
+          *target_object, object, size, target_space->identity(),
+          evacuation_slots_buffer_);
+      return true;
+    }
+    return false;
+  }
+
+ protected:
+  Heap* heap_;
+  SlotsBuffer** evacuation_slots_buffer_;
+};
+
+
 class MarkCompactCollector::EvacuateNewSpaceVisitor
-    : public MarkCompactCollector::HeapObjectVisitor {
+    : public MarkCompactCollector::EvacuateVisitorBase {
  public:
-  explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {}
+  explicit EvacuateNewSpaceVisitor(Heap* heap,
+                                   SlotsBuffer** evacuation_slots_buffer)
+      : EvacuateVisitorBase(heap, evacuation_slots_buffer) {}

   virtual bool Visit(HeapObject* object) {
     Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
     int size = object->Size();
-
-    // TODO(hpayer): Refactor EvacuateObject and call this function instead.
+    HeapObject* target_object = nullptr;
     if (heap_->ShouldBePromoted(object->address(), size) &&
-        heap_->mark_compact_collector()->TryPromoteObject(object, size)) {
+        TryEvacuateObject(heap_->old_space(), object, &target_object)) {
+      // If we end up needing more special cases, we should factor this out.
+      if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
+        heap_->array_buffer_tracker()->Promote(
+            JSArrayBuffer::cast(target_object));
+      }
+      heap_->IncrementPromotedObjectsSize(size);
       return true;
     }

     AllocationAlignment alignment = object->RequiredAlignment();
     AllocationResult allocation =
         heap_->new_space()->AllocateRaw(size, alignment);
     if (allocation.IsRetry()) {
       if (!heap_->new_space()->AddFreshPage()) {
         // Shouldn't happen. We are sweeping linearly, and to-space
         // has the same number of pages as from-space, so there is
         // always room unless we are in an OOM situation.
         FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
       }
       allocation = heap_->new_space()->AllocateRaw(size, alignment);
       DCHECK(!allocation.IsRetry());
     }
     Object* target = allocation.ToObjectChecked();

     heap_->mark_compact_collector()->MigrateObject(
         HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
     if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
       heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
     }
     heap_->IncrementSemiSpaceCopiedObjectSize(size);
     return true;
   }
-
- private:
-  Heap* heap_;
 };


 class MarkCompactCollector::EvacuateOldSpaceVisitor
-    : public MarkCompactCollector::HeapObjectVisitor {
+    : public MarkCompactCollector::EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
                           CompactionSpaceCollection* compaction_spaces,
                           SlotsBuffer** evacuation_slots_buffer)
-      : heap_(heap),
-        compaction_spaces_(compaction_spaces),
-        evacuation_slots_buffer_(evacuation_slots_buffer) {}
+      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+        compaction_spaces_(compaction_spaces) {}

   virtual bool Visit(HeapObject* object) {
-    int size = object->Size();
-    AllocationAlignment alignment = object->RequiredAlignment();
+    CompactionSpace* target_space = compaction_spaces_->Get(
+        Page::FromAddress(object->address())->owner()->identity());
     HeapObject* target_object = nullptr;
-    AllocationSpace id =
-        Page::FromAddress(object->address())->owner()->identity();
-    AllocationResult allocation =
-        compaction_spaces_->Get(id)->AllocateRaw(size, alignment);
-    if (!allocation.To(&target_object)) {
-      return false;
+    if (TryEvacuateObject(target_space, object, &target_object)) {
+      DCHECK(object->map_word().IsForwardingAddress());
+      return true;
     }
-    heap_->mark_compact_collector()->MigrateObject(
-        target_object, object, size, id, evacuation_slots_buffer_);
-    DCHECK(object->map_word().IsForwardingAddress());
-    return true;
+    return false;
   }

  private:
-  Heap* heap_;
   CompactionSpaceCollection* compaction_spaces_;
-  SlotsBuffer** evacuation_slots_buffer_;
 };


 bool MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page,
                                                     HeapObjectVisitor* visitor,
                                                     IterationMode mode) {
   Address offsets[16];
   for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
     Address cell_base = it.CurrentCellBase();
     MarkBit::CellType* cell = it.CurrentCell();
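The shape of this refactoring is easier to see without V8's allocator details. Below is a minimal, compilable sketch of the same pattern, assuming hypothetical stand-in types (a toy PagedSpace whose AllocateRaw only checks a size cap, and printf standing in for MigrateObject); it illustrates the class layout only, not V8's actual API.

#include <cstdio>

// Hypothetical stand-ins for V8's HeapObject and paged spaces.
struct HeapObject { int size; };

struct PagedSpace {
  const char* name;
  // Toy allocator: succeeds for anything up to 4 KiB.
  bool AllocateRaw(int size) { return size <= 4096; }
};

class HeapObjectVisitor {
 public:
  virtual ~HeapObjectVisitor() {}
  virtual bool Visit(HeapObject* object) = 0;
};

// The base class owns the one step both generations share: try to
// allocate in the target space and, on success, migrate the object.
class EvacuateVisitorBase : public HeapObjectVisitor {
 protected:
  bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object) {
    if (!target_space->AllocateRaw(object->size)) return false;
    std::printf("migrated %d bytes into %s\n", object->size,
                target_space->name);  // stands in for MigrateObject()
    return true;
  }
};

// New-space evacuation: promote into old space. (The real visitor also
// falls back to a semi-space copy when promotion is not wanted.)
class EvacuateNewSpaceVisitor : public EvacuateVisitorBase {
 public:
  explicit EvacuateNewSpaceVisitor(PagedSpace* old_space)
      : old_space_(old_space) {}
  bool Visit(HeapObject* object) override {
    return TryEvacuateObject(old_space_, object);
  }

 private:
  PagedSpace* old_space_;
};

// Old-space compaction: evacuate into a compaction space.
class EvacuateOldSpaceVisitor : public EvacuateVisitorBase {
 public:
  explicit EvacuateOldSpaceVisitor(PagedSpace* compaction_space)
      : compaction_space_(compaction_space) {}
  bool Visit(HeapObject* object) override {
    return TryEvacuateObject(compaction_space_, object);
  }

 private:
  PagedSpace* compaction_space_;
};

int main() {
  PagedSpace old_space{"old space"};
  PagedSpace compaction_space{"compaction space"};
  HeapObject young{32}, old{64};

  EvacuateNewSpaceVisitor promote(&old_space);
  EvacuateOldSpaceVisitor compact(&compaction_space);
  promote.Visit(&young);  // promotion path
  compact.Visit(&old);    // compaction path
  return 0;
}

In the patch itself the shared state (heap_ and the evacuation slots buffer) also lives in EvacuateVisitorBase, which is what lets EvacuateNewSpaceVisitor drop its private heap_ field and lets the collector delete TryPromoteObject further down.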
(...skipping 1338 matching lines...)
   MapWord map_word = HeapObject::cast(*p)->map_word();

   if (map_word.IsForwardingAddress()) {
     return String::cast(map_word.ToForwardingAddress());
   }

   return String::cast(*p);
 }


-bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
-                                            int object_size) {
-  OldSpace* old_space = heap()->old_space();
-
-  HeapObject* target = nullptr;
-  AllocationAlignment alignment = object->RequiredAlignment();
-  AllocationResult allocation = old_space->AllocateRaw(object_size, alignment);
-  if (allocation.To(&target)) {
-    MigrateObject(target, object, object_size, old_space->identity(),
-                  &migration_slots_buffer_);
-    // If we end up needing more special cases, we should factor this out.
-    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-      heap()->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
-    }
-    heap()->IncrementPromotedObjectsSize(object_size);
-    return true;
-  }
-
-  return false;
-}
-
-
 bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
                                                HeapObject** out_object) {
   Space* owner = p->owner();
   if (owner == heap_->lo_space() || owner == NULL) {
     Object* large_object = heap_->lo_space()->FindObject(slot);
     // This object has to exist, otherwise we would not have recorded a slot
     // for it.
     CHECK(large_object->IsHeapObject());
     HeapObject* large_heap_object = HeapObject::cast(large_object);
     if (IsMarked(large_heap_object)) {
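With TryEvacuateObject available on the base class, the standalone MarkCompactCollector::TryPromoteObject above becomes dead code: its body (allocate in old space, migrate, promote a surviving JSArrayBuffer via the array-buffer tracker, bump the promoted-objects counter) now runs inside EvacuateNewSpaceVisitor::Visit, so the patch deletes it here.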
(...skipping 152 matching lines...)
   new_space->Flip();
   new_space->ResetAllocationInfo();

   int survivors_size = 0;

   // First pass: traverse all objects in inactive semispace, remove marks,
   // migrate live objects and write forwarding addresses. This stage puts
   // new entries in the store buffer and may cause some pages to be marked
   // scan-on-scavenge.
   NewSpacePageIterator it(from_bottom, from_top);
-  EvacuateNewSpaceVisitor new_space_visitor(heap());
+  EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_);
   while (it.has_next()) {
     NewSpacePage* p = it.next();
     survivors_size += p->LiveBytes();
     bool ok = IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits);
     USE(ok);
     DCHECK(ok);
   }

   heap_->IncrementYoungSurvivorsCounter(survivors_size);
   new_space->set_age_mark(new_space->top());
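The only change in this chunk is the visitor's construction: EvacuateNewSpaceVisitor now receives &migration_slots_buffer_, the buffer the deleted TryPromoteObject used to pass to MigrateObject, so slots of objects promoted to the old generation are still recorded there. The semi-space copy path inside the visitor keeps passing nullptr, as before.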
(...skipping 971 matching lines...)
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(&rinfo, target);
     }
   }
 }

 }  // namespace internal
 }  // namespace v8