Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 1483963004: Revert of [heap] Unify evacuating an object for new and old generation. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years ago

 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/mark-compact.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(...skipping 1538 matching lines...)
 }


 class MarkCompactCollector::HeapObjectVisitor {
  public:
   virtual ~HeapObjectVisitor() {}
   virtual bool Visit(HeapObject* object) = 0;
 };


-class MarkCompactCollector::EvacuateVisitorBase
-    : public MarkCompactCollector::HeapObjectVisitor {
- public:
-  EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
-      : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
-
-  bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
-                         HeapObject** target_object) {
-    int size = object->Size();
-    AllocationAlignment alignment = object->RequiredAlignment();
-    AllocationResult allocation = target_space->AllocateRaw(size, alignment);
-    if (allocation.To(target_object)) {
-      heap_->mark_compact_collector()->MigrateObject(
-          *target_object, object, size, target_space->identity(),
-          evacuation_slots_buffer_);
-      return true;
-    }
-    return false;
-  }
-
- protected:
-  Heap* heap_;
-  SlotsBuffer** evacuation_slots_buffer_;
-};
-
-
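Note: the hunk above deletes the EvacuateVisitorBase helper that the original CL introduced. A minimal, self-contained sketch of the pattern it factored out (try to bump-allocate room in a target space, migrate the payload only on success, and leave the fallback decision to the caller), using toy types that merely stand in for the V8 internals:

#include <cstddef>
#include <cstring>
#include <optional>

// ToySpace stands in for a V8 PagedSpace: a bump-pointer region that can
// run out of room (the AllocationResult::IsRetry() case in the real code).
struct ToySpace {
  char buffer[1 << 16];
  std::size_t top = 0;
  std::optional<char*> AllocateRaw(std::size_t size) {
    if (top + size > sizeof(buffer)) return std::nullopt;
    char* result = buffer + top;
    top += size;
    return result;
  }
};

// Same shape as the deleted TryEvacuateObject: allocate, then migrate.
bool TryEvacuateObject(ToySpace* target_space, const char* object,
                       std::size_t size, char** target_object) {
  if (auto slot = target_space->AllocateRaw(size)) {
    std::memcpy(*slot, object, size);  // MigrateObject(...) in the real code.
    *target_object = *slot;
    return true;
  }
  return false;  // Caller picks a fallback, e.g. a semi-space copy.
}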
 class MarkCompactCollector::EvacuateNewSpaceVisitor
-    : public MarkCompactCollector::EvacuateVisitorBase {
+    : public MarkCompactCollector::HeapObjectVisitor {
  public:
-  explicit EvacuateNewSpaceVisitor(Heap* heap,
-                                   SlotsBuffer** evacuation_slots_buffer)
-      : EvacuateVisitorBase(heap, evacuation_slots_buffer) {}
+  explicit EvacuateNewSpaceVisitor(Heap* heap) : heap_(heap) {}

   virtual bool Visit(HeapObject* object) {
     Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
     int size = object->Size();
-    HeapObject* target_object = nullptr;
+
+    // TODO(hpayer): Refactor EvacuateObject and call this function instead.
     if (heap_->ShouldBePromoted(object->address(), size) &&
-        TryEvacuateObject(heap_->old_space(), object, &target_object)) {
-      // If we end up needing more special cases, we should factor this out.
-      if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
-        heap_->array_buffer_tracker()->Promote(
-            JSArrayBuffer::cast(target_object));
-      }
-      heap_->IncrementPromotedObjectsSize(size);
+        heap_->mark_compact_collector()->TryPromoteObject(object, size)) {
       return true;
     }

     AllocationAlignment alignment = object->RequiredAlignment();
     AllocationResult allocation =
         heap_->new_space()->AllocateRaw(size, alignment);
     if (allocation.IsRetry()) {
       if (!heap_->new_space()->AddFreshPage()) {
         // Shouldn't happen. We are sweeping linearly, and to-space
         // has the same number of pages as from-space, so there is
         // always room unless we are in an OOM situation.
         FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
       }
       allocation = heap_->new_space()->AllocateRaw(size, alignment);
       DCHECK(!allocation.IsRetry());
     }
     Object* target = allocation.ToObjectChecked();

     heap_->mark_compact_collector()->MigrateObject(
         HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
     if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
       heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
     }
     heap_->IncrementSemiSpaceCopiedObjectSize(size);
     return true;
   }
+
+ private:
+  Heap* heap_;
 };


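Note: with the revert, EvacuateNewSpaceVisitor spells out both halves of the decision itself again. A toy model of the restored control flow, with hypothetical stand-ins rather than real V8 APIs: promote old-enough objects to old space, otherwise copy within new space, adding one fresh to-space page if the first allocation fails.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// ToyNewSpace stands in for V8's to-space: an allocation budget that can be
// grown by a page, loosely mirroring NewSpace::AddFreshPage().
struct ToyNewSpace {
  std::size_t capacity = 4096;
  std::size_t used = 0;
  bool AllocateRaw(std::size_t size) {
    if (used + size > capacity) return false;
    used += size;
    return true;
  }
  bool AddFreshPage() {
    capacity += 4096;
    return true;
  }
};

// should_promote stands in for heap->ShouldBePromoted(); promoted for a
// successful TryPromoteObject(), which fails when old space cannot allocate.
bool VisitNewSpaceObject(ToyNewSpace* space, std::size_t size,
                         bool should_promote, bool promoted) {
  if (should_promote && promoted) return true;  // Lives in old space now.
  if (!space->AllocateRaw(size)) {
    if (!space->AddFreshPage()) {
      // To-space mirrors from-space page for page, so failing here means OOM.
      std::fprintf(stderr, "MarkCompactCollector: semi-space copy\n");
      std::abort();
    }
    if (!space->AllocateRaw(size)) std::abort();  // DCHECK in the real code.
  }
  // MigrateObject(...) would copy the payload and write the forwarding
  // map word here.
  return true;
}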
 class MarkCompactCollector::EvacuateOldSpaceVisitor
-    : public MarkCompactCollector::EvacuateVisitorBase {
+    : public MarkCompactCollector::HeapObjectVisitor {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
                           CompactionSpaceCollection* compaction_spaces,
                           SlotsBuffer** evacuation_slots_buffer)
-      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
-        compaction_spaces_(compaction_spaces) {}
+      : heap_(heap),
+        compaction_spaces_(compaction_spaces),
+        evacuation_slots_buffer_(evacuation_slots_buffer) {}

   virtual bool Visit(HeapObject* object) {
-    CompactionSpace* target_space = compaction_spaces_->Get(
-        Page::FromAddress(object->address())->owner()->identity());
+    int size = object->Size();
+    AllocationAlignment alignment = object->RequiredAlignment();
     HeapObject* target_object = nullptr;
-    if (TryEvacuateObject(target_space, object, &target_object)) {
-      DCHECK(object->map_word().IsForwardingAddress());
-      return true;
+    AllocationSpace id =
+        Page::FromAddress(object->address())->owner()->identity();
+    AllocationResult allocation =
+        compaction_spaces_->Get(id)->AllocateRaw(size, alignment);
+    if (!allocation.To(&target_object)) {
+      return false;
     }
-    return false;
+    heap_->mark_compact_collector()->MigrateObject(
+        target_object, object, size, id, evacuation_slots_buffer_);
+    DCHECK(object->map_word().IsForwardingAddress());
+    return true;
   }

  private:
+  Heap* heap_;
   CompactionSpaceCollection* compaction_spaces_;
+  SlotsBuffer** evacuation_slots_buffer_;
 };


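Note: in the reverted old-space path, allocation goes through the CompactionSpaceCollection keyed by the source page's owner identity, so an evacuated object stays in the same kind of space it came from. A sketch under that reading, with hypothetical toy names (the failure handling is simplified):

#include <array>
#include <cstddef>

// Toy stand-ins: two space kinds and one bump counter per kind, as in a
// per-task CompactionSpaceCollection.
enum ToySpaceId { OLD_SPACE = 0, CODE_SPACE = 1, kNumToySpaces };

struct ToyCompactionSpaces {
  std::array<std::size_t, kNumToySpaces> used{};
  bool AllocateRaw(ToySpaceId id, std::size_t size, std::size_t limit) {
    if (used[id] + size > limit) return false;
    used[id] += size;
    return true;
  }
};

// The real code derives the id from
// Page::FromAddress(object->address())->owner()->identity().
bool VisitOldSpaceObject(ToyCompactionSpaces* spaces, ToySpaceId source_space,
                         std::size_t size) {
  if (!spaces->AllocateRaw(source_space, size, /*limit=*/1 << 20)) {
    return false;  // Returning false tells the caller this object failed.
  }
  // MigrateObject(...) would copy the object and record outgoing slots in
  // evacuation_slots_buffer_ here.
  return true;
}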
 bool MarkCompactCollector::IterateLiveObjectsOnPage(MemoryChunk* page,
                                                     HeapObjectVisitor* visitor,
                                                     IterationMode mode) {
   Address offsets[16];
   for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
     Address cell_base = it.CurrentCellBase();
     MarkBit::CellType* cell = it.CurrentCell();
(...skipping 1338 matching lines...)
   MapWord map_word = HeapObject::cast(*p)->map_word();

   if (map_word.IsForwardingAddress()) {
     return String::cast(map_word.ToForwardingAddress());
   }

   return String::cast(*p);
 }


+bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
+                                            int object_size) {
+  OldSpace* old_space = heap()->old_space();
+
+  HeapObject* target = nullptr;
+  AllocationAlignment alignment = object->RequiredAlignment();
+  AllocationResult allocation = old_space->AllocateRaw(object_size, alignment);
+  if (allocation.To(&target)) {
+    MigrateObject(target, object, object_size, old_space->identity(),
+                  &migration_slots_buffer_);
+    // If we end up needing more special cases, we should factor this out.
+    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+      heap()->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
+    }
+    heap()->IncrementPromotedObjectsSize(object_size);
+    return true;
+  }
+
+  return false;
+}
+
+
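Note: this hunk restores the promotion path as a collector method. As shown above, TryPromoteObject allocates in old space, migrates the object while recording slots into migration_slots_buffer_, hands a promoted JSArrayBuffer to the array buffer tracker, and bumps the promoted-size counter; it returns false when old space cannot allocate, which is what lets EvacuateNewSpaceVisitor::Visit fall back to a semi-space copy.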
 bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
                                                HeapObject** out_object) {
   Space* owner = p->owner();
   if (owner == heap_->lo_space() || owner == NULL) {
     Object* large_object = heap_->lo_space()->FindObject(slot);
     // This object has to exist, otherwise we would not have recorded a slot
     // for it.
     CHECK(large_object->IsHeapObject());
     HeapObject* large_heap_object = HeapObject::cast(large_object);
     if (IsMarked(large_heap_object)) {
(...skipping 152 matching lines...)
   new_space->Flip();
   new_space->ResetAllocationInfo();

   int survivors_size = 0;

   // First pass: traverse all objects in inactive semispace, remove marks,
   // migrate live objects and write forwarding addresses. This stage puts
   // new entries in the store buffer and may cause some pages to be marked
   // scan-on-scavenge.
   NewSpacePageIterator it(from_bottom, from_top);
-  EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_);
+  EvacuateNewSpaceVisitor new_space_visitor(heap());
   while (it.has_next()) {
     NewSpacePage* p = it.next();
     survivors_size += p->LiveBytes();
     bool ok = IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits);
     USE(ok);
     DCHECK(ok);
   }

   heap_->IncrementYoungSurvivorsCounter(survivors_size);
   new_space->set_age_mark(new_space->top());
(...skipping 971 matching lines...)
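Note: a toy sketch of the first evacuation pass shown above, with hypothetical names rather than V8 APIs: after Flip(), walk every page of the inactive semispace, run the visitor over its live objects, and accumulate live bytes so the young-survivor counter is bumped once at the end.

#include <vector>

// ToyPage stands in for a NewSpacePage; live_bytes for p->LiveBytes().
struct ToyPage {
  int live_bytes = 0;
};

int EvacuateToyNewSpace(std::vector<ToyPage>& from_space_pages) {
  int survivors_size = 0;
  for (ToyPage& p : from_space_pages) {
    survivors_size += p.live_bytes;
    // IterateLiveObjectsOnPage(p, &new_space_visitor, kClearMarkbits) would
    // run the visitor over every marked object and clear the mark bits here.
  }
  return survivors_size;  // Feeds IncrementYoungSurvivorsCounter().
}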
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(&rinfo, target);
     }
   }
 }

 }  // namespace internal
 }  // namespace v8