Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" | 
| 6 | 6 | 
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" | 
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" | 
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" | 
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" | 
| (...skipping 1552 matching lines...) | |
| 1563 } | 1563 } | 
| 1564 return false; | 1564 return false; | 
| 1565 } | 1565 } | 
| 1566 | 1566 | 
| 1567 protected: | 1567 protected: | 
| 1568 Heap* heap_; | 1568 Heap* heap_; | 
| 1569 SlotsBuffer** evacuation_slots_buffer_; | 1569 SlotsBuffer** evacuation_slots_buffer_; | 
| 1570 }; | 1570 }; | 
| 1571 | 1571 | 
| 1572 | 1572 | 
| 1573 class MarkCompactCollector::EvacuateNewSpaceVisitor | 1573 class MarkCompactCollector::EvacuateNewSpaceVisitor final | 
| 1574 : public MarkCompactCollector::EvacuateVisitorBase { | 1574 : public MarkCompactCollector::EvacuateVisitorBase { | 
| 1575 public: | 1575 public: | 
| 1576 static const intptr_t kLabSize = 2 * KB; | |
| 1577 static const intptr_t kMaxLabObjectSize = 256; | |

Hannes Payer (out of office)
2015/12/16 14:51:06
How did you identify these parameters? Do you think...

Michael Lippautz
2015/12/16 20:06:04
These constants are not backed by real-world experi...

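For context on the two constants under discussion: below is a minimal standalone sketch of the dispatch they drive, with toy stand-in types (this is not V8's LocalAllocationBuffer API; only the two values come from the patch). Objects above kMaxLabObjectSize bypass the per-task buffer and hit the shared new-space allocator; smaller ones are bump-allocated locally.

```cpp
#include <cstdint>
#include <cstdlib>
#include <vector>

// The two values come from the patch; everything else is a toy stand-in.
constexpr int kLabSize = 2 * 1024;      // one 2 KB buffer per evacuation task
constexpr int kMaxLabObjectSize = 256;  // objects above this bypass the LAB

struct ToyLab {
  std::vector<std::uint8_t> backing = std::vector<std::uint8_t>(kLabSize);
  int top = 0;
  // Bump-pointer allocation; nullptr signals an exhausted buffer.
  void* Allocate(int size) {
    if (top + size > static_cast<int>(backing.size())) return nullptr;
    void* result = backing.data() + top;
    top += size;
    return result;
  }
};

// Mirrors the size dispatch in AllocateTargetObject: small objects come out
// of the private LAB, large ones go to the shared space (malloc stands in
// for the synchronized new-space allocation).
void* AllocateSmallOrLarge(ToyLab& lab, int size) {
  if (size > kMaxLabObjectSize) return std::malloc(size);
  return lab.Allocate(size);
}
```

With a 2 KB buffer and a 256-byte cap, a full LAB serves at least eight objects, so refills, and with them synchronized allocations on the shared new-space top pointer, stay comparatively rare.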
| 1578 | |
| 1576 explicit EvacuateNewSpaceVisitor(Heap* heap, | 1579 explicit EvacuateNewSpaceVisitor(Heap* heap, | 
| 1577 SlotsBuffer** evacuation_slots_buffer) | 1580 SlotsBuffer** evacuation_slots_buffer) | 
| 1578 : EvacuateVisitorBase(heap, evacuation_slots_buffer) {} | 1581 : EvacuateVisitorBase(heap, evacuation_slots_buffer), | 
| 1582 buffer_(LocalAllocationBuffer::InvalidBuffer()), | |
| 1583 space_to_allocate_(NEW_SPACE) {} | |
| 1579 | 1584 | 
| 1580 bool Visit(HeapObject* object) override { | 1585 bool Visit(HeapObject* object) override { | 
| 1581 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT); | 1586 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT); | 
| 1582 int size = object->Size(); | 1587 int size = object->Size(); | 
| 1583 HeapObject* target_object = nullptr; | 1588 HeapObject* target_object = nullptr; | 
| 1584 if (heap_->ShouldBePromoted(object->address(), size) && | 1589 if (heap_->ShouldBePromoted(object->address(), size) && | 
| 1585 TryEvacuateObject(heap_->old_space(), object, &target_object)) { | 1590 TryEvacuateObject(heap_->old_space(), object, &target_object)) { | 
| 1586 // If we end up needing more special cases, we should factor this out. | 1591 // If we end up needing more special cases, we should factor this out. | 
| 1587 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { | 1592 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { | 
| 1588 heap_->array_buffer_tracker()->Promote( | 1593 heap_->array_buffer_tracker()->Promote( | 
| 1589 JSArrayBuffer::cast(target_object)); | 1594 JSArrayBuffer::cast(target_object)); | 
| 1590 } | 1595 } | 
| 1591 heap_->IncrementPromotedObjectsSize(size); | 1596 heap_->IncrementPromotedObjectsSize(size); | 
| 1592 return true; | 1597 return true; | 
| 1593 } | 1598 } | 
| 1594 | 1599 HeapObject* target = nullptr; | 
| 1595 AllocationAlignment alignment = object->RequiredAlignment(); | 1600 AllocationSpace space = AllocateTargetObject(object, &target); | 
| 1596 AllocationResult allocation = | |
| 1597 heap_->new_space()->AllocateRaw(size, alignment); | |
| 1598 if (allocation.IsRetry()) { | |
| 1599 if (!heap_->new_space()->AddFreshPage()) { | |
| 1600 // Shouldn't happen. We are sweeping linearly, and to-space | |
| 1601 // has the same number of pages as from-space, so there is | |
| 1602 // always room unless we are in an OOM situation. | |
| 1603 FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n"); | |
| 1604 } | |
| 1605 allocation = heap_->new_space()->AllocateRaw(size, alignment); | |
| 1606 DCHECK(!allocation.IsRetry()); | |
| 1607 } | |
| 1608 Object* target = allocation.ToObjectChecked(); | |
| 1609 | |
| 1610 heap_->mark_compact_collector()->MigrateObject( | 1601 heap_->mark_compact_collector()->MigrateObject( | 
| 1611 HeapObject::cast(target), object, size, NEW_SPACE, nullptr); | 1602 HeapObject::cast(target), object, size, space, | 
| 1603 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_); | |
| 1612 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { | 1604 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { | 
| 1613 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); | 1605 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); | 
| 1614 } | 1606 } | 
| 1615 heap_->IncrementSemiSpaceCopiedObjectSize(size); | 1607 heap_->IncrementSemiSpaceCopiedObjectSize(size); | 
| 1616 return true; | 1608 return true; | 
| 1617 } | 1609 } | 
| 1610 | |
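One detail worth noting in the rewritten Visit(): the slots buffer is threaded through only when the target lands in old space (`(space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_`), since a copy that stays in new space creates no old-to-new slots to record. A toy sketch of the two-way decision follows; the survival predicate is a hypothetical stand-in (the real heap_->ShouldBePromoted() consults the space's age mark, not a counter).

```cpp
#include <cstdio>

enum class Space { kNew, kOld };

// Hypothetical survival predicate; the real heap_->ShouldBePromoted() looks
// at the object's address relative to new space's age mark.
bool ShouldBePromoted(int survived_gcs) { return survived_gcs > 0; }

// Shape of Visit(): survivors are promoted into old space, everything else
// is semi-space copied. Only the old-space path hands MigrateObject the
// slots buffer; a new-to-new copy has no old-to-new slots to record.
Space EvacuateOne(int survived_gcs) {
  return ShouldBePromoted(survived_gcs) ? Space::kOld : Space::kNew;
}

int main() {
  std::printf("fresh object -> %s\n",
              EvacuateOne(0) == Space::kOld ? "old space" : "new space");
  std::printf("survivor     -> %s\n",
              EvacuateOne(1) == Space::kOld ? "old space" : "new space");
  return 0;
}
```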
| 1611 private: | |
| 1612 inline AllocationSpace AllocateTargetObject(HeapObject* old_object, | |
| 1613 HeapObject** target_object) { | |
| 1614 const int size = old_object->Size(); | |
| 1615 AllocationAlignment alignment = old_object->RequiredAlignment(); | |
| 1616 AllocationResult allocation; | |
| 1617 if (space_to_allocate_ == NEW_SPACE) { | |
| 1618 if (size > kMaxLabObjectSize) { | |
| 1619 allocation = AllocateInNewSpace(size, alignment); | |
| 1620 } else { | |
| 1621 allocation = AllocateInLab(size, alignment); | |
| 1622 } | |
| 1623 } | |
| 1624 if (space_to_allocate_ == OLD_SPACE) { | |
| 1625 allocation = AllocateInOldSpace(size, alignment); | |
| 1626 } | |
| 1627 bool ok = allocation.To(target_object); | |
| 1628 DCHECK(ok); | |
| 1629 USE(ok); | |
| 1630 return space_to_allocate_; | |
| 1631 } | |
| 1632 | |
| 1633 inline bool NewLocalAllocationBuffer() { | |
| 1634 AllocationResult result = AllocateInNewSpace(kLabSize, kWordAligned); | |
| 1635 buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize); | |
| 1636 return buffer_.IsValid(); | |
| 1637 } | |
| 1638 | |
| 1639 inline AllocationResult AllocateInNewSpace(int size_in_bytes, | |
| 1640 AllocationAlignment alignment) { | |
| 1641 AllocationResult allocation = | |
| 1642 heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment); | |
| 1643 if (allocation.IsRetry()) { | |
| 1644 if (!heap_->new_space()->AddFreshPageSynchronized()) { | |
| 1645 space_to_allocate_ = OLD_SPACE; | |
| 1646 } else { | |
| 1647 allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes, | |
| 1648 alignment); | |
| 1649 if (allocation.IsRetry()) { | |
| 1650 space_to_allocate_ = OLD_SPACE; | |
| 1651 } | |
| 1652 } | |
| 1653 } | |
| 1654 return allocation; | |
| 1655 } | |
| 1656 | |
| 1657 inline AllocationResult AllocateInOldSpace(int size_in_bytes, | |

Hannes Payer (out of office)
2015/12/16 14:51:06
Are you going to provide LABs in old space in a se...

Michael Lippautz
2015/12/16 20:06:04
LABs in old space are not needed as we have compac...

| 1658 AllocationAlignment alignment) { | |
| 1659 AllocationResult allocation = | |
| 1660 heap_->old_space()->AllocateRaw(size_in_bytes, alignment); | |
| 1661 if (allocation.IsRetry()) { | |
| 1662 FatalProcessOutOfMemory( | |
| 1663 "MarkCompactCollector: semi-space copy, fallback in old gen\n"); | |
| 1664 } | |
| 1665 return allocation; | |
| 1666 } | |
| 1667 | |
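Per the (truncated) reply above, old-space LABs appear unnecessary because compaction spaces already give old-space evacuation thread-local allocation; in this visitor, old space serves only as a one-way fallback target. A toy model of that sticky fallback, with simplified capacity accounting in place of real page management:

```cpp
#include <cstdio>
#include <cstdlib>

enum class TargetSpace { kNew, kOld };

// Toy model of the sticky fallback in AllocateTargetObject: once new space
// (even after a fresh page) cannot satisfy a request, space_to_allocate_
// flips to OLD_SPACE and stays there for the rest of the evacuation.
struct FallbackAllocator {
  TargetSpace space = TargetSpace::kNew;
  int new_space_budget = 8 * 1024;  // arbitrary toy capacity, not a V8 value

  void* Allocate(int size) {
    if (space == TargetSpace::kNew) {
      if (size <= new_space_budget) {
        new_space_budget -= size;
        return std::malloc(size);  // stand-in for a new-space bump allocation
      }
      space = TargetSpace::kOld;   // one-way switch, as in the patch
    }
    void* result = std::malloc(size);  // stand-in for old_space()->AllocateRaw
    if (result == nullptr) {
      // Corresponds to FatalProcessOutOfMemory in AllocateInOldSpace.
      std::fprintf(stderr, "MarkCompactCollector: fallback in old gen\n");
      std::abort();
    }
    return result;
  }
};
```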
| 1668 inline AllocationResult AllocateInLab(int size_in_bytes, | |
| 1669 AllocationAlignment alignment) { | |
| 1670 AllocationResult allocation; | |
| 1671 if (!buffer_.IsValid()) { | |
| 1672 if (!NewLocalAllocationBuffer()) { | |
| 1673 space_to_allocate_ = OLD_SPACE; | |
| 1674 return AllocationResult::Retry(OLD_SPACE); | |
| 1675 } | |
| 1676 } | |
| 1677 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment); | |
| 1678 if (allocation.IsRetry()) { | |
| 1679 if (!NewLocalAllocationBuffer()) { | |

Hannes Payer (out of office)
2015/12/16 14:51:06
It would be nice to preserve the same behavior we...

Michael Lippautz
2015/12/16 20:06:04
Done, however there's still a slight mismatch in b...

| 1680 space_to_allocate_ = OLD_SPACE; | |
| 1681 return AllocationResult::Retry(OLD_SPACE); | |
| 1682 } else { | |
| 1683 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment); | |
| 1684 if (allocation.IsRetry()) { | |
| 1685 space_to_allocate_ = OLD_SPACE; | |
| 1686 return AllocationResult::Retry(OLD_SPACE); | |
| 1687 } | |
| 1688 } | |
| 1689 } | |
| 1690 return allocation; | |
| 1691 } | |
| 1692 | |
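On the behavior-preservation thread above: the resolved code retries exactly once per request. If the current LAB is invalid or cannot fit the object, one fresh LAB is requested and the allocation retried; any further failure makes space_to_allocate_ stick to OLD_SPACE. A compressed model of that discipline (toy fields and bookkeeping, not V8 types):

```cpp
// Toy model of AllocateInLab's retry discipline: at most one LAB refill per
// request; failing to obtain a fresh LAB, or to fit even after the refill,
// tells the caller to fall back to old space. Fields are illustrative.
struct LabRetryModel {
  bool lab_valid = false;
  int lab_free = 0;      // bytes left in the current LAB
  int refills_left = 2;  // fresh LABs new space can still hand out

  bool NewLab() {
    if (refills_left == 0) return false;
    --refills_left;
    lab_valid = true;
    lab_free = 2 * 1024;  // kLabSize
    return true;
  }

  // True on success; false means "set space_to_allocate_ = OLD_SPACE".
  bool Allocate(int size) {
    if (!lab_valid && !NewLab()) return false;  // no buffer at all
    if (size <= lab_free) { lab_free -= size; return true; }
    if (!NewLab()) return false;                // the single refill failed
    if (size <= lab_free) { lab_free -= size; return true; }
    return false;                               // still no fit: give up on LABs
  }
};
```

This retry-once shape is presumably where the "slight mismatch" mentioned in the reply lives: the old code retried directly on a fresh new-space page, while the LAB path gives up after one refill attempt and routes subsequent small objects to old space.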
| 1693 LocalAllocationBuffer buffer_; | |
| 1694 AllocationSpace space_to_allocate_; | |
| 1618 }; | 1695 }; | 
| 1619 | 1696 | 
| 1620 | 1697 | 
| 1621 class MarkCompactCollector::EvacuateOldSpaceVisitor | 1698 class MarkCompactCollector::EvacuateOldSpaceVisitor final | 
| 1622 : public MarkCompactCollector::EvacuateVisitorBase { | 1699 : public MarkCompactCollector::EvacuateVisitorBase { | 
| 1623 public: | 1700 public: | 
| 1624 EvacuateOldSpaceVisitor(Heap* heap, | 1701 EvacuateOldSpaceVisitor(Heap* heap, | 
| 1625 CompactionSpaceCollection* compaction_spaces, | 1702 CompactionSpaceCollection* compaction_spaces, | 
| 1626 SlotsBuffer** evacuation_slots_buffer) | 1703 SlotsBuffer** evacuation_slots_buffer) | 
| 1627 : EvacuateVisitorBase(heap, evacuation_slots_buffer), | 1704 : EvacuateVisitorBase(heap, evacuation_slots_buffer), | 
| 1628 compaction_spaces_(compaction_spaces) {} | 1705 compaction_spaces_(compaction_spaces) {} | 
| 1629 | 1706 | 
| 1630 bool Visit(HeapObject* object) override { | 1707 bool Visit(HeapObject* object) override { | 
| 1631 CompactionSpace* target_space = compaction_spaces_->Get( | 1708 CompactionSpace* target_space = compaction_spaces_->Get( | 
| (...skipping 2327 matching lines...) | |
| 3959 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4036 MarkBit mark_bit = Marking::MarkBitFrom(host); | 
| 3960 if (Marking::IsBlack(mark_bit)) { | 4037 if (Marking::IsBlack(mark_bit)) { | 
| 3961 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 4038 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 
| 3962 RecordRelocSlot(&rinfo, target); | 4039 RecordRelocSlot(&rinfo, target); | 
| 3963 } | 4040 } | 
| 3964 } | 4041 } | 
| 3965 } | 4042 } | 
| 3966 | 4043 | 
| 3967 } // namespace internal | 4044 } // namespace internal | 
| 3968 } // namespace v8 | 4045 } // namespace v8 | 