Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 1487853002: [heap] Move to LAB-based allocation for newspace evacuation. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Added cctests for LocalAllocationBuffer (created 5 years ago)
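The patch set title mentions new cctests for LocalAllocationBuffer; those tests are not shown on this page. Purely as an editor's sketch (not the tests from this patch set), a minimal cctest exercising the API visible in the diff below (FromResult, IsValid, AllocateRawAligned) could look as follows; CcTest::InitializeVM and CcTest::heap are assumed from V8's existing cctest harness, and the buffer and object sizes are arbitrary.

// Illustrative sketch only; not the cctests added by this patch set.
#include "src/heap/spaces.h"
#include "test/cctest/cctest.h"

using namespace v8::internal;

TEST(LocalAllocationBufferSketch) {
  CcTest::InitializeVM();
  Heap* heap = CcTest::heap();
  const int lab_size = 4 * KB;  // arbitrary buffer size for this sketch
  // A LAB is seeded from one raw new-space allocation...
  AllocationResult result =
      heap->new_space()->AllocateRawSynchronized(lab_size, kWordAligned);
  LocalAllocationBuffer lab =
      LocalAllocationBuffer::FromResult(heap, result, lab_size);
  CHECK(lab.IsValid());
  // ...and further small allocations are carved out of the buffer
  // without touching the shared new-space top again.
  AllocationResult object = lab.AllocateRawAligned(16, kWordAligned);
  CHECK(!object.IsRetry());
}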
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/mark-compact.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(...skipping 1564 matching lines...)
     }
     return false;
   }

  protected:
   Heap* heap_;
   SlotsBuffer** evacuation_slots_buffer_;
 };


-class MarkCompactCollector::EvacuateNewSpaceVisitor
+class MarkCompactCollector::EvacuateNewSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
  public:
+  static const intptr_t kLabSize = 2 * KB;
[Review comment] Michael Lippautz 2015/12/01 14:51:37: kLabSize influences NewSpace::Size() as we increme
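(Editor's gloss, not part of the review thread: the comment above is cut off mid-sentence. Its point is that a LAB is carved out of new space in a single kLabSize-sized allocation, so NewSpace::Size() grows by the full buffer size up front, even when the evacuated objects later fill the buffer only partially. Using the names from the diff:)

// Editor's illustration of the accounting effect described above.
// Refilling the LAB performs one kLabSize allocation, which bumps the
// new-space top (and hence NewSpace::Size()) in a single step.
AllocationResult result = AllocateInNewSpace(kLabSize, kWordAligned);
buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);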
+  static const intptr_t kMaxLabObjectSize = 256;
+
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    SlotsBuffer** evacuation_slots_buffer)
-      : EvacuateVisitorBase(heap, evacuation_slots_buffer) {}
+      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+        buffer_(LocalAllocationBuffer::InvalidBuffer()),
+        space_to_allocate_(NEW_SPACE) {}

-  virtual bool Visit(HeapObject* object) {
+  bool Visit(HeapObject* object) override {
     Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
     int size = object->Size();
     HeapObject* target_object = nullptr;
     if (heap_->ShouldBePromoted(object->address(), size) &&
         TryEvacuateObject(heap_->old_space(), object, &target_object)) {
       // If we end up needing more special cases, we should factor this out.
       if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
         heap_->array_buffer_tracker()->Promote(
             JSArrayBuffer::cast(target_object));
       }
       heap_->IncrementPromotedObjectsSize(size);
       return true;
     }
-
-    AllocationAlignment alignment = object->RequiredAlignment();
-    AllocationResult allocation =
-        heap_->new_space()->AllocateRaw(size, alignment);
-    if (allocation.IsRetry()) {
-      if (!heap_->new_space()->AddFreshPage()) {
-        // Shouldn't happen. We are sweeping linearly, and to-space
-        // has the same number of pages as from-space, so there is
-        // always room unless we are in an OOM situation.
-        FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
-      }
-      allocation = heap_->new_space()->AllocateRaw(size, alignment);
-      DCHECK(!allocation.IsRetry());
-    }
-    Object* target = allocation.ToObjectChecked();
-
+    HeapObject* target = nullptr;
+    AllocationSpace space = AllocateTargetObject(object, &target);
     heap_->mark_compact_collector()->MigrateObject(
-        HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
+        HeapObject::cast(target), object, size, space,
+        (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
     if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
       heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
     }
     heap_->IncrementSemiSpaceCopiedObjectSize(size);
     return true;
   }
+
+ private:
+  inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
+                                              HeapObject** target_object) {
+    const int size = old_object->Size();
+    AllocationAlignment alignment = old_object->RequiredAlignment();
+    AllocationResult allocation;
+    if (space_to_allocate_ == NEW_SPACE) {
+      if (size > kMaxLabObjectSize) {
+        allocation = AllocateInNewSpace(size, alignment);
+      } else {
+        allocation = AllocateInLab(size, alignment);
+      }
+    }
+    if (space_to_allocate_ == OLD_SPACE) {
+      allocation = AllocateInOldSpace(size, alignment);
+    }
+    bool ok = allocation.To(target_object);
+    DCHECK(ok);
+    USE(ok);
+    return space_to_allocate_;
+  }
+
+  inline bool NewLocalAllocationBuffer() {
+    AllocationResult result = AllocateInNewSpace(kLabSize, kWordAligned);
+    buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
+    return buffer_.IsValid();
+  }
+
+  inline AllocationResult AllocateInNewSpace(int size_in_bytes,
+                                             AllocationAlignment alignment) {
+    AllocationResult allocation =
+        heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      if (!heap_->new_space()->AddFreshPageSynchronized()) {
+        space_to_allocate_ = OLD_SPACE;
+      } else {
+        allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
+                                                                 alignment);
+        if (allocation.IsRetry()) {
+          space_to_allocate_ = OLD_SPACE;
+        }
+      }
+    }
+    return allocation;
+  }
+
+  inline AllocationResult AllocateInOldSpace(int size_in_bytes,
+                                             AllocationAlignment alignment) {
+    AllocationResult allocation =
+        heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      FatalProcessOutOfMemory(
+          "MarkCompactCollector: semi-space copy, fallback in old gen\n");
+    }
+    return allocation;
+  }
+
+  inline AllocationResult AllocateInLab(int size_in_bytes,
+                                        AllocationAlignment alignment) {
+    AllocationResult allocation;
+    if (!buffer_.IsValid()) {
+      if (!NewLocalAllocationBuffer()) {
+        space_to_allocate_ = OLD_SPACE;
+        return AllocationResult::Retry(OLD_SPACE);
+      }
+    }
+    allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      if (!NewLocalAllocationBuffer()) {
+        space_to_allocate_ = OLD_SPACE;
+        return AllocationResult::Retry(OLD_SPACE);
+      } else {
+        allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+        if (allocation.IsRetry()) {
+          space_to_allocate_ = OLD_SPACE;
+          return AllocationResult::Retry(OLD_SPACE);
+        }
+      }
+    }
+    return allocation;
+  }
+
+  LocalAllocationBuffer buffer_;
+  AllocationSpace space_to_allocate_;
 };
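Stepping back (an editor's note, not review content): space_to_allocate_ implements a one-way fallback. Allocation starts in new space, small objects go through the LAB while objects above kMaxLabObjectSize bypass it, the first unrecoverable new-space failure switches the visitor to old space for the rest of its lifetime, and an old-space failure is fatal. A tiny standalone model of that one-way switch, with hypothetical names:

// Standalone model of the one-way fallback; not V8 code.
#include <cassert>
#include <cstdio>

enum Space { kNewSpace, kOldSpace };

struct FallbackPolicy {
  Space space = kNewSpace;

  // Models AllocateInNewSpace/AllocateInLab: any failure while still in
  // new space permanently switches the policy to old space.
  Space OnAllocation(bool new_space_succeeded) {
    if (space == kNewSpace && !new_space_succeeded) space = kOldSpace;
    return space;
  }
};

int main() {
  FallbackPolicy policy;
  assert(policy.OnAllocation(true) == kNewSpace);   // new space still works
  assert(policy.OnAllocation(false) == kOldSpace);  // first failure flips
  assert(policy.OnAllocation(true) == kOldSpace);   // never flips back
  std::printf("fallback model ok\n");
  return 0;
}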


-class MarkCompactCollector::EvacuateOldSpaceVisitor
+class MarkCompactCollector::EvacuateOldSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
                           CompactionSpaceCollection* compaction_spaces,
                           SlotsBuffer** evacuation_slots_buffer)
       : EvacuateVisitorBase(heap, evacuation_slots_buffer),
         compaction_spaces_(compaction_spaces) {}

-  virtual bool Visit(HeapObject* object) {
+  bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
         Page::FromAddress(object->address())->owner()->identity());
     HeapObject* target_object = nullptr;
     if (TryEvacuateObject(target_space, object, &target_object)) {
       DCHECK(object->map_word().IsForwardingAddress());
       return true;
     }
     return false;
   }

(...skipping 2514 matching lines...)
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(&rinfo, target);
     }
   }
 }

 }  // namespace internal
 }  // namespace v8
