Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 81fc924050c406a680fb395097eebee89066cf10..e1b88628209b5d004d5536de91d2edfc4764387d 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -1570,12 +1570,20 @@ class MarkCompactCollector::EvacuateVisitorBase
 };
 
 
-class MarkCompactCollector::EvacuateNewSpaceVisitor
+class MarkCompactCollector::EvacuateNewSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
  public:
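+  // Evacuation copies small objects into a thread-local allocation buffer
+  // (LAB); objects larger than kMaxLabObjectSize are allocated in new space
+  // directly.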
+  static const intptr_t kLabSize = 4 * KB;
+  static const intptr_t kMaxLabObjectSize = 256;
+
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    SlotsBuffer** evacuation_slots_buffer)
-      : EvacuateVisitorBase(heap, evacuation_slots_buffer) {}
+      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+        buffer_(LocalAllocationBuffer::InvalidBuffer()),
+        space_to_allocate_(NEW_SPACE) {}
 
   bool Visit(HeapObject* object) override {
     Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
@@ -1591,34 +1599,134 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor
       heap_->IncrementPromotedObjectsSize(size);
       return true;
     }
+    HeapObject* target = nullptr;
+    AllocationSpace space = AllocateTargetObject(object, &target);
+    heap_->mark_compact_collector()->MigrateObject(
+        HeapObject::cast(target), object, size, space,
+        (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
+    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+    }
+    heap_->IncrementSemiSpaceCopiedObjectSize(size);
+    return true;
+  }
 
-    AllocationAlignment alignment = object->RequiredAlignment();
+ private:
+  enum NewSpaceAllocationMode {
+    kNonstickyBailoutOldSpace,
+    kStickyBailoutOldSpace,
+  };
+
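+  // Chooses the target space for |old_object|: new space (through the LAB
+  // for small objects) when possible, otherwise old space; the chosen space
+  // is returned so the caller can record slots accordingly.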
+  inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
+                                              HeapObject** target_object) {
+    const int size = old_object->Size();
+    AllocationAlignment alignment = old_object->RequiredAlignment();
+    AllocationResult allocation;
+    if (space_to_allocate_ == NEW_SPACE) {
+      if (size > kMaxLabObjectSize) {
+        allocation =
+            AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
+      } else {
+        allocation = AllocateInLab(size, alignment);
+      }
+    }
+    if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
+      allocation = AllocateInOldSpace(size, alignment);
+    }
+    bool ok = allocation.To(target_object);
+    DCHECK(ok);
+    USE(ok);
+    return space_to_allocate_;
+  }
+
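+  // Acquires a fresh LAB from new space; any unused tail of the previous
+  // buffer is merged into the new one when the two happen to be contiguous.
+  // Failure to get a new LAB makes old space the sticky fallback.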
+  inline bool NewLocalAllocationBuffer() {
+    AllocationResult result =
+        AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
+    LocalAllocationBuffer saved_old_buffer = buffer_;
+    buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
+    if (buffer_.IsValid()) {
+      buffer_.TryMerge(&saved_old_buffer);
+      return true;
+    }
+    return false;
+  }
+
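+  // Raw new-space allocation through the synchronized path; on failure one
+  // fresh page is requested and the allocation retried. Depending on |mode|,
+  // a final failure makes old space the permanent fallback.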
+  inline AllocationResult AllocateInNewSpace(int size_in_bytes,
+                                             AllocationAlignment alignment,
+                                             NewSpaceAllocationMode mode) {
     AllocationResult allocation =
-        heap_->new_space()->AllocateRaw(size, alignment);
+        heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
     if (allocation.IsRetry()) {
-      if (!heap_->new_space()->AddFreshPage()) {
-        // Shouldn't happen. We are sweeping linearly, and to-space
-        // has the same number of pages as from-space, so there is
-        // always room unless we are in an OOM situation.
-        FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
+      if (!heap_->new_space()->AddFreshPageSynchronized()) {
+        if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
+      } else {
+        allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
+                                                                 alignment);
+        if (allocation.IsRetry()) {
+          if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
+        }
       }
-      allocation = heap_->new_space()->AllocateRaw(size, alignment);
-      DCHECK(!allocation.IsRetry());
     }
-    Object* target = allocation.ToObjectChecked();
+    return allocation;
+  }
 
-    heap_->mark_compact_collector()->MigrateObject(
-        HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
-    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
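+  // Fallback allocation in old space; failing here means the heap is truly
+  // out of memory and the process is aborted.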
+  inline AllocationResult AllocateInOldSpace(int size_in_bytes,
+                                             AllocationAlignment alignment) {
+    AllocationResult allocation =
+        heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      FatalProcessOutOfMemory(
+          "MarkCompactCollector: semi-space copy, fallback in old gen\n");
     }
-    heap_->IncrementSemiSpaceCopiedObjectSize(size);
-    return true;
+    return allocation;
   }
+
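+  // LAB allocation: bump-pointer allocation from the current buffer, with
+  // one retry in a fresh buffer before bailing out to old space for good.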
+  inline AllocationResult AllocateInLab(int size_in_bytes,
+                                        AllocationAlignment alignment) {
+    AllocationResult allocation;
+    if (!buffer_.IsValid()) {
+      if (!NewLocalAllocationBuffer()) {
+        space_to_allocate_ = OLD_SPACE;
+        return AllocationResult::Retry(OLD_SPACE);
+      }
+    }
+    allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      if (!NewLocalAllocationBuffer()) {
+        space_to_allocate_ = OLD_SPACE;
+        return AllocationResult::Retry(OLD_SPACE);
+      } else {
+        allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+        if (allocation.IsRetry()) {
+          space_to_allocate_ = OLD_SPACE;
+          return AllocationResult::Retry(OLD_SPACE);
+        }
+      }
+    }
+    return allocation;
+  }
+
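+  // The current LAB and the space that evacuation currently targets; the
+  // latter switches to OLD_SPACE permanently after a sticky bailout.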
+  LocalAllocationBuffer buffer_;
+  AllocationSpace space_to_allocate_;
 };
 
 
-class MarkCompactCollector::EvacuateOldSpaceVisitor
+class MarkCompactCollector::EvacuateOldSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,