Chromium Code Reviews
Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 019b9e392cc08028eee4b6cef66b0319dccb5842..a635dd46b64873f70f0422a8605f44cff091c5e0 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -1582,14 +1582,19 @@ class MarkCompactCollector::EvacuateVisitorBase
 };
 
-class MarkCompactCollector::EvacuateNewSpaceVisitor
+class MarkCompactCollector::EvacuateNewSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
  public:
+  static const intptr_t kLabSize = 2 * KB;
Michael Lippautz
2015/12/01 14:51:37
kLabSize influences NewSpace::Size() as we increment …
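For context on the mechanism under discussion: a local allocation buffer (LAB) is a thread-private chunk carved out of the shared space in one synchronized step, after which individual objects are bump-allocated into it with no locking at all. That is presumably also why kLabSize shows up in the space's size accounting, as the (truncated) comment above suggests: each refill charges a full 2 KB buffer to the space, whether or not its tail is ever used. Below is a minimal standalone sketch of the pattern, not V8's LocalAllocationBuffer API; the arena, Lab, Refill, and Allocate names are hypothetical, and only the kLabSize/kMaxLabObjectSize values mirror constants from this CL.

#include <atomic>
#include <cstddef>

// A fixed arena stands in for the shared to-space; g_arena_top plays the
// role of the space's synchronized allocation top.
alignas(8) static char g_arena[1 << 20];
static std::atomic<size_t> g_arena_top{0};

class Lab {
 public:
  static constexpr size_t kLabSize = 2 * 1024;      // cf. kLabSize above
  static constexpr size_t kMaxLabObjectSize = 256;  // cf. kMaxLabObjectSize

  // One synchronized carve-out per refill (the analogue of allocating
  // kLabSize bytes through the space's synchronized path). Note the space
  // is charged the full kLabSize here, before any object lands in it.
  bool Refill() {
    size_t start = g_arena_top.fetch_add(kLabSize, std::memory_order_relaxed);
    if (start + kLabSize > sizeof(g_arena)) return false;  // space exhausted
    top_ = start;
    limit_ = start + kLabSize;
    return true;
  }

  // Lock-free bump allocation inside the private buffer; callers route
  // objects larger than kMaxLabObjectSize around the LAB entirely.
  void* Allocate(size_t size_in_bytes) {
    size_in_bytes = (size_in_bytes + 7) & ~size_t{7};   // 8-byte alignment
    if (top_ + size_in_bytes > limit_) return nullptr;  // LAB full: refill
    void* result = g_arena + top_;
    top_ += size_in_bytes;
    return result;
  }

 private:
  size_t top_ = 0;
  size_t limit_ = 0;
};

A visitor would keep one Lab per thread, call Allocate first, and refill-then-retry on nullptr, falling back to a shared slow path once Refill fails; this is the same shape as AllocateInLab in the diff below.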
+  static const intptr_t kMaxLabObjectSize = 256;
+
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    SlotsBuffer** evacuation_slots_buffer)
-      : EvacuateVisitorBase(heap, evacuation_slots_buffer) {}
+      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+        buffer_(LocalAllocationBuffer::InvalidBuffer()),
+        space_to_allocate_(NEW_SPACE) {}
 
-  virtual bool Visit(HeapObject* object) {
+  bool Visit(HeapObject* object) override {
     Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
     int size = object->Size();
     HeapObject* target_object = nullptr;
@@ -1603,34 +1608,106 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor
       heap_->IncrementPromotedObjectsSize(size);
       return true;
     }
+    HeapObject* target = nullptr;
+    AllocationSpace space = AllocateTargetObject(object, &target);
+    heap_->mark_compact_collector()->MigrateObject(
+        HeapObject::cast(target), object, size, space,
+        (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
+    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+    }
+    heap_->IncrementSemiSpaceCopiedObjectSize(size);
+    return true;
+  }
-    AllocationAlignment alignment = object->RequiredAlignment();
 
+ private:
+  inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
+                                              HeapObject** target_object) {
+    const int size = old_object->Size();
+    AllocationAlignment alignment = old_object->RequiredAlignment();
+    AllocationResult allocation;
+    if (space_to_allocate_ == NEW_SPACE) {
+      if (size > kMaxLabObjectSize) {
+        allocation = AllocateInNewSpace(size, alignment);
+      } else {
+        allocation = AllocateInLab(size, alignment);
+      }
+    }
+    if (space_to_allocate_ == OLD_SPACE) {
+      allocation = AllocateInOldSpace(size, alignment);
+    }
+    bool ok = allocation.To(target_object);
+    DCHECK(ok);
+    USE(ok);
+    return space_to_allocate_;
+  }
+
+  inline bool NewLocalAllocationBuffer() {
+    AllocationResult result = AllocateInNewSpace(kLabSize, kWordAligned);
+    buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
+    return buffer_.IsValid();
+  }
+
+  inline AllocationResult AllocateInNewSpace(int size_in_bytes,
+                                             AllocationAlignment alignment) {
     AllocationResult allocation =
-        heap_->new_space()->AllocateRaw(size, alignment);
+        heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
     if (allocation.IsRetry()) {
-      if (!heap_->new_space()->AddFreshPage()) {
-        // Shouldn't happen. We are sweeping linearly, and to-space
-        // has the same number of pages as from-space, so there is
-        // always room unless we are in an OOM situation.
-        FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
+      if (!heap_->new_space()->AddFreshPageSynchronized()) {
+        space_to_allocate_ = OLD_SPACE;
+      } else {
+        allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
+                                                                 alignment);
+        if (allocation.IsRetry()) {
+          space_to_allocate_ = OLD_SPACE;
+        }
       }
-      allocation = heap_->new_space()->AllocateRaw(size, alignment);
-      DCHECK(!allocation.IsRetry());
     }
-    Object* target = allocation.ToObjectChecked();
+    return allocation;
+  }
-    heap_->mark_compact_collector()->MigrateObject(
-        HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
-    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+  inline AllocationResult AllocateInOldSpace(int size_in_bytes,
+                                             AllocationAlignment alignment) {
+    AllocationResult allocation =
+        heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      FatalProcessOutOfMemory(
+          "MarkCompactCollector: semi-space copy, fallback in old gen\n");
     }
-    heap_->IncrementSemiSpaceCopiedObjectSize(size);
-    return true;
+    return allocation;
   }
+
+  inline AllocationResult AllocateInLab(int size_in_bytes,
+                                        AllocationAlignment alignment) {
+    AllocationResult allocation;
+    if (!buffer_.IsValid()) {
+      if (!NewLocalAllocationBuffer()) {
+        space_to_allocate_ = OLD_SPACE;
+        return AllocationResult::Retry(OLD_SPACE);
+      }
+    }
+    allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      if (!NewLocalAllocationBuffer()) {
+        space_to_allocate_ = OLD_SPACE;
+        return AllocationResult::Retry(OLD_SPACE);
+      } else {
+        allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+        if (allocation.IsRetry()) {
+          space_to_allocate_ = OLD_SPACE;
+          return AllocationResult::Retry(OLD_SPACE);
+        }
+      }
+    }
+    return allocation;
+  }
+
+  LocalAllocationBuffer buffer_;
+  AllocationSpace space_to_allocate_;
 };
 
-class MarkCompactCollector::EvacuateOldSpaceVisitor
+class MarkCompactCollector::EvacuateOldSpaceVisitor final
    : public MarkCompactCollector::EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
@@ -1639,7 +1716,7 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor
       : EvacuateVisitorBase(heap, evacuation_slots_buffer),
         compaction_spaces_(compaction_spaces) {}
 
-  virtual bool Visit(HeapObject* object) {
+  bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
         Page::FromAddress(object->address())->owner()->identity());
     HeapObject* target_object = nullptr;
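Taken together, the new-space allocation path in this CL forms a fallback chain: objects up to kMaxLabObjectSize go through the thread-local buffer, larger ones through the synchronized new-space path, and any failure flips the visitor permanently to old space, where a further failure is fatal. The sketch below condenses that control flow under stated assumptions; AllocateTarget, TryLab, TryNewSpace, and TryOldSpace are hypothetical stand-ins, not V8 functions.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

enum Space { NEW_SPACE, OLD_SPACE };

constexpr size_t kMaxLabObjectSize = 256;  // cf. kMaxLabObjectSize above

// Toy stand-ins for AllocateInLab / AllocateInNewSpace / AllocateInOldSpace;
// each returns true on success (assumptions for this sketch only).
static bool TryLab(size_t size) { return size <= kMaxLabObjectSize; }
static bool TryNewSpace(size_t) { return true; }
static bool TryOldSpace(size_t) { return true; }

// The shape of AllocateTargetObject, condensed. The visitor is "sticky":
// the two ifs run in sequence, so an object whose new-space attempt fails
// is retried in old space within the same call, and once *space_to_allocate
// has flipped to OLD_SPACE every later object goes there directly.
Space AllocateTarget(size_t size, Space* space_to_allocate) {
  if (*space_to_allocate == NEW_SPACE) {
    bool ok = (size > kMaxLabObjectSize) ? TryNewSpace(size) : TryLab(size);
    if (!ok) *space_to_allocate = OLD_SPACE;
  }
  if (*space_to_allocate == OLD_SPACE) {
    if (!TryOldSpace(size)) {
      // cf. FatalProcessOutOfMemory: old space is the last resort.
      fprintf(stderr, "MarkCompactCollector: fallback in old gen failed\n");
      abort();
    }
  }
  return *space_to_allocate;
}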