| Index: src/heap.cc |
| diff --git a/src/heap.cc b/src/heap.cc |
| index 4dea51050d3f1a37e21b81ae7ea9ec53ceb4d341..d8ccfedb89715df93bf5030f0cfcaa3ad812f431 100644 |
| --- a/src/heap.cc |
| +++ b/src/heap.cc |
| @@ -1970,14 +1970,51 @@ class ScavengingVisitor : public StaticVisitorBase { |
| } |
| } |
| + template<int alignment> |
| + static inline bool SemiSpaceCopyObject(Map* map, |
| + HeapObject** slot, |
| + HeapObject* object, |
| + int object_size) { |
| + Heap* heap = map->GetHeap(); |
| + |
| + int allocation_size = object_size; |
| + if (alignment != kObjectAlignment) { |
| + ASSERT(alignment == kDoubleAlignment); |
| + allocation_size += kPointerSize; |
| + } |
| + |
| + ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE)); |
| + AllocationResult allocation = |
| + heap->new_space()->AllocateRaw(allocation_size); |
| + |
| + // Allocation in the other semi-space may fail due to fragmentation. |
| + // In that case we allocate in the old generation. |
Igor Sheludko, 2014/06/25 09:06:37:
Probably this comment should not be here since you
Hannes Payer (out of office), 2014/06/25 09:25:44:
Done.
| + HeapObject* target = NULL; // Initialization to please compiler. |
| + if (allocation.To(&target)) { |
| + if (alignment != kObjectAlignment) { |
| + target = EnsureDoubleAligned(heap, target, allocation_size); |
| + } |
| + |
| + // Order is important: slot might be inside of the target if target |
| + // was allocated over a dead object and slot comes from the store |
| + // buffer. |
| + *slot = target; |
| + MigrateObject(heap, object, target, object_size); |
| + |
| + heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); |
| + heap->IncrementSemiSpaceCopiedObjectSize(object_size); |
| + return true; |
| + } |
| + return false; |
| + } |
| + |
| template<ObjectContents object_contents, int alignment> |
| - static inline void EvacuateObject(Map* map, |
| - HeapObject** slot, |
| - HeapObject* object, |
| - int object_size) { |
| - SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); |
| - SLOW_ASSERT(object->Size() == object_size); |
| + static inline bool PromoteObject(Map* map, |
| + HeapObject** slot, |
| + HeapObject* object, |
| + int object_size) { |
| + Heap* heap = map->GetHeap(); |
| int allocation_size = object_size; |
| if (alignment != kObjectAlignment) { |
| @@ -1985,73 +2022,68 @@ class ScavengingVisitor : public StaticVisitorBase { |
| allocation_size += kPointerSize; |
| } |
| - Heap* heap = map->GetHeap(); |
| - if (heap->ShouldBePromoted(object->address(), object_size)) { |
| - AllocationResult allocation; |
| + AllocationResult allocation; |
| + if (object_contents == DATA_OBJECT) { |
| + ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); |
| + allocation = heap->old_data_space()->AllocateRaw(allocation_size); |
| + } else { |
| + ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); |
| + allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); |
| + } |
| - if (object_contents == DATA_OBJECT) { |
| - ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); |
| - allocation = heap->old_data_space()->AllocateRaw(allocation_size); |
| - } else { |
| - ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); |
| - allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); |
| + HeapObject* target = NULL; // Initialization to please compiler. |
| + if (allocation.To(&target)) { |
| + if (alignment != kObjectAlignment) { |
| + target = EnsureDoubleAligned(heap, target, allocation_size); |
| } |
| - HeapObject* target = NULL; // Initialization to please compiler. |
| - if (allocation.To(&target)) { |
| - if (alignment != kObjectAlignment) { |
| - target = EnsureDoubleAligned(heap, target, allocation_size); |
| + // Order is important: slot might be inside of the target if target |
| + // was allocated over a dead object and slot comes from the store |
| + // buffer. |
| + *slot = target; |
| + MigrateObject(heap, object, target, object_size); |
| + |
| + if (object_contents == POINTER_OBJECT) { |
| + if (map->instance_type() == JS_FUNCTION_TYPE) { |
| + heap->promotion_queue()->insert( |
| + target, JSFunction::kNonWeakFieldsEndOffset); |
| + } else { |
| + heap->promotion_queue()->insert(target, object_size); |
| } |
| + } |
| + heap->IncrementPromotedObjectsSize(object_size); |
| + return true; |
| + } |
| + return false; |
| + } |
| - // Order is important: slot might be inside of the target if target |
| - // was allocated over a dead object and slot comes from the store |
| - // buffer. |
| - *slot = target; |
| - MigrateObject(heap, object, target, object_size); |
| - if (object_contents == POINTER_OBJECT) { |
| - if (map->instance_type() == JS_FUNCTION_TYPE) { |
| - heap->promotion_queue()->insert( |
| - target, JSFunction::kNonWeakFieldsEndOffset); |
| - } else { |
| - heap->promotion_queue()->insert(target, object_size); |
| - } |
| - } |
| + template<ObjectContents object_contents, int alignment> |
| + static inline void EvacuateObject(Map* map, |
| + HeapObject** slot, |
| + HeapObject* object, |
| + int object_size) { |
| + SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); |
| + SLOW_ASSERT(object->Size() == object_size); |
| + Heap* heap = map->GetHeap(); |
| - heap->IncrementPromotedObjectsSize(object_size); |
| + if (!heap->ShouldBePromoted(object->address(), object_size)) { |
| + // A semi-space copy may fail due to fragmentation. In that case, we |
| + // try to promote the object. |
| + if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) { |
| return; |
| } |
| } |
| - ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE)); |
| - AllocationResult allocation = |
| - heap->new_space()->AllocateRaw(allocation_size); |
| - heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); |
| - // Allocation in the other semi-space may fail due to fragmentation. |
| - // In that case we allocate in the old generation. |
| - if (allocation.IsRetry()) { |
| - if (object_contents == DATA_OBJECT) { |
| - ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); |
| - allocation = heap->old_data_space()->AllocateRaw(allocation_size); |
| - } else { |
| - ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); |
| - allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); |
| - } |
| + if (PromoteObject<object_contents, alignment>( |
| + map, slot, object, object_size)) { |
| + return; |
| } |
| - HeapObject* target = HeapObject::cast(allocation.ToObjectChecked()); |
| - |
| - if (alignment != kObjectAlignment) { |
| - target = EnsureDoubleAligned(heap, target, allocation_size); |
| - } |
| + // If promotion failed, we try to copy the object to the other semi-space |
| + if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return; |
| - // Order is important: slot might be inside of the target if target |
| - // was allocated over a dead object and slot comes from the store |
| - // buffer. |
| - *slot = target; |
| - MigrateObject(heap, object, target, object_size); |
| - heap->IncrementSemiSpaceCopiedObjectSize(object_size); |
| - return; |
| + UNREACHABLE(); |
| } |
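For reviewers skimming the structural change: the patch splits the old EvacuateObject body into two helpers, SemiSpaceCopyObject and PromoteObject, and orders them as follows. Objects below the promotion threshold are first copied within new space; if that copy fails (or the object should be promoted), promotion into old data/pointer space is attempted; only if promotion also fails is the semi-space copy retried, with UNREACHABLE() as the final backstop. The standalone C++ sketch below models that fallback order only. It is not V8 code: the names EvacuateObjectModel, TrySemiSpaceCopy, and TryPromote and the boolean parameters are hypothetical stand-ins for the real ShouldBePromoted check and the AllocateRaw calls in the patch.

// Minimal sketch of the evacuation fallback order introduced by this patch.
// All helpers here are hypothetical; they only model whether an allocation
// attempt in the corresponding space would succeed.
#include <cassert>
#include <cstdio>

bool TrySemiSpaceCopy(bool semi_space_has_room) { return semi_space_has_room; }
bool TryPromote(bool old_space_has_room) { return old_space_has_room; }

void EvacuateObjectModel(bool should_be_promoted,
                         bool semi_space_has_room,
                         bool old_space_has_room) {
  if (!should_be_promoted) {
    // A semi-space copy may fail due to fragmentation; in that case we fall
    // through and try to promote the object instead.
    if (TrySemiSpaceCopy(semi_space_has_room)) {
      std::printf("copied within new space\n");
      return;
    }
  }
  if (TryPromote(old_space_has_room)) {
    std::printf("promoted to old space\n");
    return;
  }
  // If promotion failed, retry the copy to the other semi-space before
  // giving up, mirroring the final SemiSpaceCopyObject call in the patch.
  if (TrySemiSpaceCopy(semi_space_has_room)) {
    std::printf("copied within new space after failed promotion\n");
    return;
  }
  assert(false && "unreachable: both new space and old space are full");
}

int main() {
  EvacuateObjectModel(false, true, true);   // young object: copied in new space
  EvacuateObjectModel(true, true, false);   // promotion fails: falls back to a semi-space copy
  return 0;
}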