Index: src/heap.cc |
diff --git a/src/heap.cc b/src/heap.cc |
index 4dea51050d3f1a37e21b81ae7ea9ec53ceb4d341..8a3b4b6aa4a1a6b97e11f9d65e3638f14de337f3 100644 |
--- a/src/heap.cc |
+++ b/src/heap.cc |
@@ -1985,20 +1985,20 @@ class ScavengingVisitor : public StaticVisitorBase { |
allocation_size += kPointerSize; |
} |
+ AllocationResult allocation; |
Igor Sheludko
2014/06/24 16:45:51
Move this closer to the usages.
Hannes Payer (out of office)
2014/06/24 19:23:03
Done. I removed the local version and now use just one declaration.
|
Heap* heap = map->GetHeap(); |
- if (heap->ShouldBePromoted(object->address(), object_size)) { |
- AllocationResult allocation; |
- if (object_contents == DATA_OBJECT) { |
- ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); |
- allocation = heap->old_data_space()->AllocateRaw(allocation_size); |
- } else { |
- ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); |
- allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); |
- } |
+ if (!heap->ShouldBePromoted(object->address(), object_size)) { |
+ ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE)); |
+ AllocationResult allocation = |
+ heap->new_space()->AllocateRaw(allocation_size); |
+ |
+ // Allocation in the other semi-space may fail due to fragmentation. |
+ // In that case we allocate in the old generation. |
+ if (!allocation.IsRetry()) { |
Igor Sheludko
2014/06/24 16:45:51
To be uniform with the promotion path, use:
HeapObject* target = NULL;
if (allocation.To(&target)) { ... }
Hannes Payer (out of office)
2014/06/24 19:23:03
Done.
|
+ heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); |
+ HeapObject* target = HeapObject::cast(allocation.ToObjectChecked()); |
- HeapObject* target = NULL; // Initialization to please compiler. |
- if (allocation.To(&target)) { |
if (alignment != kObjectAlignment) { |
target = EnsureDoubleAligned(heap, target, allocation_size); |
} |
@@ -2008,50 +2008,47 @@ class ScavengingVisitor : public StaticVisitorBase { |
// buffer. |
*slot = target; |
MigrateObject(heap, object, target, object_size); |
- |
- if (object_contents == POINTER_OBJECT) { |
- if (map->instance_type() == JS_FUNCTION_TYPE) { |
- heap->promotion_queue()->insert( |
- target, JSFunction::kNonWeakFieldsEndOffset); |
- } else { |
- heap->promotion_queue()->insert(target, object_size); |
- } |
- } |
- |
- heap->IncrementPromotedObjectsSize(object_size); |
+ heap->IncrementSemiSpaceCopiedObjectSize(object_size); |
return; |
} |
} |
- ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE)); |
- AllocationResult allocation = |
- heap->new_space()->AllocateRaw(allocation_size); |
- heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); |
- |
- // Allocation in the other semi-space may fail due to fragmentation. |
- // In that case we allocate in the old generation. |
- if (allocation.IsRetry()) { |
- if (object_contents == DATA_OBJECT) { |
- ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); |
- allocation = heap->old_data_space()->AllocateRaw(allocation_size); |
- } else { |
- ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); |
- allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); |
- } |
+ |
+ if (object_contents == DATA_OBJECT) { |
+ ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); |
+ allocation = heap->old_data_space()->AllocateRaw(allocation_size); |
+ } else { |
+ ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); |
+ allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); |
} |
- HeapObject* target = HeapObject::cast(allocation.ToObjectChecked()); |
+ HeapObject* target = NULL; // Initialization to please compiler. |
+ if (allocation.To(&target)) { |
+ if (alignment != kObjectAlignment) { |
+ target = EnsureDoubleAligned(heap, target, allocation_size); |
+ } |
- if (alignment != kObjectAlignment) { |
- target = EnsureDoubleAligned(heap, target, allocation_size); |
+ // Order is important: slot might be inside of the target if target |
+ // was allocated over a dead object and slot comes from the store |
+ // buffer. |
+ *slot = target; |
+ MigrateObject(heap, object, target, object_size); |
+ |
+ if (object_contents == POINTER_OBJECT) { |
+ if (map->instance_type() == JS_FUNCTION_TYPE) { |
+ heap->promotion_queue()->insert(target, |
+ JSFunction::kNonWeakFieldsEndOffset); |
+ } else { |
+ heap->promotion_queue()->insert(target, object_size); |
+ } |
+ } |
+ |
+ heap->IncrementPromotedObjectsSize(object_size); |
+ return; |
} |
- // Order is important: slot might be inside of the target if target |
- // was allocated over a dead object and slot comes from the store |
- // buffer. |
- *slot = target; |
- MigrateObject(heap, object, target, object_size); |
- heap->IncrementSemiSpaceCopiedObjectSize(object_size); |
- return; |
+ // The scavenger should always have enough space available in the old |
+ // generation for promotion. Otherwise a full gc would have been triggered. |
+ UNREACHABLE(); |
} |