Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 352083002: Revert "Update survival statistics correctly in the Scavenger." (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 6 months ago
Index: src/heap.cc
diff --git a/src/heap.cc b/src/heap.cc
index 627ea7792e4a95c7532c16e8b9b8263be3b5ab7f..4dea51050d3f1a37e21b81ae7ea9ec53ceb4d341 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1986,14 +1986,17 @@ class ScavengingVisitor : public StaticVisitorBase {
     }
     Heap* heap = map->GetHeap();
-    AllocationResult allocation;
+    if (heap->ShouldBePromoted(object->address(), object_size)) {
+      AllocationResult allocation;
-    if (!heap->ShouldBePromoted(object->address(), object_size)) {
-      ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
-      allocation = heap->new_space()->AllocateRaw(allocation_size);
+      if (object_contents == DATA_OBJECT) {
+        ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
+        allocation = heap->old_data_space()->AllocateRaw(allocation_size);
+      } else {
+        ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+        allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
+      }
-      // Allocation in the other semi-space may fail due to fragmentation.
-      // In that case we allocate in the old generation.
       HeapObject* target = NULL;  // Initialization to please compiler.
       if (allocation.To(&target)) {
         if (alignment != kObjectAlignment) {
@@ -2006,48 +2009,49 @@ class ScavengingVisitor : public StaticVisitorBase {
         *slot = target;
         MigrateObject(heap, object, target, object_size);
-        heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
-        heap->IncrementSemiSpaceCopiedObjectSize(object_size);
+        if (object_contents == POINTER_OBJECT) {
+          if (map->instance_type() == JS_FUNCTION_TYPE) {
+            heap->promotion_queue()->insert(
+                target, JSFunction::kNonWeakFieldsEndOffset);
+          } else {
+            heap->promotion_queue()->insert(target, object_size);
+          }
+        }
+
+        heap->IncrementPromotedObjectsSize(object_size);
         return;
       }
     }
-
-    if (object_contents == DATA_OBJECT) {
-      ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
-      allocation = heap->old_data_space()->AllocateRaw(allocation_size);
-    } else {
-      ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
-      allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
-    }
-
-    HeapObject* target = NULL;  // Initialization to please compiler.
-    if (allocation.To(&target)) {
-      if (alignment != kObjectAlignment) {
-        target = EnsureDoubleAligned(heap, target, allocation_size);
+    ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
+    AllocationResult allocation =
+        heap->new_space()->AllocateRaw(allocation_size);
+    heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+
+    // Allocation in the other semi-space may fail due to fragmentation.
+    // In that case we allocate in the old generation.
+    if (allocation.IsRetry()) {
+      if (object_contents == DATA_OBJECT) {
+        ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
+        allocation = heap->old_data_space()->AllocateRaw(allocation_size);
+      } else {
+        ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+        allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
       }
+    }
-      // Order is important: slot might be inside of the target if target
-      // was allocated over a dead object and slot comes from the store
-      // buffer.
-      *slot = target;
-      MigrateObject(heap, object, target, object_size);
-
-      if (object_contents == POINTER_OBJECT) {
-        if (map->instance_type() == JS_FUNCTION_TYPE) {
-          heap->promotion_queue()->insert(target,
-                                          JSFunction::kNonWeakFieldsEndOffset);
-        } else {
-          heap->promotion_queue()->insert(target, object_size);
-        }
-      }
+    HeapObject* target = HeapObject::cast(allocation.ToObjectChecked());
-      heap->IncrementPromotedObjectsSize(object_size);
-      return;
+    if (alignment != kObjectAlignment) {
+      target = EnsureDoubleAligned(heap, target, allocation_size);
     }
-    // The scavenger should always have enough space available in the old
-    // generation for promotion. Otherwise a full gc would have been triggered.
-    UNREACHABLE();
+    // Order is important: slot might be inside of the target if target
+    // was allocated over a dead object and slot comes from the store
+    // buffer.
+    *slot = target;
+    MigrateObject(heap, object, target, object_size);
+    heap->IncrementSemiSpaceCopiedObjectSize(object_size);
+    return;
   }
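
For readers untangling the interleaved '+' and '-' lines above, here is a minimal, self-contained sketch of the evacuation order that the '+' lines (the restored code) follow. The Space and Heap types, their fields, and the Evacuate() signature below are invented stand-ins for illustration, not V8's actual classes; only the ordering mirrors the restored code: try to promote an object that has survived long enough into the old generation, otherwise copy it within the new space, and fall back to the old generation when the target semi-space cannot hold it, still counting that fallback as a semi-space copy.

// Hypothetical, simplified model of the restored evacuation order; all types
// and fields here are stand-ins, not V8 API.
#include <cstdio>
#include <optional>

struct Space {
  int free_bytes = 0;
  // Returns a dummy token on success, std::nullopt when the space is full.
  std::optional<int> AllocateRaw(int size) {
    if (size > free_bytes) return std::nullopt;
    free_bytes -= size;
    return free_bytes;
  }
};

struct Heap {
  Space new_space;
  Space old_space;
  int promoted_bytes = 0;
  int semi_space_copied_bytes = 0;

  void Evacuate(int object_size, bool should_be_promoted) {
    if (should_be_promoted) {
      if (old_space.AllocateRaw(object_size)) {
        // The real code also migrates the object and, for pointer objects,
        // pushes it onto the promotion queue so its slots are visited later.
        promoted_bytes += object_size;
        return;
      }
    }
    // Copy within the new space; if the other semi-space cannot hold the
    // object (fragmentation), allocate in the old generation instead. The
    // restored code counts this fallback as a semi-space copy as well.
    if (!new_space.AllocateRaw(object_size)) {
      old_space.AllocateRaw(object_size);
    }
    semi_space_copied_bytes += object_size;
  }
};

int main() {
  Heap heap{/*new_space=*/{1024}, /*old_space=*/{4096}};
  heap.Evacuate(64, /*should_be_promoted=*/true);   // promoted
  heap.Evacuate(32, /*should_be_promoted=*/false);  // copied within new space
  std::printf("promoted=%d semi_space_copied=%d\n",
              heap.promoted_bytes, heap.semi_space_copied_bytes);
  return 0;
}

By contrast, the '-' lines (the change being reverted) attempt the new-space copy first for objects that are not yet due for promotion and treat a failed old-generation promotion as unreachable.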