Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index a358feafe592c488ee1e0681da2e08790bde2945..8d605317090a0d3e6611640e754dfddc73e8618c 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -1706,6 +1706,16 @@ void Heap::Scavenge() {
     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
   }
+  {
+    // Scavenge objects marked black or grey by the MarkCompact collector.
+    TRACE_GC(tracer(),
+             GCTracer::Scope::SCAVENGER_OBJECTS_MARKED_BY_MARK_COMPACT);
+    if (incremental_marking()->IsMarking()) {
+      PromoteMarkedObjects();
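+      // Drain the scavenger queues again so that objects reachable from the
+      // just-promoted objects are also scavenged.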
+      new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+    }
+  }
+
   if (FLAG_scavenge_reclaim_unmodified_objects) {
     isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
         &IsUnscavengedHeapObject);
@@ -3069,6 +3079,7 @@ void Heap::CreateFillerObjectAt(Address addr, int size,
   if (mode == ClearRecordedSlots::kYes) {
     ClearRecordedSlotRange(addr, addr + size);
   }
+
   // At this point, we may be deserializing the heap from a snapshot, and
   // none of the maps have been created yet and are NULL.
   DCHECK((filler->map() == NULL && !deserialization_complete_) ||
@@ -4854,6 +4865,21 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   // checking of the sync flag in the snapshot would fail.
 }
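+
+// Walks all from-space pages and forces promotion of every live object that
+// the incremental marker has already marked, so that its marking state is
+// preserved when the scavenge moves it.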
+void Heap::PromoteMarkedObjects() {
+  NewSpacePageIterator page_it(new_space_.from_space());
+  while (page_it.has_next()) {
+    Page* page = page_it.next();
+    LiveObjectIterator<kAllLiveObjects> it(page);
+    Object* object = nullptr;
+    while ((object = it.Next()) != nullptr) {
+      if (object->IsHeapObject()) {
+        HeapObject* heap_object = HeapObject::cast(object);
+        DCHECK(IsUnscavengedHeapObject(this, &object));
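+        // FORCE_PROMOTION evacuates the object to old space rather than
+        // copying it within new space.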
+        Scavenger::ScavengeObject(&heap_object, heap_object, FORCE_PROMOTION);
+      }
+    }
+  }
+}
+
 // TODO(1236194): Since the heap size is configurable on the command line
 // and through the API, we should gracefully handle the case that the heap