Chromium Code Reviews

Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index e77e90e166bfaeb6e8347dd56698e94ec7ec8906..a086f0ca76953b17cbb0234f7face4e23ed380b3 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -2935,7 +2935,9 @@ class PointersUpdatingVisitor : public ObjectVisitor {
     MapWord map_word = heap_obj->map_word();
     if (map_word.IsForwardingAddress()) {
       DCHECK(heap->InFromSpace(heap_obj) ||
-             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
+             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
+             Page::FromAddress(heap_obj->address())
+                 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
       HeapObject* target = map_word.ToForwardingAddress();
       base::NoBarrier_CompareAndSwap(
           reinterpret_cast<base::AtomicWord*>(slot),
@@ -3463,7 +3465,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
         // - Leave the page in the list of pages of a space since we could not
         //   fully evacuate it.
         DCHECK(p->IsEvacuationCandidate());
-        p->SetFlag(Page::RESCAN_ON_EVACUATION);
+        p->SetFlag(Page::COMPACTION_WAS_ABORTED);
        abandoned_pages++;
        break;
      case MemoryChunk::kCompactingFinalize:
@@ -3794,13 +3796,27 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
       // code objects from non-updated pc pointing into evacuation candidate.
       SkipList* list = p->skip_list();
       if (list != NULL) list->Clear();
-    }
-    if (p->IsEvacuationCandidate() &&
-        p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-      // Case where we've aborted compacting a page. Clear the flag here to
-      // avoid release the page later on.
-      p->ClearEvacuationCandidate();
+      if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+        p->ClearEvacuationCandidate();
+        // First pass on aborted pages.
+        int offsets[16];
+        for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+          Address cell_base = it.CurrentCellBase();
+          MarkBit::CellType* cell = it.CurrentCell();
+          if (*cell == 0) continue;
+          int live_objects = MarkWordToObjectStarts(*cell, offsets);
+          for (int i = 0; i < live_objects; i++) {
+            Address object_addr = cell_base + offsets[i] * kPointerSize;
+            HeapObject* live_object = HeapObject::FromAddress(object_addr);
+            DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
+            Map* map = live_object->synchronized_map();
+            int size = live_object->SizeFromMap(map);
+            live_object->IterateBody(map->instance_type(), size,
+                                     &updating_visitor);
+          }
+        }
+      }
     }
     if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
@@ -3848,6 +3864,33 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   EvacuationWeakObjectRetainer evacuation_object_retainer;
   heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
+  // Second pass on aborted pages.
Hannes Payer (out of office), 2015/10/23 13:09:18:
  Please add a separate timer scope here.

Michael Lippautz, 2015/10/23 13:27:05:
  Done.
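(For context: in this file, "a separate timer scope" means wrapping the new pass in a GCTracer::Scope so its runtime shows up in GC tracing. A minimal sketch of how the second pass could be wrapped is below; the scope id MC_SWEEP_ABORTED and the variable name are illustrative assumptions, not taken from this CL.)

    // Hedged sketch only: MC_SWEEP_ABORTED is an assumed GCTracer::Scope id.
    {
      GCTracer::Scope sweep_aborted_scope(heap()->tracer(),
                                          GCTracer::Scope::MC_SWEEP_ABORTED);
      // ... second pass over evacuation_candidates_ for aborted pages ...
    }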
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+      p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
+      PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+      switch (space->identity()) {
+        case OLD_SPACE:
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+          break;
+        case CODE_SPACE:
+          if (FLAG_zap_code_space) {
+            Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+                  ZAP_FREE_SPACE>(space, NULL, p, nullptr);
+          } else {
+            Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+                  IGNORE_FREE_SPACE>(space, NULL, p, nullptr);
+          }
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+    }
+  }
+
   heap_->isolate()->inner_pointer_to_code_cache()->Flush();
   // The hashing of weak_object_to_code_table is no longer valid.