| Index: src/heap/mark-compact.cc
|
| diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
|
| index 3e5dafe345a481f08f88f20fd971db40e8894af7..ab212d219d2eb459691c2fcee78dd906990cb969 100644
|
| --- a/src/heap/mark-compact.cc
|
| +++ b/src/heap/mark-compact.cc
|
| @@ -2872,11 +2872,12 @@ void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
|
|
|
| static void UpdatePointer(HeapObject** address, HeapObject* object) {
|
| MapWord map_word = object->map_word();
|
| - // The store buffer can still contain stale pointers in dead large objects.
|
| - // Ignore these pointers here.
|
| + // Since we only filter invalid slots in old space, the store buffer can
|
| + // still contain stale pointers in the large object and map spaces. Ignore
|
| + // these pointers here.
|
| DCHECK(map_word.IsForwardingAddress() ||
|
| - object->GetHeap()->lo_space()->FindPage(
|
| - reinterpret_cast<Address>(address)) != NULL);
|
| + !object->GetHeap()->old_space()->Contains(
|
| + reinterpret_cast<Address>(address)));
|
| if (map_word.IsForwardingAddress()) {
|
| // Update the corresponding slot.
|
| *address = map_word.ToForwardingAddress();
|
| @@ -3334,7 +3335,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
|
| // entries of such pages are filtered before rescanning.
|
| DCHECK(p->IsEvacuationCandidate());
|
| p->SetFlag(Page::COMPACTION_WAS_ABORTED);
|
| - p->set_scan_on_scavenge(true);
|
| abandoned_pages++;
|
| break;
|
| case MemoryChunk::kCompactingFinalize:
|
| @@ -3573,6 +3573,10 @@ bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
|
| page->markbits()->ClearRange(
|
| page->AddressToMarkbitIndex(page->area_start()),
|
| page->AddressToMarkbitIndex(object->address()));
|
| + if (page->old_to_new_slots() != nullptr) {
|
| + page->old_to_new_slots()->RemoveRange(
|
| + 0, static_cast<int>(object->address() - page->address()));
|
| + }
|
| RecomputeLiveBytes(page);
|
| }
|
| return false;
|
| @@ -3727,8 +3731,6 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
|
| // Update roots.
|
| heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
|
|
|
| - StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
|
| - &Heap::ScavengeStoreBufferCallback);
|
| heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
|
| }
|
|
|
| @@ -3815,14 +3817,12 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
|
| if (!p->IsEvacuationCandidate()) continue;
|
| PagedSpace* space = static_cast<PagedSpace*>(p->owner());
|
| space->Free(p->area_start(), p->area_size());
|
| - p->set_scan_on_scavenge(false);
|
| p->ResetLiveBytes();
|
| CHECK(p->SweepingDone());
|
| space->ReleasePage(p, true);
|
| }
|
| evacuation_candidates_.Rewind(0);
|
| compacting_ = false;
|
| - heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
|
| heap()->FreeQueuedChunks();
|
| }
|
|
|
|
|