Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 4fa3341b9587c35b7a2c1b5df33e9505e986c1de..502ef758a7799f36a805267d4fbaed8dd9930b69 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -848,7 +848,7 @@ void MarkCompactCollector::Prepare() {
 
   // If concurrent unmapping tasks are still running, we should wait for
   // them here.
-  heap()->WaitUntilUnmappingOfFreeChunksCompleted();
+  heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
 
   // Clear marking bits if incremental marking is aborted.
   if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
@@ -3541,7 +3541,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   // slots only handles old space (for unboxed doubles), and thus map space can
   // still contain stale pointers. We only free the chunks after pointer updates
   // to still have access to page headers.
-  heap()->FreeQueuedChunks();
+  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
 
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
@@ -3729,7 +3729,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
-  heap()->FreeQueuedChunks();
+  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
 }
 
 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
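
For context: all three call sites above assume an Unmapper component owned by MemoryAllocator that queues dead memory chunks and releases them, possibly from a concurrent task. What follows is a minimal sketch of that contract, not V8's actual implementation. Only unmapper(), FreeQueuedChunks(), and WaitUntilCompleted() appear in the diff; MemoryChunk's body, AddMemoryChunkSafe(), and FreeQueuedChunksConcurrently() are illustrative assumptions.

#include <mutex>
#include <thread>
#include <vector>

// Hypothetical stand-in for v8::internal::MemoryChunk; a real chunk
// would own an OS-level virtual memory reservation.
struct MemoryChunk {};

class Unmapper {
 public:
  ~Unmapper() {
    WaitUntilCompleted();
    FreeQueuedChunks();
  }

  // Producer side: queue a chunk for later release (any thread).
  void AddMemoryChunkSafe(MemoryChunk* chunk) {
    std::lock_guard<std::mutex> guard(mutex_);
    queue_.push_back(chunk);
  }

  // Synchronously release everything queued so far. The collector calls
  // this only once no stale pointers into the chunks remain.
  void FreeQueuedChunks() {
    std::vector<MemoryChunk*> local;
    {
      std::lock_guard<std::mutex> guard(mutex_);
      queue_.swap(local);
    }
    // Release outside the lock so producers are never blocked on unmapping.
    for (MemoryChunk* chunk : local) delete chunk;
  }

  // Start a background task that drains the current queue; this sketch
  // allows at most one task at a time.
  void FreeQueuedChunksConcurrently() {
    WaitUntilCompleted();
    task_ = std::thread([this] { FreeQueuedChunks(); });
  }

  // Block until the concurrent unmapping task (if any) has finished,
  // as MarkCompactCollector::Prepare() does in the first hunk above.
  void WaitUntilCompleted() {
    if (task_.joinable()) task_.join();
  }

 private:
  std::mutex mutex_;
  std::vector<MemoryChunk*> queue_;
  std::thread task_;
};

The design point of the patch itself is ownership: the queue-and-release logic that previously hung off Heap now lives behind heap()->memory_allocator()->unmapper(), so the collector keeps the same two entry points (wait for in-flight unmapping in Prepare(), free queued chunks only after pointer updates) without Heap forwarding them.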