Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 502ef758a7799f36a805267d4fbaed8dd9930b69..4fa3341b9587c35b7a2c1b5df33e9505e986c1de 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -848,7 +848,7 @@
   // If concurrent unmapping tasks are still running, we should wait for
   // them here.
-  heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
+  heap()->WaitUntilUnmappingOfFreeChunksCompleted();
   // Clear marking bits if incremental marking is aborted.
   if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
@@ -3541,7 +3541,7 @@
   // slots only handles old space (for unboxed doubles), and thus map space can
   // still contain stale pointers. We only free the chunks after pointer updates
   // to still have access to page headers.
-  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+  heap()->FreeQueuedChunks();
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
@@ -3729,7 +3729,7 @@
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
-  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+  heap()->FreeQueuedChunks();
 }
 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
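
Note: the new call sites above compile only if Heap exposes thin forwarding
wrappers around MemoryAllocator::Unmapper; those declarations would live in
src/heap/heap.h and are not part of this file's hunks. A minimal sketch of the
assumed forwarding implementations (the method names are taken from the call
sites; the bodies are inferred from the lines being replaced, not copied from
the actual CL):

  // Sketch only: assumed delegating wrappers in src/heap/heap.cc.
  void Heap::FreeQueuedChunks() {
    // Forward to the unmapper, which owns the queue of chunks released
    // during sweeping/compaction.
    memory_allocator()->unmapper()->FreeQueuedChunks();
  }

  void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
    // Block until any concurrent unmapping tasks have finished.
    memory_allocator()->unmapper()->WaitUntilCompleted();
  }

The point of the indirection is that MarkCompactCollector no longer reaches
through Heap into MemoryAllocator's internals (effectively a Law of Demeter
cleanup): callers depend only on Heap's public interface, and the Unmapper
remains an implementation detail of the allocator.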