Chromium Code Reviews

Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index e26653495ecf2ae7ff6bb9211798212dec8cbc1b..2770546955453cc36cad8f2c3ccec81d971083be 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -608,6 +608,27 @@ void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
 }
+void MarkCompactCollector::RefillFreeList(CompactionSpace* space) {

ulan, 2015/10/09 11:48:48:
Will this function be used in other CLs?

Michael Lippautz, 2015/10/09 12:20:12:
The function should already be used as PagedSpace:…
+  FreeList* free_list = nullptr;
+  if (space->identity() == OLD_SPACE) {
+    free_list = free_list_old_space_.get();
+  } else if (space->identity() == CODE_SPACE) {
+    free_list = free_list_code_space_.get();
+  } else {
+    UNREACHABLE();
+  }
+
+  intptr_t kWantedMemory = 500 * KB;

ulan, 2015/10/09 11:48:48:
This constant is duplicated below. Let's unify the…

Michael Lippautz, 2015/10/09 12:20:13:
Unified as MarkCompactCollector::kCompactionMemory
+  intptr_t refilled = 0;
+  while (refilled < kWantedMemory) {
+    FreeSpace* node = free_list->TryRemoveMemory(kWantedMemory - refilled);
+    if (node == nullptr) return;
+    refilled += node->size();
+    space->AddMemory(node->address(), node->size());
+  }
+}
+
+
 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
   // This is only used when resizing an object.
   DCHECK(MemoryChunk::FromAddress(old_start) ==
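The RefillFreeList(CompactionSpace*) overload added above picks the free list that matches the space's identity and then pulls nodes off it until roughly 500 KB has been handed to the compaction space, stopping early if the free list runs dry (per the exchange above, the 500 * KB constant was later hoisted into a MarkCompactCollector-level constant). The following is a minimal standalone sketch of that accumulation pattern; ToyFreeList, ToyCompactionSpace, and RefillCompactionSpace are illustrative stand-ins, not V8 types.

// Standalone model of the refill loop above. ToyFreeList, ToyCompactionSpace
// and kWantedMemory are illustrative stand-ins, not V8 types; the point is
// the "pull free-list nodes until ~500 KB has been transferred" pattern.
#include <cstdint>
#include <cstdio>
#include <deque>
#include <optional>

constexpr std::intptr_t KB = 1024;
constexpr std::intptr_t kWantedMemory = 500 * KB;

struct ToyFreeList {
  std::deque<std::intptr_t> node_sizes;  // sizes of the free-list nodes
  // Hands back one node (represented by its size), or nothing if empty.
  std::optional<std::intptr_t> TryRemoveMemory(std::intptr_t /*hint*/) {
    if (node_sizes.empty()) return std::nullopt;
    std::intptr_t size = node_sizes.front();
    node_sizes.pop_front();
    return size;
  }
};

struct ToyCompactionSpace {
  std::intptr_t owned = 0;
  void AddMemory(std::intptr_t size) { owned += size; }
};

// Mirrors the shape of the refill loop above: keep pulling nodes until
// roughly kWantedMemory has moved over, or stop early when the source
// free list is exhausted.
void RefillCompactionSpace(ToyFreeList* free_list, ToyCompactionSpace* space) {
  std::intptr_t refilled = 0;
  while (refilled < kWantedMemory) {
    std::optional<std::intptr_t> node =
        free_list->TryRemoveMemory(kWantedMemory - refilled);
    if (!node) return;
    refilled += *node;
    space->AddMemory(*node);
  }
}

int main() {
  ToyFreeList free_list{{200 * KB, 200 * KB, 200 * KB}};
  ToyCompactionSpace space;
  RefillCompactionSpace(&free_list, &space);
  // The last node may overshoot the target, just like a real free-list
  // node can; this prints 600.
  std::printf("compaction space owns %ld KB\n",
              static_cast<long>(space.owned / KB));
  return 0;
}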
@@ -3397,11 +3418,11 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
     compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
   }
-  compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
-      heap()->old_space());
-  compaction_spaces_for_tasks[0]
-      ->Get(CODE_SPACE)
-      ->MoveOverFreeMemory(heap()->code_space());
+  const intptr_t kWantedMemory = 500 * KB;

Michael Lippautz, 2015/10/09 11:08:59:
We could branch in a fast case here for 1 compact…
+  heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
+                                                  num_tasks, kWantedMemory);
+  heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
+                                                   num_tasks, kWantedMemory);
   compaction_in_progress_ = true;
   // Kick off parallel tasks.
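The DivideUponCompactionSpaces calls above replace the earlier MoveOverFreeMemory calls, which handed free memory only to the first compaction space collection; now each of the num_tasks per-task spaces can receive up to kWantedMemory from the owning space's free list. A rough sketch of such a division, building on the toy types from the previous sketch (the real method's policy is not shown in this hunk, so the loop below is only an assumption about its shape):

// Sketch: hand each per-task compaction space up to `limit` bytes from the
// owning space's free list, stopping once the free list is exhausted.
// Builds on ToyFreeList/ToyCompactionSpace from the sketch above; the real
// DivideUponCompactionSpaces in V8 may use a different policy.
#include <cstdint>
#include <optional>
#include <vector>

void DivideUponCompactionSpaces(ToyFreeList* free_list,
                                std::vector<ToyCompactionSpace>* spaces,
                                std::intptr_t limit) {
  for (ToyCompactionSpace& space : *spaces) {
    std::intptr_t given = 0;
    while (given < limit) {
      std::optional<std::intptr_t> node =
          free_list->TryRemoveMemory(limit - given);
      if (!node) return;  // Nothing left; later spaces get no head start.
      given += *node;
      space.AddMemory(*node);
    }
  }
}

Called once for the old space and once for the code space, as in the hunk above, this gives every task its own starting budget instead of concentrating all free memory in task 0.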
@@ -3413,9 +3434,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   }
   // Contribute in main thread. Counter and signal are in principal not needed.
-  concurrent_compaction_tasks_active_++;
   EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
-  pending_compaction_tasks_semaphore_.Signal();
   WaitUntilCompactionCompleted();
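With this last hunk the main thread contributes by calling EvacuatePages directly instead of registering itself as an extra compaction task, so it no longer increments concurrent_compaction_tasks_active_ or signals pending_compaction_tasks_semaphore_, and WaitUntilCompactionCompleted only has to wait for the real worker tasks. A small standalone illustration of that pattern, using std::thread and std::latch (C++20) as stand-ins for V8's cancelable tasks and semaphore:

// Illustration: the main thread performs its own share of the work inline and
// waits only for the worker threads; it never counts itself as a pending task.
// std::thread and std::latch are stand-ins for V8's compaction tasks and
// pending_compaction_tasks_semaphore_, not the actual machinery.
#include <cstdio>
#include <latch>
#include <thread>
#include <vector>

void EvacuateShare(int task_id) {
  std::printf("task %d evacuating its pages\n", task_id);
}

int main() {
  const int num_tasks = 4;         // includes the main thread
  std::latch done(num_tasks - 1);  // only the worker tasks are counted
  std::vector<std::thread> workers;
  for (int i = 1; i < num_tasks; i++) {
    workers.emplace_back([i, &done] {
      EvacuateShare(i);
      done.count_down();  // analogous to Signal() in the removed lines
    });
  }
  EvacuateShare(0);  // main thread contributes directly, no count_down()
  done.wait();       // analogous to WaitUntilCompactionCompleted()
  for (std::thread& t : workers) t.join();
  return 0;
}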