Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index d4c619bd1953b6450999f6c57be5b71754152505..96768dab44228975e37710e9ac3379d94fa4deab 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -22,6 +22,7 @@
 #include "src/heap/objects-visiting.h"
 #include "src/heap/objects-visiting-inl.h"
 #include "src/heap/slots-buffer.h"
+#include "src/heap/spaces.h"
 #include "src/heap/spaces-inl.h"
 #include "src/heap-profiler.h"
 #include "src/ic/ic.h"
@@ -58,6 +59,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
       parallel_compaction_in_progress_(false),
       pending_sweeper_jobs_semaphore_(0),
       pending_compaction_jobs_semaphore_(0),
+      concurrent_compaction_tasks_active_(0),
       evacuation_(false),
       slots_buffer_allocator_(nullptr),
       migration_slots_buffer_(nullptr),
@@ -474,21 +476,22 @@ void MarkCompactCollector::ClearMarkbits() {
 class MarkCompactCollector::CompactionTask : public v8::Task {
  public:
-  explicit CompactionTask(Heap* heap) : heap_(heap) {}
+  explicit CompactionTask(Heap* heap, CompactionSpaces* compaction_spaces)
+      : heap_(heap), compaction_spaces_(compaction_spaces) {}
   virtual ~CompactionTask() {}
  private:
   // v8::Task overrides.
   void Run() override {
-    // TODO(mlippautz, hpayer): EvacuatePages is not thread-safe and can just be
-    // called by one thread concurrently.
-    heap_->mark_compact_collector()->EvacuatePages();
+    heap_->mark_compact_collector()->EvacuatePagesUsingCompactionSpace(
+        compaction_spaces_);
     heap_->mark_compact_collector()
         ->pending_compaction_jobs_semaphore_.Signal();
   }
   Heap* heap_;
+  CompactionSpaces* compaction_spaces_;
   DISALLOW_COPY_AND_ASSIGN(CompactionTask);
 };
@@ -3323,6 +3326,45 @@ void MarkCompactCollector::EvacuateNewSpace() {
 }
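+// Parallel-evacuation variant of EvacuateLiveObjectsFromPage: it returns
+// false as soon as an allocation in the target compaction space fails (and,
+// as the name suggests, does not fall back to any emergency handling), so
+// that the caller can mark the page's compaction as aborted.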
+bool MarkCompactCollector::EvacuateLiveObjectsFromPageWithoutEmergency(
+    Page* p, PagedSpace* target_space) {
+  AlwaysAllocateScope always_allocate(isolate());
+  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
+  p->SetWasSwept();
+
+  int offsets[16];
+
+  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    if (*cell == 0) continue;
+
+    int live_objects = MarkWordToObjectStarts(*cell, offsets);
+    for (int i = 0; i < live_objects; i++) {
+      Address object_addr = cell_base + offsets[i] * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(object_addr);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+      int size = object->Size();
+      AllocationAlignment alignment = object->RequiredAlignment();
+      HeapObject* target_object = nullptr;
+      AllocationResult allocation = target_space->AllocateRaw(size, alignment);
+      if (!allocation.To(&target_object)) {
+        return false;
+      }
+      MigrateObject(target_object, object, size, target_space->identity());
+      DCHECK(object->map_word().IsForwardingAddress());
+    }
+
+    // Clear marking bits for current cell.
+    *cell = 0;
+  }
+  p->ResetLiveBytes();
+  return true;
+}
+
+
 void MarkCompactCollector::EvacuateLiveObjectsFromPage(
     Page* p, PagedSpace* target_space) {
   AlwaysAllocateScope always_allocate(isolate());
@@ -3371,15 +3413,137 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(
 void MarkCompactCollector::EvacuatePagesInParallel() {
+  // As soon as we have at least 7 pages to evacuate, we kick off another
+  // task. We keep a maximum of 2 tasks (including the main thread), though.
+  const int num_tasks = Min(1 + evacuation_candidates_.length() / 7, 2);
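+  // E.g., up to 6 candidates yield num_tasks == 1 (handled by the sequential
+  // path below); 7 or more candidates yield 2.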
+  if (num_tasks == 1) {
+    // Use single-threaded version for now.
+    EvacuatePages();
+    return;
+  }
+
+  // Set up compaction spaces.
+  CompactionSpaces** compaction_spaces_for_tasks =
+      new CompactionSpaces*[num_tasks];
+  FreeList** free_lists = new FreeList*[2 * num_tasks];
+  for (int i = 0; i < num_tasks; i++) {
+    compaction_spaces_for_tasks[i] = new CompactionSpaces(heap());
+    free_lists[i] = compaction_spaces_for_tasks[i]->Get(OLD_SPACE)->free_list();
+    free_lists[i + num_tasks] =
+        compaction_spaces_for_tasks[i]->Get(CODE_SPACE)->free_list();
+  }
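+  // free_lists[0..num_tasks-1] hold the tasks' old-space free lists and
+  // free_lists[num_tasks..2*num_tasks-1] the code-space ones; giving each
+  // task its own lists lets it allocate without contending on the owning
+  // space's free list.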
+  // Move free memory over to the compaction spaces. If enough memory is
+  // available, or if we want to preserve allocation order, this step can be
+  // omitted.
+  heap()->old_space()->free_list()->Divide(free_lists, num_tasks);
+  heap()->code_space()->free_list()->Divide(&free_lists[num_tasks], num_tasks);
+  delete[] free_lists;
+
   parallel_compaction_in_progress_ = true;
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new CompactionTask(heap()), v8::Platform::kShortRunningTask);
+  // Kick off parallel tasks.
+  for (int i = 1; i < num_tasks; i++) {
+    concurrent_compaction_tasks_active_++;
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
+        v8::Platform::kShortRunningTask);
+  }
+
+  // Contribute on the main thread. Counter and signal are in principle not
+  // needed.
+  concurrent_compaction_tasks_active_++;
+  EvacuatePagesUsingCompactionSpace(compaction_spaces_for_tasks[0]);
+  pending_compaction_jobs_semaphore_.Signal();
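+  // Signaling for the main thread as well keeps the wait loop uniform: it
+  // simply waits for one signal per active task.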
+
+  // Wait until all compaction tasks have finished.
+  WaitUntilCompactionCompleted();
+  parallel_compaction_in_progress_ = false;
+
+  // Merge back the compacted memory.
+  for (int i = 0; i < num_tasks; i++) {
+    heap()->old_space()->MergeCompactionSpace(
+        compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
+    heap()->code_space()->MergeCompactionSpace(
+        compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
+    delete compaction_spaces_for_tasks[i];
+  }
+  delete[] compaction_spaces_for_tasks;
+
+  // Finalize sequentially.
+  const int num_pages = evacuation_candidates_.length();
+  int abandoned_pages = 0;
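+  // Resolve the per-page state recorded by the tasks: successfully compacted
+  // pages are unlinked and marked as swept; pages whose compaction was
+  // aborted (an allocation in the compaction space failed) drop their slots
+  // buffer and are flagged for re-scanning instead.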
+  for (int i = 0; i < num_pages; i++) {
+    Page* p = evacuation_candidates_[i];
+    switch (p->parallel_compaction_state().Value()) {
+      case MemoryChunk::kCompactingAborted:
+        slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
+        p->ClearEvacuationCandidate();
+        p->SetFlag(Page::RESCAN_ON_EVACUATION);
+        abandoned_pages++;
+        break;
+      case MemoryChunk::kCompactingFinalize:
+        p->SetWasSwept();
+        p->Unlink();
+        break;
+      case MemoryChunk::kNoEvacuationCandidate:
+        break;
+      default:
+        // We should not observe kCompactingInProgress or kCompactingDone.
+        UNREACHABLE();
+    }
+    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+  }
+  if (num_pages > 0) {
+    if (FLAG_trace_fragmentation) {
+      if (abandoned_pages != 0) {
+        PrintF(
+            "  Abandon %d out of %d page compactions due to lack of memory\n",
+            abandoned_pages, num_pages);
+      } else {
+        PrintF("  Compacted %d pages\n", num_pages);
+      }
+    }
+  }
 }
 void MarkCompactCollector::WaitUntilCompactionCompleted() {
-  pending_compaction_jobs_semaphore_.Wait();
-  parallel_compaction_in_progress_ = false;
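+  // Each task, including the main thread's contribution, signals exactly
+  // once, so wait for as many signals as there are active tasks.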
+  while (concurrent_compaction_tasks_active_ > 0) {
+    pending_compaction_jobs_semaphore_.Wait();
+    concurrent_compaction_tasks_active_--;
+  }
+}
+
+
+void MarkCompactCollector::EvacuatePagesUsingCompactionSpace(
+    CompactionSpaces* compaction_spaces) {
+  for (int i = 0; i < evacuation_candidates_.length(); i++) {
+    Page* p = evacuation_candidates_[i];
+    DCHECK(p->IsEvacuationCandidate() ||
+           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+    DCHECK(static_cast<int>(p->parallel_sweeping()) ==
+           MemoryChunk::SWEEPING_DONE);
+
+    if (!p->parallel_compaction_state().TrySetValue(
+            MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
+      continue;
+    }
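+    // TrySetValue lets exactly one task claim each candidate page; tasks that
+    // lose the race simply skip it.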
+
+    // In principle reading IsEvacuationCandidate() should be fine; however,
+    // we avoid reading the state when we don't have exclusive access.
+    if (p->IsEvacuationCandidate()) {
+      DCHECK_EQ(p->parallel_compaction_state().Value(),
+                MemoryChunk::kCompactingInProgress);
+      if (EvacuateLiveObjectsFromPageWithoutEmergency(
+              p, compaction_spaces->Get(p->owner()->identity()))) {
+        p->parallel_compaction_state().SetValue(
+            MemoryChunk::kCompactingFinalize);
+      } else {
+        p->parallel_compaction_state().SetValue(
+            MemoryChunk::kCompactingAborted);
+      }
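+      // Either outcome is resolved by the sequential finalization pass in
+      // EvacuatePagesInParallel.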
+    } else {
+      p->parallel_compaction_state().SetValue(
+          MemoryChunk::kNoEvacuationCandidate);
+    }
+  }
 }
@@ -3631,7 +3795,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     EvacuationScope evacuation_scope(this);
     if (FLAG_parallel_compaction) {
       EvacuatePagesInParallel();
-      WaitUntilCompactionCompleted();
     } else {
       EvacuatePages();
     }