Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index c94e15757965bca5b07c7ad2d335a7e64094ff15..04cea9800f8daa9bf75ea454f5e6c202ab1f54db 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -65,8 +65,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
       sweeping_in_progress_(false),
       compaction_in_progress_(false),
       pending_sweeper_tasks_semaphore_(0),
-      pending_compaction_tasks_semaphore_(0),
-      concurrent_compaction_tasks_active_(0) {
+      pending_compaction_tasks_semaphore_(0) {
 }

 #ifdef VERIFY_HEAP
@@ -481,24 +480,24 @@ void MarkCompactCollector::ClearMarkbits() {
 }

-class MarkCompactCollector::CompactionTask : public v8::Task {
+class MarkCompactCollector::CompactionTask : public CancelableTask {
  public:
   explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
-      : heap_(heap), spaces_(spaces) {}
+      : CancelableTask(heap->isolate()), spaces_(spaces) {}

   virtual ~CompactionTask() {}

  private:
-  // v8::Task overrides.
-  void Run() override {
-    MarkCompactCollector* mark_compact = heap_->mark_compact_collector();
+  // v8::internal::CancelableTask overrides.
+  void RunInternal() override {
+    MarkCompactCollector* mark_compact =
+        isolate()->heap()->mark_compact_collector();
     SlotsBuffer* evacuation_slots_buffer = nullptr;
     mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
     mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
     mark_compact->pending_compaction_tasks_semaphore_.Signal();
   }

-  Heap* heap_;
   CompactionSpaceCollection* spaces_;

   DISALLOW_COPY_AND_ASSIGN(CompactionTask);
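
Note: the hunk above turns CompactionTask from a plain v8::Task into a CancelableTask, so the collector can later abort by id any task that has not started running yet. Below is a minimal, self-contained sketch of that shape, assuming nothing beyond the standard library; Task, CancelableTaskBase, ExampleTask and the id counter are invented stand-ins for illustration, not V8's actual classes.

// cancelable_task_sketch.cc -- illustrative only, not V8 code.
#include <atomic>
#include <cstdint>
#include <cstdio>

// Stand-in for v8::Task.
class Task {
 public:
  virtual ~Task() = default;
  virtual void Run() = 0;
};

// Simplified cancelable task: Run() is final and only dispatches to
// RunInternal() if nobody aborted the task first; construction hands out an
// id the spawning code can remember (mirrors "ids[i - 1] = task->id();").
class CancelableTaskBase : public Task {
 public:
  CancelableTaskBase() : id_(next_id_.fetch_add(1)) {}
  uint32_t id() const { return id_; }

  void Run() final {
    bool expected = false;
    if (claimed_.compare_exchange_strong(expected, true)) RunInternal();
  }
  // Returns true if the task had not started yet and will now never run.
  bool TryAbort() {
    bool expected = false;
    return claimed_.compare_exchange_strong(expected, true);
  }

 protected:
  virtual void RunInternal() = 0;

 private:
  static std::atomic<uint32_t> next_id_;
  const uint32_t id_;
  std::atomic<bool> claimed_{false};
};

std::atomic<uint32_t> CancelableTaskBase::next_id_{1};

// Shape of the CL's CompactionTask: override RunInternal(), not Run().
class ExampleTask : public CancelableTaskBase {
  void RunInternal() override { std::printf("task %u running\n", id()); }
};

int main() {
  ExampleTask task;
  std::printf("spawned task with id %u\n", task.id());
  task.Run();  // Would normally be invoked by the platform's worker thread.
  return 0;
}

The point mirrored from the CL: subclasses implement RunInternal() rather than Run(), and whoever wins the atomic claim decides whether the work runs or the abort sticks.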
@@ -3199,7 +3198,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   }
   const int num_tasks = NumberOfParallelCompactionTasks();
-
   // Set up compaction spaces.
   CompactionSpaceCollection** compaction_spaces_for_tasks =
       new CompactionSpaceCollection*[num_tasks];
@@ -3214,17 +3212,20 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
   compaction_in_progress_ = true;
   // Kick off parallel tasks.
+  uint32_t* ids = new uint32_t[num_tasks - 1];
   for (int i = 1; i < num_tasks; i++) {
-    concurrent_compaction_tasks_active_++;
+    CompactionTask* task =
+        new CompactionTask(heap(), compaction_spaces_for_tasks[i]);
+    ids[i - 1] = task->id();
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
-        v8::Platform::kShortRunningTask);
+        task, v8::Platform::kShortRunningTask);
   }
   // Contribute in main thread. Counter and signal are in principle not needed.
   EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
-  WaitUntilCompactionCompleted();
+  WaitUntilCompactionCompleted(ids, num_tasks - 1);
+  delete[] ids;

   double compaction_duration = 0.0;
   intptr_t compacted_memory = 0;
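
Note: the hunk above remembers the id of every background CompactionTask and lets the main thread evacuate its own share before waiting. A rough sketch of that driver shape follows, with std::async standing in for V8::GetCurrentPlatform()->CallOnBackgroundThread(); EvacuateChunk and num_tasks are made-up placeholders, not the CL's functions.

// parallel_evacuation_sketch.cc -- illustrative only, not V8 code.
#include <cstdio>
#include <future>
#include <vector>

// Placeholder for evacuating one task's share of the pages.
void EvacuateChunk(int chunk) { std::printf("evacuating chunk %d\n", chunk); }

int main() {
  const int num_tasks = 4;
  std::vector<std::future<void>> background;

  // Chunks 1..num_tasks-1 go to background tasks; their handles (futures here,
  // cancelable-task ids in the CL) are remembered so the caller can later
  // cancel or wait for each one individually.
  for (int i = 1; i < num_tasks; i++) {
    background.push_back(std::async(std::launch::async, EvacuateChunk, i));
  }

  // The main thread contributes by processing chunk 0 itself rather than only
  // blocking, mirroring EvacuatePages(compaction_spaces_for_tasks[0], ...).
  EvacuateChunk(0);

  // Stand-in for WaitUntilCompactionCompleted(ids, num_tasks - 1).
  for (auto& f : background) f.get();
  return 0;
}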
@@ -3290,10 +3291,16 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
 }


-void MarkCompactCollector::WaitUntilCompactionCompleted() {
-  while (concurrent_compaction_tasks_active_ > 0) {
-    pending_compaction_tasks_semaphore_.Wait();
-    concurrent_compaction_tasks_active_--;
+void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
+                                                        int len) {
+  // Try to cancel compaction tasks that have not been run (as they might be
+  // stuck in a worker queue). Tasks that cannot be canceled have either
+  // already completed or are still running, hence we need to wait for their
+  // semaphore signal.
+  for (int i = 0; i < len; i++) {
+    if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) {
+      pending_compaction_tasks_semaphore_.Wait();
+    }
   }
   compaction_in_progress_ = false;
 }
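
Note: the rewritten WaitUntilCompactionCompleted() above either aborts a task that is still sitting in a worker queue or waits for one semaphore signal per task it could not abort, and every task that actually runs signals exactly once. Below is a self-contained sketch of that abort-or-wait protocol; Semaphore and CancelableJob are simplified stand-ins, not V8's base::Semaphore or CancelableTaskManager.

// cancel_or_wait_sketch.cc -- illustrative only, not V8 code.
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// Tiny counting semaphore, standing in for base::Semaphore.
class Semaphore {
 public:
  void Signal() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++count_;
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    --count_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_ = 0;
};

// A job that can be aborted only as long as it has not started running.
struct CancelableJob {
  enum Status { kWaiting, kRunning, kCanceled };
  std::atomic<int> status{kWaiting};

  // Worker side: claim the job; fails if it was aborted first.
  bool TryRun() {
    int expected = kWaiting;
    return status.compare_exchange_strong(expected, kRunning);
  }
  // Waiter side: abort the job; fails if it already started (or finished).
  bool TryAbort() {
    int expected = kWaiting;
    return status.compare_exchange_strong(expected, kCanceled);
  }
};

int main() {
  const int kTasks = 4;
  Semaphore done;
  std::vector<CancelableJob> jobs(kTasks);
  std::vector<std::thread> workers;

  for (int i = 0; i < kTasks; i++) {
    workers.emplace_back([&jobs, &done, i] {
      if (jobs[i].TryRun()) {
        std::printf("job %d ran\n", i);
        done.Signal();  // Mirrors pending_compaction_tasks_semaphore_.Signal().
      }
    });
  }

  // Mirrors the new WaitUntilCompactionCompleted(): abort whatever has not
  // started, and wait once on the semaphore for each job that could not be
  // aborted.
  for (int i = 0; i < kTasks; i++) {
    if (!jobs[i].TryAbort()) done.Wait();
  }

  for (auto& t : workers) t.join();
  return 0;
}

Because a job is either aborted (and never signals) or runs (and signals exactly once), the number of Wait() calls always matches the number of signals, so the waiting loop cannot deadlock.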