Index: src/heap/mark-compact.cc |
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc |
index c94e15757965bca5b07c7ad2d335a7e64094ff15..ba61ec3ad41adb685f6b6acddb1bf479200d4f02 100644 |
--- a/src/heap/mark-compact.cc |
+++ b/src/heap/mark-compact.cc |
@@ -65,8 +65,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap) |
sweeping_in_progress_(false), |
compaction_in_progress_(false), |
pending_sweeper_tasks_semaphore_(0), |
- pending_compaction_tasks_semaphore_(0), |
- concurrent_compaction_tasks_active_(0) { |
+ pending_compaction_tasks_semaphore_(0) { |
} |
#ifdef VERIFY_HEAP |
@@ -481,24 +480,24 @@ void MarkCompactCollector::ClearMarkbits() { |
} |
-class MarkCompactCollector::CompactionTask : public v8::Task { |
+class MarkCompactCollector::CompactionTask : public CancelableTask { |
public: |
explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces) |
- : heap_(heap), spaces_(spaces) {} |
+ : CancelableTask(heap->isolate()), spaces_(spaces) {} |
virtual ~CompactionTask() {} |
private: |
- // v8::Task overrides. |
- void Run() override { |
- MarkCompactCollector* mark_compact = heap_->mark_compact_collector(); |
+ // v8::internal::CancelableTask overrides. |
+ void RunInternal() override { |
+ MarkCompactCollector* mark_compact = |
+ isolate()->heap()->mark_compact_collector(); |
SlotsBuffer* evacuation_slots_buffer = nullptr; |
mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer); |
mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer); |
mark_compact->pending_compaction_tasks_semaphore_.Signal(); |
} |
- Heap* heap_; |
CompactionSpaceCollection* spaces_; |
DISALLOW_COPY_AND_ASSIGN(CompactionTask); |
@@ -3199,7 +3198,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() { |
} |
const int num_tasks = NumberOfParallelCompactionTasks(); |
- |
// Set up compaction spaces. |
CompactionSpaceCollection** compaction_spaces_for_tasks = |
new CompactionSpaceCollection*[num_tasks]; |
@@ -3214,17 +3212,29 @@ void MarkCompactCollector::EvacuatePagesInParallel() { |
compaction_in_progress_ = true; |
// Kick off parallel tasks. |
+ uint32_t* ids = new uint32_t[num_tasks - 1]; |
for (int i = 1; i < num_tasks; i++) { |
- concurrent_compaction_tasks_active_++; |
+ CompactionTask* task = |
+ new CompactionTask(heap(), compaction_spaces_for_tasks[i]); |
+ ids[i - 1] = task->id(); |
V8::GetCurrentPlatform()->CallOnBackgroundThread( |
- new CompactionTask(heap(), compaction_spaces_for_tasks[i]), |
- v8::Platform::kShortRunningTask); |
+ task, v8::Platform::kShortRunningTask); |
} |
  // Contribute in main thread. Counter and signal are in principle not needed. |
EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_); |
- WaitUntilCompactionCompleted(); |
+ // Try to cancel compaction tasks that have not been run (as they might be |
Michael Lippautz
2015/11/16 08:08:29
I didn't move this to its own function because as
Hannes Payer (out of office)
2015/11/16 11:06:24
I would have preferred keeping it. It gives the co
Michael Lippautz
2015/11/16 11:22:33
Alright, done.
|
+ // stuck in a worker queue). Tasks that cannot be canceled, have either |
+ // already completed or are still running, hence we need to wait for their |
+ // semaphore signal. |
+ for (int i = 1; i < num_tasks; i++) { |
+ if (!heap()->isolate()->cancelable_task_manager()->TryAbort(ids[i - 1])) { |
+ pending_compaction_tasks_semaphore_.Wait(); |
+ } |
+ } |
+ delete[] ids; |
+ compaction_in_progress_ = false; |
double compaction_duration = 0.0; |
intptr_t compacted_memory = 0; |
@@ -3290,15 +3300,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() { |
} |
-void MarkCompactCollector::WaitUntilCompactionCompleted() { |
- while (concurrent_compaction_tasks_active_ > 0) { |
- pending_compaction_tasks_semaphore_.Wait(); |
- concurrent_compaction_tasks_active_--; |
- } |
- compaction_in_progress_ = false; |
-} |
- |
- |
void MarkCompactCollector::EvacuatePages( |
CompactionSpaceCollection* compaction_spaces, |
SlotsBuffer** evacuation_slots_buffer) { |