OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 3249 matching lines...) |
3260 evacuation_slots_buffers_.Add(evacuation_slots_buffer); | 3260 evacuation_slots_buffers_.Add(evacuation_slots_buffer); |
3261 } | 3261 } |
3262 | 3262 |
3263 | 3263 |
3264 bool MarkCompactCollector::EvacuateLiveObjectsFromPage( | 3264 bool MarkCompactCollector::EvacuateLiveObjectsFromPage( |
3265 Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) { | 3265 Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) { |
3266 AlwaysAllocateScope always_allocate(isolate()); | 3266 AlwaysAllocateScope always_allocate(isolate()); |
3267 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); | 3267 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); |
3268 | 3268 |
3269 int offsets[16]; | 3269 int offsets[16]; |
3270 | |
3271 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { | 3270 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { |
3272 Address cell_base = it.CurrentCellBase(); | 3271 Address cell_base = it.CurrentCellBase(); |
3273 MarkBit::CellType* cell = it.CurrentCell(); | 3272 MarkBit::CellType* cell = it.CurrentCell(); |
3274 | 3273 |
3275 if (*cell == 0) continue; | 3274 if (*cell == 0) continue; |
3276 | 3275 |
3277 int live_objects = MarkWordToObjectStarts(*cell, offsets); | 3276 int live_objects = MarkWordToObjectStarts(*cell, offsets); |
3278 for (int i = 0; i < live_objects; i++) { | 3277 for (int i = 0; i < live_objects; i++) { |
3279 Address object_addr = cell_base + offsets[i] * kPointerSize; | 3278 Address object_addr = cell_base + offsets[i] * kPointerSize; |
3280 HeapObject* object = HeapObject::FromAddress(object_addr); | 3279 HeapObject* object = HeapObject::FromAddress(object_addr); |
(...skipping 21 matching lines...) |
3302 // Clear marking bits for current cell. | 3301 // Clear marking bits for current cell. |
3303 *cell = 0; | 3302 *cell = 0; |
3304 } | 3303 } |
3305 p->ResetLiveBytes(); | 3304 p->ResetLiveBytes(); |
3306 return true; | 3305 return true; |
3307 } | 3306 } |
3308 | 3307 |
3309 | 3308 |
3310 int MarkCompactCollector::NumberOfParallelCompactionTasks() { | 3309 int MarkCompactCollector::NumberOfParallelCompactionTasks() { |
3311 if (!FLAG_parallel_compaction) return 1; | 3310 if (!FLAG_parallel_compaction) return 1; |
3312 // We cap the number of parallel compaction tasks by | 3311 // Compute the number of needed tasks based on a target compaction time, the |
| 3312 // profiled compaction speed and marked live memory. |
| 3313 // |
| 3314 // The number of parallel compaction tasks is limited by: |
| 3315 // - #evacuation pages |
3313 // - (#cores - 1) | 3316 // - (#cores - 1) |
3314 // - a value depending on the live memory in evacuation candidates | |
3315 // - a hard limit | 3317 // - a hard limit |
3316 // | 3318 const double kTargetCompactionTimeInMs = 1; |
3317 // TODO(mlippautz): Instead of basing the limit on live memory, we could also | |
3318 // compute the number from the time it takes to evacuate memory and a given | |
3319 // desired time in which compaction should be finished. | |
3320 const int kLiveMemoryPerCompactionTask = 2 * Page::kPageSize; | |
3321 const int kMaxCompactionTasks = 8; | 3319 const int kMaxCompactionTasks = 8; |
3322 int live_bytes = 0; | 3320 |
| 3321 intptr_t compaction_speed = |
| 3322 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
| 3323 if (compaction_speed == 0) return 1; |
| 3324 |
| 3325 intptr_t live_bytes = 0; |
3323 for (Page* page : evacuation_candidates_) { | 3326 for (Page* page : evacuation_candidates_) { |
3324 live_bytes += page->LiveBytes(); | 3327 live_bytes += page->LiveBytes(); |
3325 } | 3328 } |
3326 return Min(kMaxCompactionTasks, | 3329 |
3327 Min(1 + live_bytes / kLiveMemoryPerCompactionTask, | 3330 const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1); |
3328 Max(1, base::SysInfo::NumberOfProcessors() - 1))); | 3331 const int tasks = |
| 3332 1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed / |
| 3333 kTargetCompactionTimeInMs); |
| 3334 const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks); |
| 3335 const int tasks_capped_cores = Min(cores, tasks_capped_pages); |
| 3336 const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores); |
| 3337 return tasks_capped_hard; |
3329 } | 3338 } |
3330 | 3339 |
3331 | 3340 |
3332 void MarkCompactCollector::EvacuatePagesInParallel() { | 3341 void MarkCompactCollector::EvacuatePagesInParallel() { |
3333 const int num_pages = evacuation_candidates_.length(); | 3342 const int num_pages = evacuation_candidates_.length(); |
3334 if (num_pages == 0) return; | 3343 if (num_pages == 0) return; |
3335 | 3344 |
| 3345 // Used for trace summary. |
| 3346 intptr_t live_bytes = 0; |
| 3347 intptr_t compaction_speed = 0; |
| 3348 if (FLAG_trace_fragmentation) { |
| 3349 for (Page* page : evacuation_candidates_) { |
| 3350 live_bytes += page->LiveBytes(); |
| 3351 } |
| 3352 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
| 3353 } |
3336 const int num_tasks = NumberOfParallelCompactionTasks(); | 3354 const int num_tasks = NumberOfParallelCompactionTasks(); |
3337 | 3355 |
| 3356 |
3338 // Set up compaction spaces. | 3357 // Set up compaction spaces. |
3339 CompactionSpaceCollection** compaction_spaces_for_tasks = | 3358 CompactionSpaceCollection** compaction_spaces_for_tasks = |
3340 new CompactionSpaceCollection*[num_tasks]; | 3359 new CompactionSpaceCollection*[num_tasks]; |
3341 for (int i = 0; i < num_tasks; i++) { | 3360 for (int i = 0; i < num_tasks; i++) { |
3342 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap()); | 3361 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap()); |
3343 } | 3362 } |
3344 | 3363 |
3345 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, | 3364 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, |
3346 num_tasks); | 3365 num_tasks); |
3347 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, | 3366 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, |
3348 num_tasks); | 3367 num_tasks); |
3349 | 3368 |
3350 compaction_in_progress_ = true; | 3369 compaction_in_progress_ = true; |
3351 // Kick off parallel tasks. | 3370 // Kick off parallel tasks. |
3352 for (int i = 1; i < num_tasks; i++) { | 3371 for (int i = 1; i < num_tasks; i++) { |
3353 concurrent_compaction_tasks_active_++; | 3372 concurrent_compaction_tasks_active_++; |
3354 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 3373 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
3355 new CompactionTask(heap(), compaction_spaces_for_tasks[i]), | 3374 new CompactionTask(heap(), compaction_spaces_for_tasks[i]), |
3356 v8::Platform::kShortRunningTask); | 3375 v8::Platform::kShortRunningTask); |
3357 } | 3376 } |
3358 | 3377 |
3359 // Contribute on the main thread. Counter and signal are in principle not needed. | 3378 // Contribute on the main thread. Counter and signal are in principle not needed. |
3360 EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_); | 3379 EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_); |
3361 | 3380 |
3362 WaitUntilCompactionCompleted(); | 3381 WaitUntilCompactionCompleted(); |
3363 | 3382 |
| 3383 double compaction_duration = 0.0; |
| 3384 intptr_t compacted_memory = 0; |
3364 // Merge back memory (compacted and unused) from compaction spaces. | 3385 // Merge back memory (compacted and unused) from compaction spaces. |
3365 for (int i = 0; i < num_tasks; i++) { | 3386 for (int i = 0; i < num_tasks; i++) { |
3366 heap()->old_space()->MergeCompactionSpace( | 3387 heap()->old_space()->MergeCompactionSpace( |
3367 compaction_spaces_for_tasks[i]->Get(OLD_SPACE)); | 3388 compaction_spaces_for_tasks[i]->Get(OLD_SPACE)); |
3368 heap()->code_space()->MergeCompactionSpace( | 3389 heap()->code_space()->MergeCompactionSpace( |
3369 compaction_spaces_for_tasks[i]->Get(CODE_SPACE)); | 3390 compaction_spaces_for_tasks[i]->Get(CODE_SPACE)); |
| 3391 compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted(); |
| 3392 compaction_duration += compaction_spaces_for_tasks[i]->duration(); |
3370 delete compaction_spaces_for_tasks[i]; | 3393 delete compaction_spaces_for_tasks[i]; |
3371 } | 3394 } |
3372 delete[] compaction_spaces_for_tasks; | 3395 delete[] compaction_spaces_for_tasks; |
| 3396 heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory); |
3373 | 3397 |
3374 // Finalize sequentially. | 3398 // Finalize sequentially. |
3375 int abandoned_pages = 0; | 3399 int abandoned_pages = 0; |
3376 for (int i = 0; i < num_pages; i++) { | 3400 for (int i = 0; i < num_pages; i++) { |
3377 Page* p = evacuation_candidates_[i]; | 3401 Page* p = evacuation_candidates_[i]; |
3378 switch (p->parallel_compaction_state().Value()) { | 3402 switch (p->parallel_compaction_state().Value()) { |
3379 case MemoryChunk::ParallelCompactingState::kCompactingAborted: | 3403 case MemoryChunk::ParallelCompactingState::kCompactingAborted: |
3380 // We have partially compacted the page, i.e., some objects may have | 3404 // We have partially compacted the page, i.e., some objects may have |
3381 // moved, others are still in place. | 3405 // moved, others are still in place. |
3382 // We need to: | 3406 // We need to: |
(...skipping 20 matching lines...) |
3403 break; | 3427 break; |
3404 default: | 3428 default: |
3405 // We should not observe kCompactingInProgress or kCompactingDone. | 3429 // We should not observe kCompactingInProgress or kCompactingDone. |
3406 UNREACHABLE(); | 3430 UNREACHABLE(); |
3407 } | 3431 } |
3408 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | 3432 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); |
3409 } | 3433 } |
3410 if (FLAG_trace_fragmentation) { | 3434 if (FLAG_trace_fragmentation) { |
3411 PrintIsolate(isolate(), | 3435 PrintIsolate(isolate(), |
3412 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d " | 3436 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d " |
3413 "tasks=%d cores=%d\n", | 3437 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX |
| 3438 "d compaction_speed=%" V8_PTR_PREFIX "d\n", |
3414 isolate()->time_millis_since_init(), FLAG_parallel_compaction, | 3439 isolate()->time_millis_since_init(), FLAG_parallel_compaction, |
3415 num_pages, abandoned_pages, num_tasks, | 3440 num_pages, abandoned_pages, num_tasks, |
3416 base::SysInfo::NumberOfProcessors()); | 3441 base::SysInfo::NumberOfProcessors(), live_bytes, |
| 3442 compaction_speed); |
3417 } | 3443 } |
3418 } | 3444 } |
3419 | 3445 |
3420 | 3446 |
3421 void MarkCompactCollector::WaitUntilCompactionCompleted() { | 3447 void MarkCompactCollector::WaitUntilCompactionCompleted() { |
3422 while (concurrent_compaction_tasks_active_ > 0) { | 3448 while (concurrent_compaction_tasks_active_ > 0) { |
3423 pending_compaction_tasks_semaphore_.Wait(); | 3449 pending_compaction_tasks_semaphore_.Wait(); |
3424 concurrent_compaction_tasks_active_--; | 3450 concurrent_compaction_tasks_active_--; |
3425 } | 3451 } |
3426 compaction_in_progress_ = false; | 3452 compaction_in_progress_ = false; |
3427 } | 3453 } |
3428 | 3454 |
3429 | 3455 |
3430 void MarkCompactCollector::EvacuatePages( | 3456 void MarkCompactCollector::EvacuatePages( |
3431 CompactionSpaceCollection* compaction_spaces, | 3457 CompactionSpaceCollection* compaction_spaces, |
3432 SlotsBuffer** evacuation_slots_buffer) { | 3458 SlotsBuffer** evacuation_slots_buffer) { |
3433 for (int i = 0; i < evacuation_candidates_.length(); i++) { | 3459 for (int i = 0; i < evacuation_candidates_.length(); i++) { |
3434 Page* p = evacuation_candidates_[i]; | 3460 Page* p = evacuation_candidates_[i]; |
3435 DCHECK(p->IsEvacuationCandidate() || | 3461 DCHECK(p->IsEvacuationCandidate() || |
3436 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3462 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
3437 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) == | 3463 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) == |
3438 MemoryChunk::kSweepingDone); | 3464 MemoryChunk::kSweepingDone); |
3439 if (p->parallel_compaction_state().TrySetValue( | 3465 if (p->parallel_compaction_state().TrySetValue( |
3440 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { | 3466 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { |
3441 if (p->IsEvacuationCandidate()) { | 3467 if (p->IsEvacuationCandidate()) { |
3442 DCHECK_EQ(p->parallel_compaction_state().Value(), | 3468 DCHECK_EQ(p->parallel_compaction_state().Value(), |
3443 MemoryChunk::kCompactingInProgress); | 3469 MemoryChunk::kCompactingInProgress); |
| 3470 double start = heap()->MonotonicallyIncreasingTimeInMs(); |
| 3471 intptr_t live_bytes = p->LiveBytes(); |
3444 if (EvacuateLiveObjectsFromPage( | 3472 if (EvacuateLiveObjectsFromPage( |
3445 p, compaction_spaces->Get(p->owner()->identity()), | 3473 p, compaction_spaces->Get(p->owner()->identity()), |
3446 evacuation_slots_buffer)) { | 3474 evacuation_slots_buffer)) { |
3447 p->parallel_compaction_state().SetValue( | 3475 p->parallel_compaction_state().SetValue( |
3448 MemoryChunk::kCompactingFinalize); | 3476 MemoryChunk::kCompactingFinalize); |
| 3477 compaction_spaces->ReportCompactionProgress( |
| 3478 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes); |
3449 } else { | 3479 } else { |
3450 p->parallel_compaction_state().SetValue( | 3480 p->parallel_compaction_state().SetValue( |
3451 MemoryChunk::kCompactingAborted); | 3481 MemoryChunk::kCompactingAborted); |
3452 } | 3482 } |
3453 } else { | 3483 } else { |
3454 // There could be popular pages in the list of evacuation candidates | 3484 // There could be popular pages in the list of evacuation candidates |
3455 // which we do not compact. | 3485 // which we do not compact. |
3456 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | 3486 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); |
3457 } | 3487 } |
3458 } | 3488 } |
(...skipping 1141 matching lines...) |
4600 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4630 MarkBit mark_bit = Marking::MarkBitFrom(host); |
4601 if (Marking::IsBlack(mark_bit)) { | 4631 if (Marking::IsBlack(mark_bit)) { |
4602 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 4632 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
4603 RecordRelocSlot(&rinfo, target); | 4633 RecordRelocSlot(&rinfo, target); |
4604 } | 4634 } |
4605 } | 4635 } |
4606 } | 4636 } |
4607 | 4637 |
4608 } // namespace internal | 4638 } // namespace internal |
4609 } // namespace v8 | 4639 } // namespace v8 |
OLD | NEW |
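
Editor's note: two self-contained sketches follow for readers skimming the review; neither is part of the patch itself.

First, the new task-count heuristic from NumberOfParallelCompactionTasks, with the heap and GC-tracer lookups replaced by plain parameters. Here live_bytes, compaction_speed, num_evacuation_pages, and num_processors are hypothetical inputs supplied by the caller, not V8 API:

// Sketch of the capping logic added in this patch, assuming the caller
// supplies the values the real code reads from the heap and the GC tracer.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int NumberOfParallelCompactionTasks(int64_t live_bytes,
                                    int64_t compaction_speed,  // bytes/ms
                                    int num_evacuation_pages,
                                    int num_processors) {
  const double kTargetCompactionTimeInMs = 1;
  const int kMaxCompactionTasks = 8;
  if (compaction_speed == 0) return 1;  // No profiled speed available yet.

  // Tasks needed to move the marked live memory within the target time at
  // the profiled compaction speed.
  const int tasks =
      1 + static_cast<int>(static_cast<double>(live_bytes) /
                           compaction_speed / kTargetCompactionTimeInMs);
  // Cap by #evacuation pages, then by (#cores - 1), then by the hard limit,
  // mirroring the order used in the patch.
  const int cores = std::max(1, num_processors - 1);
  return std::min(kMaxCompactionTasks,
                  std::min(cores, std::min(num_evacuation_pages, tasks)));
}

int main() {
  // 4 MB of live memory in candidates, 2 MB/ms profiled speed, 16 candidate
  // pages, 8 logical processors: 3 tasks are requested and no cap applies.
  std::printf("%d\n",
              NumberOfParallelCompactionTasks(4 << 20, 2 << 20, 16, 8));
  return 0;
}

Second, the per-task bookkeeping the patch relies on through ReportCompactionProgress(), duration(), and bytes_compacted() on CompactionSpaceCollection (declared in src/heap/spaces.h, not shown in this diff). The member bodies below are an assumption about what a minimal accumulator could look like, not the actual implementation:

#include <cstdint>

// Hypothetical stand-in for the accumulator side of CompactionSpaceCollection.
class CompactionProgressBookkeeping {
 public:
  // Called once per successfully evacuated page with the time spent and the
  // live bytes that were moved.
  void ReportCompactionProgress(double duration_ms, int64_t bytes) {
    duration_ms_ += duration_ms;
    bytes_compacted_ += bytes;
  }
  double duration() const { return duration_ms_; }
  int64_t bytes_compacted() const { return bytes_compacted_; }

 private:
  double duration_ms_ = 0.0;
  int64_t bytes_compacted_ = 0;
};

After WaitUntilCompactionCompleted(), the main thread sums these per-task totals and passes them to heap()->tracer()->AddCompactionEvent(), which is presumably where CompactionSpeedInBytesPerMillisecond() gets the profile that feeds the heuristic on the next GC.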