Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 1330463002: [not for landing, heap] Parallel compaction in main GC pause (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase: Remove obsolete changes (created 5 years, 3 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/mark-compact.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
 #include "src/cpu-profiler.h"
 #include "src/deoptimizer.h"
 #include "src/execution.h"
 #include "src/frames-inl.h"
 #include "src/gdb-jit.h"
 #include "src/global-handles.h"
 #include "src/heap/array-buffer-tracker.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/incremental-marking.h"
 #include "src/heap/mark-compact-inl.h"
 #include "src/heap/object-stats.h"
 #include "src/heap/objects-visiting.h"
 #include "src/heap/objects-visiting-inl.h"
 #include "src/heap/slots-buffer.h"
+#include "src/heap/spaces.h"
 #include "src/heap/spaces-inl.h"
 #include "src/heap-profiler.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
 #include "src/v8.h"

 namespace v8 {
 namespace internal {


(...skipping 16 matching lines...)
 #ifdef DEBUG
       state_(IDLE),
 #endif
       marking_parity_(ODD_MARKING_PARITY),
       compacting_(false),
       was_marked_incrementally_(false),
       sweeping_in_progress_(false),
       parallel_compaction_in_progress_(false),
       pending_sweeper_jobs_semaphore_(0),
       pending_compaction_jobs_semaphore_(0),
+      concurrent_compaction_tasks_active_(0),
       evacuation_(false),
       slots_buffer_allocator_(nullptr),
       migration_slots_buffer_(nullptr),
       heap_(heap),
       marking_deque_memory_(NULL),
       marking_deque_memory_committed_(0),
       code_flusher_(NULL),
       have_code_to_deoptimize_(false) {
 }

(...skipping 396 matching lines...)
   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     Marking::MarkWhite(Marking::MarkBitFrom(obj));
     Page::FromAddress(obj->address())->ResetProgressBar();
     Page::FromAddress(obj->address())->ResetLiveBytes();
   }
 }


 class MarkCompactCollector::CompactionTask : public v8::Task {
  public:
-  explicit CompactionTask(Heap* heap) : heap_(heap) {}
+  explicit CompactionTask(Heap* heap, CompactionSpaces* compaction_spaces)
+      : heap_(heap), compaction_spaces_(compaction_spaces) {}

   virtual ~CompactionTask() {}

  private:
   // v8::Task overrides.
   void Run() override {
-    // TODO(mlippautz, hpayer): EvacuatePages is not thread-safe and can just be
-    // called by one thread concurrently.
-    heap_->mark_compact_collector()->EvacuatePages();
+    heap_->mark_compact_collector()->EvacuatePagesUsingCompactionSpace(
+        compaction_spaces_);
     heap_->mark_compact_collector()
         ->pending_compaction_jobs_semaphore_.Signal();
   }

   Heap* heap_;
+  CompactionSpaces* compaction_spaces_;

   DISALLOW_COPY_AND_ASSIGN(CompactionTask);
 };


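The task wrapper above is deliberately thin: the platform runs it on a background thread, and the single Signal() at the end of Run() is the only completion notification the collector gets. A minimal, self-contained sketch of that handoff, with std::thread and a hand-rolled counting semaphore standing in for v8::Platform and base::Semaphore (Semaphore, RunJob, and kTasks are illustrative names, not V8 API):

    #include <condition_variable>
    #include <mutex>
    #include <thread>
    #include <vector>

    // Counting semaphore stand-in for v8::base::Semaphore.
    class Semaphore {
     public:
      explicit Semaphore(int count) : count_(count) {}
      void Signal() {
        std::lock_guard<std::mutex> guard(mutex_);
        count_++;
        cv_.notify_one();
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        count_--;
      }

     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_;
    };

    // Mirrors CompactionTask::Run(): do the work, then signal exactly once.
    void RunJob(Semaphore* pending_jobs) {
      // ... evacuate pages using this task's private compaction space ...
      pending_jobs->Signal();
    }

    int main() {
      const int kTasks = 2;
      Semaphore pending_jobs(0);
      std::vector<std::thread> workers;
      for (int i = 0; i < kTasks; i++) workers.emplace_back(RunJob, &pending_jobs);
      // One Wait() per task, as in WaitUntilCompactionCompleted().
      for (int i = 0; i < kTasks; i++) pending_jobs.Wait();
      for (auto& t : workers) t.join();
      return 0;
    }
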
 class MarkCompactCollector::SweeperTask : public v8::Task {
  public:
   SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}

   virtual ~SweeperTask() {}
(...skipping 2814 matching lines...)
   while (it.has_next()) {
     NewSpacePage* p = it.next();
     survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
   }

   heap_->IncrementYoungSurvivorsCounter(survivors_size);
   new_space->set_age_mark(new_space->top());
 }


+bool MarkCompactCollector::EvacuateLiveObjectsFromPageWithoutEmergency(
+    Page* p, PagedSpace* target_space) {
+  AlwaysAllocateScope always_allocate(isolate());
+  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
+  p->SetWasSwept();
+
+  int offsets[16];
+
+  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    if (*cell == 0) continue;
+
+    int live_objects = MarkWordToObjectStarts(*cell, offsets);
+    for (int i = 0; i < live_objects; i++) {
+      Address object_addr = cell_base + offsets[i] * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(object_addr);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+      int size = object->Size();
+      AllocationAlignment alignment = object->RequiredAlignment();
+      HeapObject* target_object = nullptr;
+      AllocationResult allocation = target_space->AllocateRaw(size, alignment);
+      if (!allocation.To(&target_object)) {
+        return false;
+      }
+      MigrateObject(target_object, object, size, target_space->identity());
+      DCHECK(object->map_word().IsForwardingAddress());
+    }
+
+    // Clear marking bits for current cell.
+    *cell = 0;
+  }
+  p->ResetLiveBytes();
+  return true;
+}
+
+
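This new variant matches EvacuateLiveObjectsFromPage below except for the failure path: if AllocateRaw cannot satisfy a request from the task-local compaction space, it returns false so the caller can mark the page as aborted, rather than falling back to emergency handling. The loop is driven by MarkWordToObjectStarts, which turns a 32-bit mark cell into object-start offsets. A minimal sketch of that contract, assuming one mark bit per object start (the production helper may be implemented differently, e.g. table-driven; MarkWordToObjectStartsSketch is an illustrative name):

    #include <cstdint>

    // Each set bit i in a mark cell denotes an object starting at
    // cell_base + i * kPointerSize. Since objects span at least two words,
    // set bits are never adjacent, so a 32-bit cell yields at most 16
    // starts, matching the int offsets[16] buffer used by the callers.
    static int MarkWordToObjectStartsSketch(uint32_t cell, int* offsets) {
      int count = 0;
      while (cell != 0) {
        int bit = __builtin_ctz(cell);  // Lowest set bit (GCC/Clang builtin).
        offsets[count++] = bit;
        cell &= cell - 1;  // Clear the lowest set bit.
      }
      return count;
    }

    int main() {
      int offsets[16];
      // Bits 0 and 2 set: two object starts, at offsets 0 and 2.
      return MarkWordToObjectStartsSketch(0x5u, offsets) == 2 ? 0 : 1;
    }
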
 void MarkCompactCollector::EvacuateLiveObjectsFromPage(
     Page* p, PagedSpace* target_space) {
   AlwaysAllocateScope always_allocate(isolate());
   DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
   p->SetWasSwept();

   int offsets[16];

   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
     Address cell_base = it.CurrentCellBase();
(...skipping 28 matching lines...)
     }

     // Clear marking bits for current cell.
     *cell = 0;
   }
   p->ResetLiveBytes();
 }


 void MarkCompactCollector::EvacuatePagesInParallel() {
+  // As soon as we have at least 7 pages to evacuate, we kick off another
+  // task. We keep a maximum of 2 tasks (including the main thread), though.
+  const int num_tasks = Min(1 + evacuation_candidates_.length() / 7, 2);
+  if (num_tasks == 1) {
+    // Use the single-threaded version for now.
+    EvacuatePages();
+    return;
+  }
+
+  // Set up the per-task compaction spaces.
+  CompactionSpaces** compaction_spaces_for_tasks =
+      new CompactionSpaces*[num_tasks];
+  FreeList** free_lists = new FreeList*[2 * num_tasks];
+  for (int i = 0; i < num_tasks; i++) {
+    compaction_spaces_for_tasks[i] = new CompactionSpaces(heap());
+    free_lists[i] = compaction_spaces_for_tasks[i]->Get(OLD_SPACE)->free_list();
+    free_lists[i + num_tasks] =
+        compaction_spaces_for_tasks[i]->Get(CODE_SPACE)->free_list();
+  }
+  // Move free memory over to the compaction spaces. If enough memory is
+  // available, or if we want to preserve allocation order, this step can be
+  // omitted.
+  heap()->old_space()->free_list()->Divide(free_lists, num_tasks);
+  heap()->code_space()->free_list()->Divide(&free_lists[num_tasks], num_tasks);
+  delete[] free_lists;
+
   parallel_compaction_in_progress_ = true;
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new CompactionTask(heap()), v8::Platform::kShortRunningTask);
+  // Kick off the parallel tasks.
+  for (int i = 1; i < num_tasks; i++) {
+    concurrent_compaction_tasks_active_++;
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
+        v8::Platform::kShortRunningTask);
+  }
+
+  // Contribute on the main thread. The counter and the signal are in
+  // principle not needed here.
+  concurrent_compaction_tasks_active_++;
+  EvacuatePagesUsingCompactionSpace(compaction_spaces_for_tasks[0]);
+  pending_compaction_jobs_semaphore_.Signal();
+
+  // Wait for all tasks to finish.
+  WaitUntilCompactionCompleted();
+  parallel_compaction_in_progress_ = false;
+
+  // Merge the compacted memory back into the main spaces.
+  for (int i = 0; i < num_tasks; i++) {
+    heap()->old_space()->MergeCompactionSpace(
+        compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
+    heap()->code_space()->MergeCompactionSpace(
+        compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
+    delete compaction_spaces_for_tasks[i];
+  }
+  delete[] compaction_spaces_for_tasks;
+
+  // Finalize sequentially.
+  const int num_pages = evacuation_candidates_.length();
+  int abandoned_pages = 0;
+  for (int i = 0; i < num_pages; i++) {
+    Page* p = evacuation_candidates_[i];
+    switch (p->parallel_compaction_state().Value()) {
+      case MemoryChunk::kCompactingAborted:
+        slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
+        p->ClearEvacuationCandidate();
+        p->SetFlag(Page::RESCAN_ON_EVACUATION);
+        abandoned_pages++;
+        break;
+      case MemoryChunk::kCompactingFinalize:
+        p->SetWasSwept();
+        p->Unlink();
+        break;
+      case MemoryChunk::kNoEvacuationCandidate:
+        break;
+      default:
+        // We should not observe kCompactingInProgress or kCompactingDone here.
+        UNREACHABLE();
+    }
+    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+  }
+  if (num_pages > 0) {
+    if (FLAG_trace_fragmentation) {
+      if (abandoned_pages != 0) {
+        PrintF(
+            " Abandoned %d out of %d page compactions due to lack of memory\n",
+            abandoned_pages, num_pages);
+      } else {
+        PrintF(" Compacted %d pages\n", num_pages);
+      }
+    }
+  }
 }


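FreeList::Divide and PagedSpace::MergeCompactionSpace are introduced in src/heap/spaces.h (the next file in this issue), so only their call sites are visible here. As a rough model under that caveat: Divide hands the donor space's free memory out across the per-task free lists so each task can allocate without locking, and MergeCompactionSpace later returns the remaining memory to the main space. A simplified sketch over a singly linked free list (FreeNode, Divide, and Merge are illustrative stand-ins, not the V8 types):

    #include <cstddef>

    struct FreeNode {
      size_t size;
      FreeNode* next;
    };

    // Round-robin split of a donor free list across n task-local lists, a
    // simplified model of what FreeList::Divide might do. The donor is left
    // empty; ownership of the nodes moves to lists[0..n-1].
    void Divide(FreeNode** donor, FreeNode** lists, int n) {
      for (int i = 0; i < n; i++) lists[i] = nullptr;
      int i = 0;
      FreeNode* node = *donor;
      while (node != nullptr) {
        FreeNode* next = node->next;
        node->next = lists[i];
        lists[i] = node;
        i = (i + 1) % n;
        node = next;
      }
      *donor = nullptr;
    }

    // Merge a task-local list back into the donor, modeling the
    // MergeCompactionSpace step after all tasks have finished.
    void Merge(FreeNode** donor, FreeNode* list) {
      while (list != nullptr) {
        FreeNode* next = list->next;
        list->next = *donor;
        *donor = list;
        list = next;
      }
    }

    int main() {
      // Build a donor list of four blocks and split it across two tasks.
      FreeNode nodes[4] = {{32, nullptr}, {64, nullptr}, {96, nullptr}, {128, nullptr}};
      for (int i = 0; i < 3; i++) nodes[i].next = &nodes[i + 1];
      FreeNode* donor = &nodes[0];
      FreeNode* lists[2];
      Divide(&donor, lists, 2);
      Merge(&donor, lists[0]);
      Merge(&donor, lists[1]);
      return 0;
    }
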
 void MarkCompactCollector::WaitUntilCompactionCompleted() {
-  pending_compaction_jobs_semaphore_.Wait();
-  parallel_compaction_in_progress_ = false;
+  while (concurrent_compaction_tasks_active_ > 0) {
+    pending_compaction_jobs_semaphore_.Wait();
+    concurrent_compaction_tasks_active_--;
+  }
 }


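The single Wait() became a loop: every started task, including the main thread's own contribution, posts exactly one signal, and the counter records how many signals remain to be consumed; clearing parallel_compaction_in_progress_ moved to the caller. The same shape expressed with C++20's std::counting_semaphore as a stand-in for base::Semaphore (illustrative names; compile with -std=c++20):

    #include <semaphore>
    #include <thread>
    #include <vector>

    std::counting_semaphore<8> pending_jobs(0);  // Stand-in for the V8 semaphore.

    int main() {
      int tasks_active = 0;
      std::vector<std::thread> workers;
      for (int i = 0; i < 2; i++) {
        tasks_active++;
        workers.emplace_back([] {
          // ... evacuate pages ...
          pending_jobs.release();  // One signal per task, as in Run().
        });
      }
      // Mirrors the loop above: consume one signal per started task.
      while (tasks_active > 0) {
        pending_jobs.acquire();
        tasks_active--;
      }
      for (auto& t : workers) t.join();
      return 0;
    }
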
+void MarkCompactCollector::EvacuatePagesUsingCompactionSpace(
+    CompactionSpaces* compaction_spaces) {
+  for (int i = 0; i < evacuation_candidates_.length(); i++) {
+    Page* p = evacuation_candidates_[i];
+    DCHECK(p->IsEvacuationCandidate() ||
+           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+    DCHECK(static_cast<int>(p->parallel_sweeping()) ==
+           MemoryChunk::SWEEPING_DONE);
+
+    if (!p->parallel_compaction_state().TrySetValue(
+            MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
+      continue;
+    }
+
+    // In principle, reading IsEvacuationCandidate() should be fine; however,
+    // we avoid reading the state when we don't have exclusive access.
+    if (p->IsEvacuationCandidate()) {
+      DCHECK_EQ(p->parallel_compaction_state().Value(),
+                MemoryChunk::kCompactingInProgress);
+      if (EvacuateLiveObjectsFromPageWithoutEmergency(
+              p, compaction_spaces->Get(p->owner()->identity()))) {
+        p->parallel_compaction_state().SetValue(
+            MemoryChunk::kCompactingFinalize);
+      } else {
+        p->parallel_compaction_state().SetValue(
+            MemoryChunk::kCompactingAborted);
+      }
+    } else {
+      p->parallel_compaction_state().SetValue(
+          MemoryChunk::kNoEvacuationCandidate);
+    }
+  }
+}
+
+
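TrySetValue is what lets several tasks walk the same evacuation_candidates_ list safely: exactly one task wins the kCompactingDone to kCompactingInProgress transition for each page, and the others skip it. Assuming the AtomicValue wrapper behaves like a compare-and-swap, the claiming protocol reduces to this sketch (state names follow the diff; Page and ProcessCandidates are illustrative stand-ins):

    #include <atomic>
    #include <vector>

    enum ParallelCompactingState {
      kCompactingDone,
      kCompactingInProgress,
      kCompactingFinalize,
      kCompactingAborted,
      kNoEvacuationCandidate,
    };

    struct Page {
      std::atomic<ParallelCompactingState> state{kCompactingDone};
      bool is_evacuation_candidate = true;
    };

    // Returns true if this thread claimed the page, modeling
    // AtomicValue::TrySetValue(kCompactingDone, kCompactingInProgress).
    bool TryClaim(Page* p) {
      ParallelCompactingState expected = kCompactingDone;
      return p->state.compare_exchange_strong(expected, kCompactingInProgress);
    }

    // Each task runs this loop over the shared candidate list; pages already
    // claimed by another task are skipped, as in
    // EvacuatePagesUsingCompactionSpace().
    void ProcessCandidates(std::vector<Page*>* candidates) {
      for (Page* p : *candidates) {
        if (!TryClaim(p)) continue;
        if (!p->is_evacuation_candidate) {
          p->state.store(kNoEvacuationCandidate);
          continue;
        }
        bool evacuated = true;  // ... evacuate live objects; may fail on OOM ...
        p->state.store(evacuated ? kCompactingFinalize : kCompactingAborted);
      }
    }

    int main() {
      Page a, b;
      std::vector<Page*> candidates = {&a, &b};
      ProcessCandidates(&candidates);  // In V8 this runs on every task.
      return 0;
    }
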
 void MarkCompactCollector::EvacuatePages() {
   int npages = evacuation_candidates_.length();
   int abandoned_pages = 0;
   for (int i = 0; i < npages; i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
     DCHECK(static_cast<int>(p->parallel_sweeping()) ==
            MemoryChunk::SWEEPING_DONE);
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
(...skipping 228 matching lines...)
     EvacuationScope evacuation_scope(this);
     EvacuateNewSpace();
   }

   {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_EVACUATE_PAGES);
     EvacuationScope evacuation_scope(this);
     if (FLAG_parallel_compaction) {
       EvacuatePagesInParallel();
-      WaitUntilCompactionCompleted();
     } else {
       EvacuatePages();
     }
   }

   // Second pass: find pointers to new space and update them.
   PointersUpdatingVisitor updating_visitor(heap());

   {
     GCTracer::Scope gc_scope(heap()->tracer(),
(...skipping 871 matching lines...)
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(&rinfo, target);
     }
   }
 }

 }  // namespace internal
 }  // namespace v8
