| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 31 matching lines...) |
| 42 // The following has to hold in order for {Marking::MarkBitFrom} to not produce | 42 // The following has to hold in order for {Marking::MarkBitFrom} to not produce |
| 43 // invalid {kImpossibleBitPattern} in the marking bitmap by overlapping. | 43 // invalid {kImpossibleBitPattern} in the marking bitmap by overlapping. |
| 44 STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2); | 44 STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2); |
| 45 | 45 |
| 46 | 46 |
| 47 // ------------------------------------------------------------------------- | 47 // ------------------------------------------------------------------------- |
| 48 // MarkCompactCollector | 48 // MarkCompactCollector |
| 49 | 49 |
| 50 MarkCompactCollector::MarkCompactCollector(Heap* heap) | 50 MarkCompactCollector::MarkCompactCollector(Heap* heap) |
| 51 : // NOLINT | 51 : // NOLINT |
| 52 heap_(heap), |
| 53 page_parallel_job_semaphore_(0), |
| 52 #ifdef DEBUG | 54 #ifdef DEBUG |
| 53 state_(IDLE), | 55 state_(IDLE), |
| 54 #endif | 56 #endif |
| 55 marking_parity_(ODD_MARKING_PARITY), | 57 marking_parity_(ODD_MARKING_PARITY), |
| 56 was_marked_incrementally_(false), | 58 was_marked_incrementally_(false), |
| 57 evacuation_(false), | 59 evacuation_(false), |
| 58 heap_(heap), | 60 compacting_(false), |
| 61 black_allocation_(false), |
| 62 have_code_to_deoptimize_(false), |
| 59 marking_deque_memory_(NULL), | 63 marking_deque_memory_(NULL), |
| 60 marking_deque_memory_committed_(0), | 64 marking_deque_memory_committed_(0), |
| 61 code_flusher_(nullptr), | 65 code_flusher_(nullptr), |
| 62 embedder_heap_tracer_(nullptr), | 66 embedder_heap_tracer_(nullptr), |
| 63 have_code_to_deoptimize_(false), | |
| 64 compacting_(false), | |
| 65 sweeper_(heap) { | 67 sweeper_(heap) { |
| 66 } | 68 } |
| 67 | 69 |
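The constructor change above adds a page_parallel_job_semaphore_(0) member and
reorders the initializer list so heap_ comes first and compacting_ /
have_code_to_deoptimize_ move up, presumably to match the declaration order in
mark-compact.h. C++ always initializes members in declaration order regardless
of the list order, so keeping the two in sync avoids -Wreorder warnings. A
minimal sketch of that pattern (class name and member layout are illustrative,
not taken from this CL):

    class CollectorLike {
     public:
      explicit CollectorLike(Heap* heap)
          : heap_(heap),                      // declared first, initialized first
            page_parallel_job_semaphore_(0),  // starts at 0; lent to page jobs
            compacting_(false) {}

     private:
      Heap* heap_;
      base::Semaphore page_parallel_job_semaphore_;
      bool compacting_;
    };
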
| 68 #ifdef VERIFY_HEAP | 70 #ifdef VERIFY_HEAP |
| 69 class VerifyMarkingVisitor : public ObjectVisitor { | 71 class VerifyMarkingVisitor : public ObjectVisitor { |
| 70 public: | 72 public: |
| 71 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} | 73 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} |
| 72 | 74 |
| 73 void VisitPointers(Object** start, Object** end) override { | 75 void VisitPointers(Object** start, Object** end) override { |
| 74 for (Object** current = start; current < end; current++) { | 76 for (Object** current = start; current < end; current++) { |
| (...skipping 398 matching lines...) |
| 473 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); | 475 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); |
| 474 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); | 476 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); |
| 475 const int offset = space_to_start_ - FIRST_PAGED_SPACE; | 477 const int offset = space_to_start_ - FIRST_PAGED_SPACE; |
| 476 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; | 478 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; |
| 477 for (int i = 0; i < num_spaces; i++) { | 479 for (int i = 0; i < num_spaces; i++) { |
| 478 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); | 480 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); |
| 479 DCHECK_GE(space_id, FIRST_PAGED_SPACE); | 481 DCHECK_GE(space_id, FIRST_PAGED_SPACE); |
| 480 DCHECK_LE(space_id, LAST_PAGED_SPACE); | 482 DCHECK_LE(space_id, LAST_PAGED_SPACE); |
| 481 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); | 483 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); |
| 482 } | 484 } |
| 483 pending_sweeper_tasks_->Signal("SweeperTask::Run"); | 485 pending_sweeper_tasks_->Signal(); |
| 484 } | 486 } |
| 485 | 487 |
| 486 Sweeper* sweeper_; | 488 Sweeper* sweeper_; |
| 487 base::Semaphore* pending_sweeper_tasks_; | 489 base::Semaphore* pending_sweeper_tasks_; |
| 488 AllocationSpace space_to_start_; | 490 AllocationSpace space_to_start_; |
| 489 | 491 |
| 490 DISALLOW_COPY_AND_ASSIGN(SweeperTask); | 492 DISALLOW_COPY_AND_ASSIGN(SweeperTask); |
| 491 }; | 493 }; |
| 492 | 494 |
| 493 void MarkCompactCollector::Sweeper::StartSweeping() { | 495 void MarkCompactCollector::Sweeper::StartSweeping() { |
| (...skipping 84 matching lines...) |
| 578 VerifyEvacuation(heap_); | 580 VerifyEvacuation(heap_); |
| 579 } | 581 } |
| 580 #endif | 582 #endif |
| 581 } | 583 } |
| 582 | 584 |
| 583 bool MarkCompactCollector::Sweeper::IsSweepingCompleted() { | 585 bool MarkCompactCollector::Sweeper::IsSweepingCompleted() { |
| 584 if (!pending_sweeper_tasks_semaphore_.WaitFor( | 586 if (!pending_sweeper_tasks_semaphore_.WaitFor( |
| 585 base::TimeDelta::FromSeconds(0))) { | 587 base::TimeDelta::FromSeconds(0))) { |
| 586 return false; | 588 return false; |
| 587 } | 589 } |
| 588 pending_sweeper_tasks_semaphore_.Signal("Sweeper::IsSweepingCompleted"); | 590 pending_sweeper_tasks_semaphore_.Signal(); |
| 589 return true; | 591 return true; |
| 590 } | 592 } |
| 591 | 593 |
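IsSweepingCompleted() above relies on a probe-and-restore idiom: WaitFor with a
zero timeout is a non-blocking try-acquire, and the immediate Signal() puts the
permit straight back so later polls and the eventual blocking wait still see
the count the sweeper tasks signalled. A hedged sketch of the same idiom,
written only against the WaitFor/Signal calls visible in this diff:

    // Sketch, not V8 code: reports whether a permit is available without
    // consuming it.
    bool ProbeSemaphore(base::Semaphore* sem) {
      if (!sem->WaitFor(base::TimeDelta::FromSeconds(0))) return false;
      sem->Signal();  // restore the permit taken by the successful WaitFor
      return true;
    }
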
| 592 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) { | 594 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) { |
| 593 // This is only used when resizing an object. | 595 // This is only used when resizing an object. |
| 594 DCHECK(MemoryChunk::FromAddress(old_start) == | 596 DCHECK(MemoryChunk::FromAddress(old_start) == |
| 595 MemoryChunk::FromAddress(new_start)); | 597 MemoryChunk::FromAddress(new_start)); |
| 596 | 598 |
| 597 if (!heap->incremental_marking()->IsMarking() || | 599 if (!heap->incremental_marking()->IsMarking() || |
| 598 Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE)) | 600 Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE)) |
| (...skipping 2672 matching lines...) |
| 3271 // Slots have already been recorded so we just need to add it to the | 3273 // Slots have already been recorded so we just need to add it to the |
| 3272 // sweeper. | 3274 // sweeper. |
| 3273 *data += 1; | 3275 *data += 1; |
| 3274 } | 3276 } |
| 3275 } | 3277 } |
| 3276 } | 3278 } |
| 3277 }; | 3279 }; |
| 3278 | 3280 |
| 3279 void MarkCompactCollector::EvacuatePagesInParallel() { | 3281 void MarkCompactCollector::EvacuatePagesInParallel() { |
| 3280 PageParallelJob<EvacuationJobTraits> job( | 3282 PageParallelJob<EvacuationJobTraits> job( |
| 3281 heap_, heap_->isolate()->cancelable_task_manager()); | 3283 heap_, heap_->isolate()->cancelable_task_manager(), |
| 3284 &page_parallel_job_semaphore_); |
| 3282 | 3285 |
| 3283 int abandoned_pages = 0; | 3286 int abandoned_pages = 0; |
| 3284 intptr_t live_bytes = 0; | 3287 intptr_t live_bytes = 0; |
| 3285 for (Page* page : evacuation_candidates_) { | 3288 for (Page* page : evacuation_candidates_) { |
| 3286 live_bytes += page->LiveBytes(); | 3289 live_bytes += page->LiveBytes(); |
| 3287 job.AddPage(page, &abandoned_pages); | 3290 job.AddPage(page, &abandoned_pages); |
| 3288 } | 3291 } |
| 3289 | 3292 |
| 3290 const Address age_mark = heap()->new_space()->age_mark(); | 3293 const Address age_mark = heap()->new_space()->age_mark(); |
| 3291 for (Page* page : newspace_evacuation_candidates_) { | 3294 for (Page* page : newspace_evacuation_candidates_) { |
| (...skipping 335 matching lines...) |
| 3627 }; | 3630 }; |
| 3628 | 3631 |
| 3629 int NumberOfPointerUpdateTasks(int pages) { | 3632 int NumberOfPointerUpdateTasks(int pages) { |
| 3630 if (!FLAG_parallel_pointer_update) return 1; | 3633 if (!FLAG_parallel_pointer_update) return 1; |
| 3631 const int kMaxTasks = 4; | 3634 const int kMaxTasks = 4; |
| 3632 const int kPagesPerTask = 4; | 3635 const int kPagesPerTask = 4; |
| 3633 return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask); | 3636 return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask); |
| 3634 } | 3637 } |
| 3635 | 3638 |
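For scale, NumberOfPointerUpdateTasks rounds pages up to a multiple of
kPagesPerTask (4) and caps the result at kMaxTasks (4): 1-4 pages yield 1 task,
10 pages yield Min(4, 13 / 4) = 3, and 13 or more pages saturate at 4; with
FLAG_parallel_pointer_update disabled it is always 1 task.
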
| 3636 template <PointerDirection direction> | 3639 template <PointerDirection direction> |
| 3637 void UpdatePointersInParallel(Heap* heap) { | 3640 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) { |
| 3638 PageParallelJob<PointerUpdateJobTraits<direction> > job( | 3641 PageParallelJob<PointerUpdateJobTraits<direction> > job( |
| 3639 heap, heap->isolate()->cancelable_task_manager()); | 3642 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
| 3640 RememberedSet<direction>::IterateMemoryChunks( | 3643 RememberedSet<direction>::IterateMemoryChunks( |
| 3641 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); | 3644 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); |
| 3642 PointersUpdatingVisitor visitor(heap); | 3645 PointersUpdatingVisitor visitor(heap); |
| 3643 int num_pages = job.NumberOfPages(); | 3646 int num_pages = job.NumberOfPages(); |
| 3644 int num_tasks = NumberOfPointerUpdateTasks(num_pages); | 3647 int num_tasks = NumberOfPointerUpdateTasks(num_pages); |
| 3645 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); | 3648 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); |
| 3646 } | 3649 } |
| 3647 | 3650 |
| 3648 class ToSpacePointerUpdateJobTraits { | 3651 class ToSpacePointerUpdateJobTraits { |
| 3649 public: | 3652 public: |
| 3650 typedef std::pair<Address, Address> PerPageData; | 3653 typedef std::pair<Address, Address> PerPageData; |
| 3651 typedef PointersUpdatingVisitor* PerTaskData; | 3654 typedef PointersUpdatingVisitor* PerTaskData; |
| 3652 | 3655 |
| 3653 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, | 3656 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, |
| 3654 MemoryChunk* chunk, PerPageData limits) { | 3657 MemoryChunk* chunk, PerPageData limits) { |
| 3655 for (Address cur = limits.first; cur < limits.second;) { | 3658 for (Address cur = limits.first; cur < limits.second;) { |
| 3656 HeapObject* object = HeapObject::FromAddress(cur); | 3659 HeapObject* object = HeapObject::FromAddress(cur); |
| 3657 Map* map = object->map(); | 3660 Map* map = object->map(); |
| 3658 int size = object->SizeFromMap(map); | 3661 int size = object->SizeFromMap(map); |
| 3659 object->IterateBody(map->instance_type(), size, visitor); | 3662 object->IterateBody(map->instance_type(), size, visitor); |
| 3660 cur += size; | 3663 cur += size; |
| 3661 } | 3664 } |
| 3662 return true; | 3665 return true; |
| 3663 } | 3666 } |
| 3664 static const bool NeedSequentialFinalization = false; | 3667 static const bool NeedSequentialFinalization = false; |
| 3665 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 3668 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
| 3666 } | 3669 } |
| 3667 }; | 3670 }; |
| 3668 | 3671 |
| 3669 void UpdateToSpacePointersInParallel(Heap* heap) { | 3672 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { |
| 3670 PageParallelJob<ToSpacePointerUpdateJobTraits> job( | 3673 PageParallelJob<ToSpacePointerUpdateJobTraits> job( |
| 3671 heap, heap->isolate()->cancelable_task_manager()); | 3674 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
| 3672 Address space_start = heap->new_space()->bottom(); | 3675 Address space_start = heap->new_space()->bottom(); |
| 3673 Address space_end = heap->new_space()->top(); | 3676 Address space_end = heap->new_space()->top(); |
| 3674 NewSpacePageIterator it(space_start, space_end); | 3677 NewSpacePageIterator it(space_start, space_end); |
| 3675 while (it.has_next()) { | 3678 while (it.has_next()) { |
| 3676 Page* page = it.next(); | 3679 Page* page = it.next(); |
| 3677 Address start = | 3680 Address start = |
| 3678 page->Contains(space_start) ? space_start : page->area_start(); | 3681 page->Contains(space_start) ? space_start : page->area_start(); |
| 3679 Address end = page->Contains(space_end) ? space_end : page->area_end(); | 3682 Address end = page->Contains(space_end) ? space_end : page->area_end(); |
| 3680 job.AddPage(page, std::make_pair(start, end)); | 3683 job.AddPage(page, std::make_pair(start, end)); |
| 3681 } | 3684 } |
| 3682 PointersUpdatingVisitor visitor(heap); | 3685 PointersUpdatingVisitor visitor(heap); |
| 3683 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; | 3686 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; |
| 3684 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); | 3687 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); |
| 3685 } | 3688 } |
| 3686 | 3689 |
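Both pointer-update helpers above, like EvacuatePagesInParallel earlier, now
accept a caller-owned base::Semaphore* and forward it to PageParallelJob, so
the collector's single page_parallel_job_semaphore_ is reused by every parallel
page job instead of each job managing its own semaphore. A hedged sketch of the
resulting calling shape (the loop and function name are illustrative only; the
constructor and Run arguments mirror the calls visible in this diff):

    // Illustrative: one long-lived semaphore handed to successive
    // short-lived jobs.
    void RunToSpaceJobTwice(Heap* heap, base::Semaphore* semaphore) {
      for (int round = 0; round < 2; round++) {
        PageParallelJob<ToSpacePointerUpdateJobTraits> job(
            heap, heap->isolate()->cancelable_task_manager(), semaphore);
        // ... job.AddPage(page, std::make_pair(start, end)) per page ...
        PointersUpdatingVisitor visitor(heap);
        job.Run(1, [&visitor](int i) { return &visitor; });
      }
    }
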
| 3687 void MarkCompactCollector::UpdatePointersAfterEvacuation() { | 3690 void MarkCompactCollector::UpdatePointersAfterEvacuation() { |
| 3688 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); | 3691 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); |
| 3689 | 3692 |
| 3690 PointersUpdatingVisitor updating_visitor(heap()); | 3693 PointersUpdatingVisitor updating_visitor(heap()); |
| 3691 | 3694 |
| 3692 { | 3695 { |
| 3693 TRACE_GC(heap()->tracer(), | 3696 TRACE_GC(heap()->tracer(), |
| 3694 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); | 3697 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); |
| 3695 UpdateToSpacePointersInParallel(heap_); | 3698 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_); |
| 3696 // Update roots. | 3699 // Update roots. |
| 3697 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 3700 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
| 3698 UpdatePointersInParallel<OLD_TO_NEW>(heap_); | 3701 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); |
| 3699 } | 3702 } |
| 3700 | 3703 |
| 3701 { | 3704 { |
| 3702 Heap* heap = this->heap(); | 3705 Heap* heap = this->heap(); |
| 3703 TRACE_GC(heap->tracer(), | 3706 TRACE_GC(heap->tracer(), |
| 3704 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); | 3707 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); |
| 3705 UpdatePointersInParallel<OLD_TO_OLD>(heap_); | 3708 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); |
| 3706 } | 3709 } |
| 3707 | 3710 |
| 3708 { | 3711 { |
| 3709 TRACE_GC(heap()->tracer(), | 3712 TRACE_GC(heap()->tracer(), |
| 3710 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); | 3713 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); |
| 3711 // Update pointers from external string table. | 3714 // Update pointers from external string table. |
| 3712 heap_->UpdateReferencesInExternalStringTable( | 3715 heap_->UpdateReferencesInExternalStringTable( |
| 3713 &UpdateReferenceInExternalStringTableEntry); | 3716 &UpdateReferenceInExternalStringTableEntry); |
| 3714 | 3717 |
| 3715 EvacuationWeakObjectRetainer evacuation_object_retainer; | 3718 EvacuationWeakObjectRetainer evacuation_object_retainer; |
| (...skipping 231 matching lines...) |
| 3947 MarkBit mark_bit = Marking::MarkBitFrom(host); | 3950 MarkBit mark_bit = Marking::MarkBitFrom(host); |
| 3948 if (Marking::IsBlack(mark_bit)) { | 3951 if (Marking::IsBlack(mark_bit)) { |
| 3949 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 3952 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
| 3950 RecordRelocSlot(host, &rinfo, target); | 3953 RecordRelocSlot(host, &rinfo, target); |
| 3951 } | 3954 } |
| 3952 } | 3955 } |
| 3953 } | 3956 } |
| 3954 | 3957 |
| 3955 } // namespace internal | 3958 } // namespace internal |
| 3956 } // namespace v8 | 3959 } // namespace v8 |