OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 31 matching lines...) |
42 // The following has to hold in order for {Marking::MarkBitFrom} to not produce | 42 // The following has to hold in order for {Marking::MarkBitFrom} to not produce |
43 // invalid {kImpossibleBitPattern} in the marking bitmap by overlapping. | 43 // invalid {kImpossibleBitPattern} in the marking bitmap by overlapping. |
44 STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2); | 44 STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2); |
45 | 45 |
46 | 46 |
47 // ------------------------------------------------------------------------- | 47 // ------------------------------------------------------------------------- |
48 // MarkCompactCollector | 48 // MarkCompactCollector |
49 | 49 |
50 MarkCompactCollector::MarkCompactCollector(Heap* heap) | 50 MarkCompactCollector::MarkCompactCollector(Heap* heap) |
51 : // NOLINT | 51 : // NOLINT |
| 52 heap_(heap), |
| 53 page_parallel_job_semaphore_(0), |
52 #ifdef DEBUG | 54 #ifdef DEBUG |
53 state_(IDLE), | 55 state_(IDLE), |
54 #endif | 56 #endif |
55 marking_parity_(ODD_MARKING_PARITY), | 57 marking_parity_(ODD_MARKING_PARITY), |
56 was_marked_incrementally_(false), | 58 was_marked_incrementally_(false), |
57 evacuation_(false), | 59 evacuation_(false), |
58 heap_(heap), | 60 compacting_(false), |
| 61 black_allocation_(false), |
| 62 have_code_to_deoptimize_(false), |
59 marking_deque_memory_(NULL), | 63 marking_deque_memory_(NULL), |
60 marking_deque_memory_committed_(0), | 64 marking_deque_memory_committed_(0), |
61 code_flusher_(nullptr), | 65 code_flusher_(nullptr), |
62 embedder_heap_tracer_(nullptr), | 66 embedder_heap_tracer_(nullptr), |
63 have_code_to_deoptimize_(false), | |
64 compacting_(false), | |
65 sweeper_(heap) { | 67 sweeper_(heap) { |
66 } | 68 } |
67 | 69 |
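The new initializer list keeps the members in declaration order (heap_ first, then page_parallel_job_semaphore_, with compacting_, black_allocation_ and have_code_to_deoptimize_ ahead of the deque and flusher fields). In C++, members are initialized in declaration order regardless of how the initializer list is written, so keeping the two in sync avoids -Wreorder warnings and initializers that silently read not-yet-initialized members. A minimal standalone sketch of that pitfall (names are illustrative, not V8's):

// init_order.cc -- illustrative sketch only; not part of the V8 change.
#include <iostream>

class Job {
 public:
  // The initializer list is written "pages_, then tasks_", but tasks_ is
  // *declared* first, so tasks_(pages_ / 4) runs first and reads pages_
  // before it has been initialized. Compilers flag the mismatch with
  // -Wreorder; listing initializers in declaration order avoids the trap.
  explicit Job(int pages) : pages_(pages), tasks_(pages_ / 4) {}

  int tasks() const { return tasks_; }

 private:
  int tasks_;  // declared first  -> initialized first
  int pages_;  // declared second -> initialized second
};

int main() {
  Job job(16);
  std::cout << "tasks = " << job.tasks() << "\n";  // indeterminate value
  return 0;
}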
68 #ifdef VERIFY_HEAP | 70 #ifdef VERIFY_HEAP |
69 class VerifyMarkingVisitor : public ObjectVisitor { | 71 class VerifyMarkingVisitor : public ObjectVisitor { |
70 public: | 72 public: |
71 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} | 73 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} |
72 | 74 |
73 void VisitPointers(Object** start, Object** end) override { | 75 void VisitPointers(Object** start, Object** end) override { |
74 for (Object** current = start; current < end; current++) { | 76 for (Object** current = start; current < end; current++) { |
(...skipping 398 matching lines...) |
473 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); | 475 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); |
474 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); | 476 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); |
475 const int offset = space_to_start_ - FIRST_PAGED_SPACE; | 477 const int offset = space_to_start_ - FIRST_PAGED_SPACE; |
476 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; | 478 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; |
477 for (int i = 0; i < num_spaces; i++) { | 479 for (int i = 0; i < num_spaces; i++) { |
478 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); | 480 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); |
479 DCHECK_GE(space_id, FIRST_PAGED_SPACE); | 481 DCHECK_GE(space_id, FIRST_PAGED_SPACE); |
480 DCHECK_LE(space_id, LAST_PAGED_SPACE); | 482 DCHECK_LE(space_id, LAST_PAGED_SPACE); |
481 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); | 483 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); |
482 } | 484 } |
483 pending_sweeper_tasks_->Signal("SweeperTask::Run"); | 485 pending_sweeper_tasks_->Signal(); |
484 } | 486 } |
485 | 487 |
486 Sweeper* sweeper_; | 488 Sweeper* sweeper_; |
487 base::Semaphore* pending_sweeper_tasks_; | 489 base::Semaphore* pending_sweeper_tasks_; |
488 AllocationSpace space_to_start_; | 490 AllocationSpace space_to_start_; |
489 | 491 |
490 DISALLOW_COPY_AND_ASSIGN(SweeperTask); | 492 DISALLOW_COPY_AND_ASSIGN(SweeperTask); |
491 }; | 493 }; |
492 | 494 |
493 void MarkCompactCollector::Sweeper::StartSweeping() { | 495 void MarkCompactCollector::Sweeper::StartSweeping() { |
(...skipping 84 matching lines...) |
578 VerifyEvacuation(heap_); | 580 VerifyEvacuation(heap_); |
579 } | 581 } |
580 #endif | 582 #endif |
581 } | 583 } |
582 | 584 |
583 bool MarkCompactCollector::Sweeper::IsSweepingCompleted() { | 585 bool MarkCompactCollector::Sweeper::IsSweepingCompleted() { |
584 if (!pending_sweeper_tasks_semaphore_.WaitFor( | 586 if (!pending_sweeper_tasks_semaphore_.WaitFor( |
585 base::TimeDelta::FromSeconds(0))) { | 587 base::TimeDelta::FromSeconds(0))) { |
586 return false; | 588 return false; |
587 } | 589 } |
588 pending_sweeper_tasks_semaphore_.Signal("Sweeper::IsSweepingCompleted"); | 590 pending_sweeper_tasks_semaphore_.Signal(); |
589 return true; | 591 return true; |
590 } | 592 } |
591 | 593 |
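IsSweepingCompleted above probes pending_sweeper_tasks_semaphore_ without blocking: a wait with a zero timeout either fails (sweeper tasks are still running) or succeeds, and on success the token is signalled straight back so that a later blocking wait still observes the completion. A minimal sketch of the same probe pattern using C++20 std::counting_semaphore (an illustration only; V8 uses its own base::Semaphore):

// semaphore_probe.cc -- illustrative sketch, C++20.
#include <semaphore>

std::counting_semaphore<> pending_tasks(0);

// Returns true if a completion token is available, without consuming it
// from the point of view of later waiters.
bool IsWorkCompleted() {
  // Non-blocking counterpart of WaitFor(TimeDelta::FromSeconds(0)).
  if (!pending_tasks.try_acquire()) {
    return false;  // still running
  }
  // Put the token back so repeated polls and the final blocking wait
  // still see the signal.
  pending_tasks.release();
  return true;
}

int main() {
  pending_tasks.release();           // pretend a task just finished
  return IsWorkCompleted() ? 0 : 1;  // completed -> exit code 0
}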
592 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) { | 594 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) { |
593 // This is only used when resizing an object. | 595 // This is only used when resizing an object. |
594 DCHECK(MemoryChunk::FromAddress(old_start) == | 596 DCHECK(MemoryChunk::FromAddress(old_start) == |
595 MemoryChunk::FromAddress(new_start)); | 597 MemoryChunk::FromAddress(new_start)); |
596 | 598 |
597 if (!heap->incremental_marking()->IsMarking() || | 599 if (!heap->incremental_marking()->IsMarking() || |
598 Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE)) | 600 Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE)) |
(...skipping 2746 matching lines...) |
3345 // Slots have already been recorded so we just need to add it to the | 3347 // Slots have already been recorded so we just need to add it to the |
3346 // sweeper. | 3348 // sweeper. |
3347 *data += 1; | 3349 *data += 1; |
3348 } | 3350 } |
3349 } | 3351 } |
3350 } | 3352 } |
3351 }; | 3353 }; |
3352 | 3354 |
3353 void MarkCompactCollector::EvacuatePagesInParallel() { | 3355 void MarkCompactCollector::EvacuatePagesInParallel() { |
3354 PageParallelJob<EvacuationJobTraits> job( | 3356 PageParallelJob<EvacuationJobTraits> job( |
3355 heap_, heap_->isolate()->cancelable_task_manager()); | 3357 heap_, heap_->isolate()->cancelable_task_manager(), |
| 3358 &page_parallel_job_semaphore_); |
3356 | 3359 |
3357 int abandoned_pages = 0; | 3360 int abandoned_pages = 0; |
3358 intptr_t live_bytes = 0; | 3361 intptr_t live_bytes = 0; |
3359 for (Page* page : evacuation_candidates_) { | 3362 for (Page* page : evacuation_candidates_) { |
3360 live_bytes += page->LiveBytes(); | 3363 live_bytes += page->LiveBytes(); |
3361 job.AddPage(page, &abandoned_pages); | 3364 job.AddPage(page, &abandoned_pages); |
3362 } | 3365 } |
3363 | 3366 |
3364 const Address age_mark = heap()->new_space()->age_mark(); | 3367 const Address age_mark = heap()->new_space()->age_mark(); |
3365 for (Page* page : newspace_evacuation_candidates_) { | 3368 for (Page* page : newspace_evacuation_candidates_) { |
(...skipping 348 matching lines...) |
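Note that the PageParallelJob here no longer sets up its own synchronization: the collector owns a single page_parallel_job_semaphore_ (initialized to 0 in the constructor above) and passes its address to this evacuation job and, further down, to the pointer-updating jobs. A rough sketch of that ownership pattern with standard C++ primitives (ParallelJob and Collector below are illustrative stand-ins, not the real PageParallelJob API):

// shared_job_semaphore.cc -- illustrative ownership sketch only, C++20.
#include <semaphore>

// Stand-in for a parallel job that signals completion on a caller-owned
// semaphore instead of creating its own.
class ParallelJob {
 public:
  explicit ParallelJob(std::counting_semaphore<>* semaphore)
      : semaphore_(semaphore) {}

  void Run() {
    // ... per-page work would happen here ...
    semaphore_->release();  // signal completion
  }

 private:
  std::counting_semaphore<>* semaphore_;  // not owned
};

class Collector {
 public:
  Collector() : page_parallel_job_semaphore_(0) {}

  void EvacuateInParallel() {
    // Each phase reuses the same collector-owned semaphore.
    ParallelJob job(&page_parallel_job_semaphore_);
    job.Run();
    page_parallel_job_semaphore_.acquire();  // wait for completion
  }

 private:
  std::counting_semaphore<> page_parallel_job_semaphore_;
};

int main() {
  Collector collector;
  collector.EvacuateInParallel();
  return 0;
}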
3714 }; | 3717 }; |
3715 | 3718 |
3716 int NumberOfPointerUpdateTasks(int pages) { | 3719 int NumberOfPointerUpdateTasks(int pages) { |
3717 if (!FLAG_parallel_pointer_update) return 1; | 3720 if (!FLAG_parallel_pointer_update) return 1; |
3718 const int kMaxTasks = 4; | 3721 const int kMaxTasks = 4; |
3719 const int kPagesPerTask = 4; | 3722 const int kPagesPerTask = 4; |
3720 return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask); | 3723 return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask); |
3721 } | 3724 } |
3722 | 3725 |
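NumberOfPointerUpdateTasks above caps parallelism at kMaxTasks and otherwise assigns one task per started group of kPagesPerTask pages via a ceiling division. A small standalone check of the formula (the FLAG_parallel_pointer_update early-out is omitted; constants copied from the function):

// pointer_update_tasks.cc -- standalone check of the task-count formula.
#include <algorithm>
#include <cassert>

int NumberOfPointerUpdateTasks(int pages) {
  const int kMaxTasks = 4;
  const int kPagesPerTask = 4;
  // Ceiling division, capped at kMaxTasks.
  return std::min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
}

int main() {
  assert(NumberOfPointerUpdateTasks(1) == 1);   // 1 page  -> 1 task
  assert(NumberOfPointerUpdateTasks(4) == 1);   // 4 pages -> 1 task
  assert(NumberOfPointerUpdateTasks(5) == 2);   // 5 pages -> 2 tasks
  assert(NumberOfPointerUpdateTasks(17) == 4);  // 5 needed, capped at 4
  return 0;
}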
3723 template <PointerDirection direction> | 3726 template <PointerDirection direction> |
3724 void UpdatePointersInParallel(Heap* heap) { | 3727 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) { |
3725 PageParallelJob<PointerUpdateJobTraits<direction> > job( | 3728 PageParallelJob<PointerUpdateJobTraits<direction> > job( |
3726 heap, heap->isolate()->cancelable_task_manager()); | 3729 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
3727 RememberedSet<direction>::IterateMemoryChunks( | 3730 RememberedSet<direction>::IterateMemoryChunks( |
3728 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); | 3731 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); |
3729 int num_pages = job.NumberOfPages(); | 3732 int num_pages = job.NumberOfPages(); |
3730 int num_tasks = NumberOfPointerUpdateTasks(num_pages); | 3733 int num_tasks = NumberOfPointerUpdateTasks(num_pages); |
3731 job.Run(num_tasks, [](int i) { return 0; }); | 3734 job.Run(num_tasks, [](int i) { return 0; }); |
3732 } | 3735 } |
3733 | 3736 |
3734 class ToSpacePointerUpdateJobTraits { | 3737 class ToSpacePointerUpdateJobTraits { |
3735 public: | 3738 public: |
3736 typedef std::pair<Address, Address> PerPageData; | 3739 typedef std::pair<Address, Address> PerPageData; |
3737 typedef PointersUpdatingVisitor* PerTaskData; | 3740 typedef PointersUpdatingVisitor* PerTaskData; |
3738 | 3741 |
3739 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, | 3742 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, |
3740 MemoryChunk* chunk, PerPageData limits) { | 3743 MemoryChunk* chunk, PerPageData limits) { |
3741 for (Address cur = limits.first; cur < limits.second;) { | 3744 for (Address cur = limits.first; cur < limits.second;) { |
3742 HeapObject* object = HeapObject::FromAddress(cur); | 3745 HeapObject* object = HeapObject::FromAddress(cur); |
3743 Map* map = object->map(); | 3746 Map* map = object->map(); |
3744 int size = object->SizeFromMap(map); | 3747 int size = object->SizeFromMap(map); |
3745 object->IterateBody(map->instance_type(), size, visitor); | 3748 object->IterateBody(map->instance_type(), size, visitor); |
3746 cur += size; | 3749 cur += size; |
3747 } | 3750 } |
3748 return true; | 3751 return true; |
3749 } | 3752 } |
3750 static const bool NeedSequentialFinalization = false; | 3753 static const bool NeedSequentialFinalization = false; |
3751 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 3754 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
3752 } | 3755 } |
3753 }; | 3756 }; |
3754 | 3757 |
3755 void UpdateToSpacePointersInParallel(Heap* heap) { | 3758 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { |
3756 PageParallelJob<ToSpacePointerUpdateJobTraits> job( | 3759 PageParallelJob<ToSpacePointerUpdateJobTraits> job( |
3757 heap, heap->isolate()->cancelable_task_manager()); | 3760 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
3758 Address space_start = heap->new_space()->bottom(); | 3761 Address space_start = heap->new_space()->bottom(); |
3759 Address space_end = heap->new_space()->top(); | 3762 Address space_end = heap->new_space()->top(); |
3760 NewSpacePageIterator it(space_start, space_end); | 3763 NewSpacePageIterator it(space_start, space_end); |
3761 while (it.has_next()) { | 3764 while (it.has_next()) { |
3762 Page* page = it.next(); | 3765 Page* page = it.next(); |
3763 Address start = | 3766 Address start = |
3764 page->Contains(space_start) ? space_start : page->area_start(); | 3767 page->Contains(space_start) ? space_start : page->area_start(); |
3765 Address end = page->Contains(space_end) ? space_end : page->area_end(); | 3768 Address end = page->Contains(space_end) ? space_end : page->area_end(); |
3766 job.AddPage(page, std::make_pair(start, end)); | 3769 job.AddPage(page, std::make_pair(start, end)); |
3767 } | 3770 } |
3768 PointersUpdatingVisitor visitor; | 3771 PointersUpdatingVisitor visitor; |
3769 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; | 3772 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1; |
3770 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); | 3773 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); |
3771 } | 3774 } |
3772 | 3775 |
3773 void MarkCompactCollector::UpdatePointersAfterEvacuation() { | 3776 void MarkCompactCollector::UpdatePointersAfterEvacuation() { |
3774 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); | 3777 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); |
3775 | 3778 |
3776 PointersUpdatingVisitor updating_visitor; | 3779 PointersUpdatingVisitor updating_visitor; |
3777 | 3780 |
3778 { | 3781 { |
3779 TRACE_GC(heap()->tracer(), | 3782 TRACE_GC(heap()->tracer(), |
3780 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); | 3783 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); |
3781 UpdateToSpacePointersInParallel(heap_); | 3784 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_); |
3782 // Update roots. | 3785 // Update roots. |
3783 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 3786 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
3784 UpdatePointersInParallel<OLD_TO_NEW>(heap_); | 3787 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_); |
3785 } | 3788 } |
3786 | 3789 |
3787 { | 3790 { |
3788 Heap* heap = this->heap(); | 3791 Heap* heap = this->heap(); |
3789 TRACE_GC(heap->tracer(), | 3792 TRACE_GC(heap->tracer(), |
3790 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); | 3793 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); |
3791 UpdatePointersInParallel<OLD_TO_OLD>(heap_); | 3794 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_); |
3792 } | 3795 } |
3793 | 3796 |
3794 { | 3797 { |
3795 TRACE_GC(heap()->tracer(), | 3798 TRACE_GC(heap()->tracer(), |
3796 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); | 3799 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK); |
3797 // Update pointers from external string table. | 3800 // Update pointers from external string table. |
3798 heap_->UpdateReferencesInExternalStringTable( | 3801 heap_->UpdateReferencesInExternalStringTable( |
3799 &UpdateReferenceInExternalStringTableEntry); | 3802 &UpdateReferenceInExternalStringTableEntry); |
3800 | 3803 |
3801 EvacuationWeakObjectRetainer evacuation_object_retainer; | 3804 EvacuationWeakObjectRetainer evacuation_object_retainer; |
(...skipping 231 matching lines...) |
4033 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4036 MarkBit mark_bit = Marking::MarkBitFrom(host); |
4034 if (Marking::IsBlack(mark_bit)) { | 4037 if (Marking::IsBlack(mark_bit)) { |
4035 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 4038 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
4036 RecordRelocSlot(host, &rinfo, target); | 4039 RecordRelocSlot(host, &rinfo, target); |
4037 } | 4040 } |
4038 } | 4041 } |
4039 } | 4042 } |
4040 | 4043 |
4041 } // namespace internal | 4044 } // namespace internal |
4042 } // namespace v8 | 4045 } // namespace v8 |