OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 454 matching lines...) |
465 AllocationSpace space_to_start) | 465 AllocationSpace space_to_start) |
466 : sweeper_(sweeper), | 466 : sweeper_(sweeper), |
467 pending_sweeper_tasks_(pending_sweeper_tasks), | 467 pending_sweeper_tasks_(pending_sweeper_tasks), |
468 space_to_start_(space_to_start) {} | 468 space_to_start_(space_to_start) {} |
469 | 469 |
470 virtual ~SweeperTask() {} | 470 virtual ~SweeperTask() {} |
471 | 471 |
472 private: | 472 private: |
473 // v8::Task overrides. | 473 // v8::Task overrides. |
474 void Run() override { | 474 void Run() override { |
475 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); | 475 DCHECK_GE(space_to_start_, FIRST_SPACE); |
476 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); | 476 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); |
477 const int offset = space_to_start_ - FIRST_PAGED_SPACE; | 477 const int offset = space_to_start_ - FIRST_SPACE; |
478 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; | 478 const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1; |
479 for (int i = 0; i < num_spaces; i++) { | 479 for (int i = 0; i < num_spaces; i++) { |
480 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); | 480 const int space_id = FIRST_SPACE + ((i + offset) % num_spaces); |
481 DCHECK_GE(space_id, FIRST_PAGED_SPACE); | 481 DCHECK_GE(space_id, FIRST_SPACE); |
482 DCHECK_LE(space_id, LAST_PAGED_SPACE); | 482 DCHECK_LE(space_id, LAST_PAGED_SPACE); |
483 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); | 483 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); |
484 } | 484 } |
485 pending_sweeper_tasks_->Signal(); | 485 pending_sweeper_tasks_->Signal(); |
486 } | 486 } |
487 | 487 |
488 Sweeper* sweeper_; | 488 Sweeper* sweeper_; |
489 base::Semaphore* pending_sweeper_tasks_; | 489 base::Semaphore* pending_sweeper_tasks_; |
490 AllocationSpace space_to_start_; | 490 AllocationSpace space_to_start_; |
491 | 491 |
(...skipping 17 matching lines...) |
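A quick illustration of the round-robin in SweeperTask::Run() above: each task starts at its own space and wraps around, so concurrent tasks fan out across the spaces instead of all contending on the first one. This is a minimal standalone sketch; the enum values are assumptions that only mirror the ordering the DCHECKs imply (NEW_SPACE doubling as FIRST_SPACE), not V8's actual definitions.

    #include <cstdio>

    // Assumed, illustrative space ordering: FIRST_SPACE == NEW_SPACE == 0 and
    // the last paged space is MAP_SPACE.
    enum AllocationSpace {
      NEW_SPACE = 0, OLD_SPACE, CODE_SPACE, MAP_SPACE,
      FIRST_SPACE = NEW_SPACE, LAST_PAGED_SPACE = MAP_SPACE
    };

    int main() {
      const int space_to_start = CODE_SPACE;  // this task's assigned space
      const int offset = space_to_start - FIRST_SPACE;
      const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
      for (int i = 0; i < num_spaces; i++) {
        // Prints 2, 3, 0, 1: every space exactly once, starting at the offset.
        std::printf("sweep space %d\n", FIRST_SPACE + ((i + offset) % num_spaces));
      }
      return 0;
    }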
509 void MarkCompactCollector::Sweeper::StartSweepingHelper( | 509 void MarkCompactCollector::Sweeper::StartSweepingHelper( |
510 AllocationSpace space_to_start) { | 510 AllocationSpace space_to_start) { |
511 num_sweeping_tasks_.Increment(1); | 511 num_sweeping_tasks_.Increment(1); |
512 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 512 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
513 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), | 513 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), |
514 v8::Platform::kShortRunningTask); | 514 v8::Platform::kShortRunningTask); |
515 } | 515 } |
516 | 516 |
517 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( | 517 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( |
518 Page* page) { | 518 Page* page) { |
519 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); | |
520 if (!page->SweepingDone()) { | 519 if (!page->SweepingDone()) { |
521 ParallelSweepPage(page, owner); | 520 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); |
| 521 ParallelSweepPage(page, owner->identity()); |
522 if (!page->SweepingDone()) { | 522 if (!page->SweepingDone()) { |
523 // We were not able to sweep that page, i.e., a concurrent | 523 // We were not able to sweep that page, i.e., a concurrent |
524 // sweeper thread currently owns this page. Wait for the sweeper | 524 // sweeper thread currently owns this page. Wait for the sweeper |
525 // thread to be done with this page. | 525 // thread to be done with this page. |
526 page->WaitUntilSweepingCompleted(); | 526 page->WaitUntilSweepingCompleted(); |
527 } | 527 } |
528 } | 528 } |
529 } | 529 } |
530 | 530 |
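SweepOrWaitUntilSweepingCompleted() pairs with the per-page mutex that ParallelSweepPage() takes further down: the caller first tries to sweep the page itself and only blocks when a concurrent sweeper already owns it. A minimal sketch of that pattern, with std::mutex as a hypothetical stand-in for page->mutex():

    #include <mutex>

    struct PageStub {              // hypothetical stand-in for a Page
      std::mutex mutex;            // analogue of page->mutex()
      bool sweeping_done = false;  // analogue of page->SweepingDone()
    };

    void SweepOrWait(PageStub* page) {
      if (page->mutex.try_lock()) {
        // We won the page: sweep it ourselves (RawSweep would run here).
        page->sweeping_done = true;
        page->mutex.unlock();
      } else {
        // A concurrent sweeper owns the page: block until it releases it,
        // mirroring page->WaitUntilSweepingCompleted().
        page->mutex.lock();
        page->mutex.unlock();
      }
    }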
531 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { | 531 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { |
(...skipping 22 matching lines...) |
554 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); | 554 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); |
555 } | 555 } |
556 | 556 |
557 if (FLAG_concurrent_sweeping) { | 557 if (FLAG_concurrent_sweeping) { |
558 while (num_sweeping_tasks_.Value() > 0) { | 558 while (num_sweeping_tasks_.Value() > 0) { |
559 pending_sweeper_tasks_semaphore_.Wait(); | 559 pending_sweeper_tasks_semaphore_.Wait(); |
560 num_sweeping_tasks_.Increment(-1); | 560 num_sweeping_tasks_.Increment(-1); |
561 } | 561 } |
562 } | 562 } |
563 | 563 |
564 ForAllSweepingSpaces( | 564 ForAllSweepingSpaces([this](AllocationSpace space) { |
565 [this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); }); | 565 if (space == NEW_SPACE) { |
| 566 swept_list_[NEW_SPACE].Clear(); |
| 567 } |
| 568 DCHECK(sweeping_list_[space].empty()); |
| 569 }); |
566 late_pages_ = false; | 570 late_pages_ = false; |
567 sweeping_in_progress_ = false; | 571 sweeping_in_progress_ = false; |
568 } | 572 } |
569 | 573 |
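EnsureCompleted()'s wait loop works because every SweeperTask signals the semaphore exactly once, so waiting num_sweeping_tasks_ times drains all outstanding tasks. A C++20 sketch of the same handshake (std::counting_semaphore standing in for base::Semaphore; names hypothetical):

    #include <semaphore>
    #include <thread>
    #include <vector>

    int main() {
      std::counting_semaphore<8> pending(0);  // analogue of pending_sweeper_tasks_semaphore_
      const int num_tasks = 3;                // analogue of num_sweeping_tasks_
      std::vector<std::thread> tasks;
      for (int i = 0; i < num_tasks; i++)
        tasks.emplace_back([&] { /* ParallelSweepSpace(...) */ pending.release(); });
      // EnsureCompleted: one wait per task that was started.
      for (int i = 0; i < num_tasks; i++) pending.acquire();
      for (auto& t : tasks) t.join();
      return 0;
    }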
| 574 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() { |
| 575 if (!sweeping_in_progress_) return; |
| 576 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { |
| 577 NewSpacePageIterator pit(heap_->new_space()); |
| 578 while (pit.has_next()) { |
| 579 Page* page = pit.next(); |
| 580 SweepOrWaitUntilSweepingCompleted(page); |
| 581 } |
| 582 } |
| 583 } |
| 584 |
570 void MarkCompactCollector::EnsureSweepingCompleted() { | 585 void MarkCompactCollector::EnsureSweepingCompleted() { |
571 if (!sweeper().sweeping_in_progress()) return; | 586 if (!sweeper().sweeping_in_progress()) return; |
572 | 587 |
573 sweeper().EnsureCompleted(); | 588 sweeper().EnsureCompleted(); |
574 heap()->old_space()->RefillFreeList(); | 589 heap()->old_space()->RefillFreeList(); |
575 heap()->code_space()->RefillFreeList(); | 590 heap()->code_space()->RefillFreeList(); |
576 heap()->map_space()->RefillFreeList(); | 591 heap()->map_space()->RefillFreeList(); |
577 | 592 |
578 #ifdef VERIFY_HEAP | 593 #ifdef VERIFY_HEAP |
579 if (FLAG_verify_heap && !evacuation()) { | 594 if (FLAG_verify_heap && !evacuation()) { |
(...skipping 1284 matching lines...) |
1864 AllocationSpace space_to_allocate_; | 1879 AllocationSpace space_to_allocate_; |
1865 intptr_t promoted_size_; | 1880 intptr_t promoted_size_; |
1866 intptr_t semispace_copied_size_; | 1881 intptr_t semispace_copied_size_; |
1867 base::HashMap* local_pretenuring_feedback_; | 1882 base::HashMap* local_pretenuring_feedback_; |
1868 }; | 1883 }; |
1869 | 1884 |
1870 class MarkCompactCollector::EvacuateNewSpacePageVisitor final | 1885 class MarkCompactCollector::EvacuateNewSpacePageVisitor final |
1871 : public MarkCompactCollector::HeapObjectVisitor { | 1886 : public MarkCompactCollector::HeapObjectVisitor { |
1872 public: | 1887 public: |
1873 explicit EvacuateNewSpacePageVisitor(Heap* heap) | 1888 explicit EvacuateNewSpacePageVisitor(Heap* heap) |
1874 : heap_(heap), promoted_size_(0) {} | 1889 : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {} |
1875 | 1890 |
1876 static void TryMoveToOldSpace(Page* page, PagedSpace* owner) { | 1891 static void MoveToOldSpace(Page* page, PagedSpace* owner) { |
1877 if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) { | 1892 page->Unlink(); |
1878 Page* new_page = Page::ConvertNewToOld(page, owner); | 1893 Page* new_page = Page::ConvertNewToOld(page, owner); |
1879 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); | 1894 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); |
1880 } | 1895 } |
| 1896 |
| 1897 static void MoveToToSpace(Page* page) { |
| 1898 page->heap()->new_space()->MovePageFromSpaceToSpace(page); |
| 1899 page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION); |
1881 } | 1900 } |
1882 | 1901 |
1883 inline bool Visit(HeapObject* object) { | 1902 inline bool Visit(HeapObject* object) { |
1884 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); | 1903 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); |
1885 object->IterateBodyFast(&visitor); | 1904 object->IterateBodyFast(&visitor); |
1886 promoted_size_ += object->Size(); | 1905 promoted_size_ += object->Size(); |
1887 return true; | 1906 return true; |
1888 } | 1907 } |
1889 | 1908 |
1890 intptr_t promoted_size() { return promoted_size_; } | 1909 intptr_t promoted_size() { return promoted_size_; } |
| 1910 intptr_t semispace_copied_size() { return semispace_copied_size_; } |
| 1911 |
| 1912 void account_semispace_copied(intptr_t copied) { |
| 1913 semispace_copied_size_ += copied; |
| 1914 } |
1891 | 1915 |
1892 private: | 1916 private: |
1893 Heap* heap_; | 1917 Heap* heap_; |
1894 intptr_t promoted_size_; | 1918 intptr_t promoted_size_; |
| 1919 intptr_t semispace_copied_size_; |
1895 }; | 1920 }; |
1896 | 1921 |
1897 class MarkCompactCollector::EvacuateOldSpaceVisitor final | 1922 class MarkCompactCollector::EvacuateOldSpaceVisitor final |
1898 : public MarkCompactCollector::EvacuateVisitorBase { | 1923 : public MarkCompactCollector::EvacuateVisitorBase { |
1899 public: | 1924 public: |
1900 EvacuateOldSpaceVisitor(Heap* heap, | 1925 EvacuateOldSpaceVisitor(Heap* heap, |
1901 CompactionSpaceCollection* compaction_spaces) | 1926 CompactionSpaceCollection* compaction_spaces) |
1902 : EvacuateVisitorBase(heap, compaction_spaces) {} | 1927 : EvacuateVisitorBase(heap, compaction_spaces) {} |
1903 | 1928 |
1904 inline bool Visit(HeapObject* object) override { | 1929 inline bool Visit(HeapObject* object) override { |
(...skipping 1123 matching lines...) |
3028 NewSpace* new_space = heap()->new_space(); | 3053 NewSpace* new_space = heap()->new_space(); |
3029 NewSpacePageIterator it(new_space->bottom(), new_space->top()); | 3054 NewSpacePageIterator it(new_space->bottom(), new_space->top()); |
3030 // Append the list of new space pages to be processed. | 3055 // Append the list of new space pages to be processed. |
3031 while (it.has_next()) { | 3056 while (it.has_next()) { |
3032 newspace_evacuation_candidates_.Add(it.next()); | 3057 newspace_evacuation_candidates_.Add(it.next()); |
3033 } | 3058 } |
3034 new_space->Flip(); | 3059 new_space->Flip(); |
3035 new_space->ResetAllocationInfo(); | 3060 new_space->ResetAllocationInfo(); |
3036 } | 3061 } |
3037 | 3062 |
3038 void MarkCompactCollector::EvacuateNewSpaceEpilogue() { | |
3039 newspace_evacuation_candidates_.Rewind(0); | |
3040 } | |
3041 | |
3042 class MarkCompactCollector::Evacuator : public Malloced { | 3063 class MarkCompactCollector::Evacuator : public Malloced { |
3043 public: | 3064 public: |
| 3065 enum EvacuationMode { |
| 3066 kObjectsNewToOld, |
| 3067 kPageNewToOld, |
| 3068 kObjectsOldToOld, |
| 3069 kPageNewToNew, |
| 3070 }; |
| 3071 |
| 3072 static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) { |
| 3073 // Note: The order of checks is important in this function. |
| 3074 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) |
| 3075 return kPageNewToOld; |
| 3076 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION)) |
| 3077 return kPageNewToNew; |
| 3078 if (chunk->InNewSpace()) return kObjectsNewToOld; |
| 3079 DCHECK(chunk->IsEvacuationCandidate()); |
| 3080 return kObjectsOldToOld; |
| 3081 } |
| 3082 |
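The "order of checks" note above is the crux of the hoisted ComputeEvacuationMode(): a page promoted in place can still answer InNewSpace() true (the new->new case certainly does, since the page merely moved between semispaces), so the promotion flags must be tested before the space check. A compilable sketch of the decision, with plain bools as hypothetical stand-ins for the chunk flags:

    enum EvacuationMode {
      kObjectsNewToOld, kPageNewToOld, kObjectsOldToOld, kPageNewToNew
    };

    struct ChunkStub {             // hypothetical flattened chunk state
      bool page_new_old_promotion;
      bool page_new_new_promotion;
      bool in_new_space;
    };

    EvacuationMode ComputeMode(const ChunkStub& c) {
      // Flags first: a new->new promoted page is still in new space, so
      // testing in_new_space first would wrongly yield kObjectsNewToOld.
      if (c.page_new_old_promotion) return kPageNewToOld;
      if (c.page_new_new_promotion) return kPageNewToNew;
      if (c.in_new_space) return kObjectsNewToOld;
      return kObjectsOldToOld;     // the real code DCHECKs IsEvacuationCandidate()
    }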
3044 // NewSpacePages with more live bytes than this threshold qualify for fast | 3083 // NewSpacePages with more live bytes than this threshold qualify for fast |
3045 // evacuation. | 3084 // evacuation. |
3046 static int PageEvacuationThreshold() { | 3085 static int PageEvacuationThreshold() { |
3047 if (FLAG_page_promotion) | 3086 if (FLAG_page_promotion) |
3048 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; | 3087 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; |
3049 return Page::kAllocatableMemory + kPointerSize; | 3088 return Page::kAllocatableMemory + kPointerSize; |
3050 } | 3089 } |
3051 | 3090 |
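A worked instance of PageEvacuationThreshold() under assumed numbers (the flag default and the page payload size are illustrative, not taken from this CL): with a 70% threshold and 500 KB of allocatable memory per page, a page qualifies once it carries more than ~350 KB of live bytes. With --page-promotion off, the fallback of kAllocatableMemory + kPointerSize exceeds any possible live-byte count, which disables page promotion outright.

    #include <cstdio>

    int main() {
      const int kKB = 1024;
      const int kAllocatableMemory = 500 * kKB;  // assumed page payload size
      const int threshold_percent = 70;          // assumed flag default
      const int threshold = threshold_percent * kAllocatableMemory / 100;
      std::printf("qualifies above %d bytes\n", threshold);  // 358400
      return 0;
    }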
3052 explicit Evacuator(MarkCompactCollector* collector) | 3091 explicit Evacuator(MarkCompactCollector* collector) |
3053 : collector_(collector), | 3092 : collector_(collector), |
3054 compaction_spaces_(collector->heap()), | 3093 compaction_spaces_(collector->heap()), |
3055 local_pretenuring_feedback_(base::HashMap::PointersMatch, | 3094 local_pretenuring_feedback_(base::HashMap::PointersMatch, |
3056 kInitialLocalPretenuringFeedbackCapacity), | 3095 kInitialLocalPretenuringFeedbackCapacity), |
3057 new_space_visitor_(collector->heap(), &compaction_spaces_, | 3096 new_space_visitor_(collector->heap(), &compaction_spaces_, |
3058 &local_pretenuring_feedback_), | 3097 &local_pretenuring_feedback_), |
3059 new_space_page_visitor(collector->heap()), | 3098 new_space_page_visitor(collector->heap()), |
3060 old_space_visitor_(collector->heap(), &compaction_spaces_), | 3099 old_space_visitor_(collector->heap(), &compaction_spaces_), |
3061 duration_(0.0), | 3100 duration_(0.0), |
3062 bytes_compacted_(0) {} | 3101 bytes_compacted_(0) {} |
3063 | 3102 |
3064 inline bool EvacuatePage(Page* chunk); | 3103 inline bool EvacuatePage(Page* chunk); |
3065 | 3104 |
3066 // Merge back locally cached info sequentially. Note that this method needs | 3105 // Merge back locally cached info sequentially. Note that this method needs |
3067 // to be called from the main thread. | 3106 // to be called from the main thread. |
3068 inline void Finalize(); | 3107 inline void Finalize(); |
3069 | 3108 |
3070 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } | 3109 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } |
3071 | 3110 |
3072 private: | 3111 private: |
3073 enum EvacuationMode { | |
3074 kObjectsNewToOld, | |
3075 kPageNewToOld, | |
3076 kObjectsOldToOld, | |
3077 }; | |
3078 | |
3079 static const int kInitialLocalPretenuringFeedbackCapacity = 256; | 3112 static const int kInitialLocalPretenuringFeedbackCapacity = 256; |
3080 | 3113 |
3081 inline Heap* heap() { return collector_->heap(); } | 3114 inline Heap* heap() { return collector_->heap(); } |
3082 | 3115 |
3083 inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) { | |
3084 // Note: The order of checks is important in this function. | |
3085 if (chunk->InNewSpace()) return kObjectsNewToOld; | |
3086 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) | |
3087 return kPageNewToOld; | |
3088 DCHECK(chunk->IsEvacuationCandidate()); | |
3089 return kObjectsOldToOld; | |
3090 } | |
3091 | |
3092 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { | 3116 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { |
3093 duration_ += duration; | 3117 duration_ += duration; |
3094 bytes_compacted_ += bytes_compacted; | 3118 bytes_compacted_ += bytes_compacted; |
3095 } | 3119 } |
3096 | 3120 |
3097 template <IterationMode mode, class Visitor> | |
3098 inline bool EvacuateSinglePage(Page* p, Visitor* visitor); | |
3099 | |
3100 MarkCompactCollector* collector_; | 3121 MarkCompactCollector* collector_; |
3101 | 3122 |
3102 // Locally cached collector data. | 3123 // Locally cached collector data. |
3103 CompactionSpaceCollection compaction_spaces_; | 3124 CompactionSpaceCollection compaction_spaces_; |
3104 base::HashMap local_pretenuring_feedback_; | 3125 base::HashMap local_pretenuring_feedback_; |
3105 | 3126 |
3106 // Visitors for the corresponding spaces. | 3127 // Visitors for the corresponding spaces. |
3107 EvacuateNewSpaceVisitor new_space_visitor_; | 3128 EvacuateNewSpaceVisitor new_space_visitor_; |
3108 EvacuateNewSpacePageVisitor new_space_page_visitor; | 3129 EvacuateNewSpacePageVisitor new_space_page_visitor; |
3109 EvacuateOldSpaceVisitor old_space_visitor_; | 3130 EvacuateOldSpaceVisitor old_space_visitor_; |
3110 | 3131 |
3111 // Bookkeeping info. | 3132 // Bookkeeping info. |
3112 double duration_; | 3133 double duration_; |
3113 intptr_t bytes_compacted_; | 3134 intptr_t bytes_compacted_; |
3114 }; | 3135 }; |
3115 | 3136 |
3116 template <MarkCompactCollector::IterationMode mode, class Visitor> | 3137 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) { |
3117 bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p, | |
3118 Visitor* visitor) { | |
3119 bool success = false; | 3138 bool success = false; |
3120 DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() || | 3139 DCHECK(page->SweepingDone()); |
3121 p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)); | 3140 int saved_live_bytes = page->LiveBytes(); |
3122 int saved_live_bytes = p->LiveBytes(); | 3141 double evacuation_time = 0.0; |
3123 double evacuation_time; | 3142 Heap* heap = page->heap(); |
3124 { | 3143 { |
3125 AlwaysAllocateScope always_allocate(heap()->isolate()); | 3144 AlwaysAllocateScope always_allocate(heap->isolate()); |
3126 TimedScope timed_scope(&evacuation_time); | 3145 TimedScope timed_scope(&evacuation_time); |
3127 success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode); | 3146 switch (ComputeEvacuationMode(page)) { |
| 3147 case kObjectsNewToOld: |
| 3148 success = collector_->VisitLiveObjects(page, &new_space_visitor_, |
| 3149 kClearMarkbits); |
| 3150 ArrayBufferTracker::ProcessBuffers( |
| 3151 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
| 3152 DCHECK(success); |
| 3153 break; |
| 3154 case kPageNewToOld: |
| 3155 success = collector_->VisitLiveObjects(page, &new_space_page_visitor, |
| 3156 kKeepMarking); |
| 3157 // ArrayBufferTracker will be updated during sweeping. |
| 3158 DCHECK(success); |
| 3159 break; |
| 3160 case kPageNewToNew: |
| 3161 new_space_page_visitor.account_semispace_copied(page->LiveBytes()); |
| 3162 // ArrayBufferTracker will be updated during sweeping. |
| 3163 success = true; |
| 3164 break; |
| 3165 case kObjectsOldToOld: |
| 3166 success = collector_->VisitLiveObjects(page, &old_space_visitor_, |
| 3167 kClearMarkbits); |
| 3168 if (!success) { |
| 3169 // Aborted compaction page. We have to record slots here, since we |
| 3170 // might not have recorded them in the first place. |
| 3171 // Note: We mark the page as aborted here to be able to record slots |
| 3172 // for code objects in |RecordMigratedSlotVisitor|. |
| 3173 page->SetFlag(Page::COMPACTION_WAS_ABORTED); |
| 3174 EvacuateRecordOnlyVisitor record_visitor(collector_->heap()); |
| 3175 success = |
| 3176 collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking); |
| 3177 ArrayBufferTracker::ProcessBuffers( |
| 3178 page, ArrayBufferTracker::kUpdateForwardedKeepOthers); |
| 3179 DCHECK(success); |
| 3180 // We need to return failure here to indicate that we want this page |
| 3181 // added to the sweeper. |
| 3182 success = false; |
| 3183 } else { |
| 3184 ArrayBufferTracker::ProcessBuffers( |
| 3185 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
| 3186 } |
| 3187 break; |
| 3188 default: |
| 3189 UNREACHABLE(); |
| 3190 } |
3128 } | 3191 } |
| 3192 ReportCompactionProgress(evacuation_time, saved_live_bytes); |
3129 if (FLAG_trace_evacuation) { | 3193 if (FLAG_trace_evacuation) { |
3130 const char age_mark_tag = | 3194 PrintIsolate(heap->isolate(), |
3131 !p->InNewSpace() | 3195 "evacuation[%p]: page=%p new_space=%d " |
3132 ? 'x' | 3196 "page_evacuation=%d executable=%d contains_age_mark=%d " |
3133 : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) | 3197 "live_bytes=%d time=%f\n", |
3134 ? '>' | 3198 static_cast<void*>(this), static_cast<void*>(page), |
3135 : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<' | 3199 page->InNewSpace(), |
3136 : '#'; | 3200 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) || |
3137 PrintIsolate(heap()->isolate(), | 3201 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION), |
3138 "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c " | 3202 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE), |
3139 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n", | 3203 page->Contains(heap->new_space()->age_mark()), |
3140 static_cast<void*>(this), static_cast<void*>(p), | 3204 saved_live_bytes, evacuation_time); |
3141 p->InNewSpace(), age_mark_tag, | |
3142 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION), | |
3143 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes, | |
3144 evacuation_time); | |
3145 } | |
3146 if (success) { | |
3147 ReportCompactionProgress(evacuation_time, saved_live_bytes); | |
3148 } | 3205 } |
3149 return success; | 3206 return success; |
3150 } | 3207 } |
3151 | 3208 |
3152 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) { | |
3153 bool result = false; | |
3154 DCHECK(page->SweepingDone()); | |
3155 switch (ComputeEvacuationMode(page)) { | |
3156 case kObjectsNewToOld: | |
3157 result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_); | |
3158 ArrayBufferTracker::ProcessBuffers( | |
3159 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
3160 DCHECK(result); | |
3161 USE(result); | |
3162 break; | |
3163 case kPageNewToOld: | |
3164 result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor); | |
3165 // ArrayBufferTracker will be updated during sweeping. | |
3166 DCHECK(result); | |
3167 USE(result); | |
3168 break; | |
3169 case kObjectsOldToOld: | |
3170 result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_); | |
3171 if (!result) { | |
3172 // Aborted compaction page. We have to record slots here, since we might | |
3173 // not have recorded them in first place. | |
3174 // Note: We mark the page as aborted here to be able to record slots | |
3175 // for code objects in |RecordMigratedSlotVisitor|. | |
3176 page->SetFlag(Page::COMPACTION_WAS_ABORTED); | |
3177 EvacuateRecordOnlyVisitor record_visitor(collector_->heap()); | |
3178 result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor); | |
3179 ArrayBufferTracker::ProcessBuffers( | |
3180 page, ArrayBufferTracker::kUpdateForwardedKeepOthers); | |
3181 DCHECK(result); | |
3182 USE(result); | |
3183 // We need to return failure here to indicate that we want this page | |
3184 // added to the sweeper. | |
3185 return false; | |
3186 } | |
3187 ArrayBufferTracker::ProcessBuffers( | |
3188 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
3189 | |
3190 break; | |
3191 default: | |
3192 UNREACHABLE(); | |
3193 } | |
3194 return result; | |
3195 } | |
3196 | |
3197 void MarkCompactCollector::Evacuator::Finalize() { | 3209 void MarkCompactCollector::Evacuator::Finalize() { |
3198 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); | 3210 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); |
3199 heap()->code_space()->MergeCompactionSpace( | 3211 heap()->code_space()->MergeCompactionSpace( |
3200 compaction_spaces_.Get(CODE_SPACE)); | 3212 compaction_spaces_.Get(CODE_SPACE)); |
3201 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); | 3213 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); |
3202 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + | 3214 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + |
3203 new_space_page_visitor.promoted_size()); | 3215 new_space_page_visitor.promoted_size()); |
3204 heap()->IncrementSemiSpaceCopiedObjectSize( | 3216 heap()->IncrementSemiSpaceCopiedObjectSize( |
3205 new_space_visitor_.semispace_copied_size()); | 3217 new_space_visitor_.semispace_copied_size() + |
| 3218 new_space_page_visitor.semispace_copied_size()); |
3206 heap()->IncrementYoungSurvivorsCounter( | 3219 heap()->IncrementYoungSurvivorsCounter( |
3207 new_space_visitor_.promoted_size() + | 3220 new_space_visitor_.promoted_size() + |
3208 new_space_visitor_.semispace_copied_size() + | 3221 new_space_visitor_.semispace_copied_size() + |
3209 new_space_page_visitor.promoted_size()); | 3222 new_space_page_visitor.promoted_size() + |
| 3223 new_space_page_visitor.semispace_copied_size()); |
3210 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); | 3224 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); |
3211 } | 3225 } |
3212 | 3226 |
3213 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, | 3227 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, |
3214 intptr_t live_bytes) { | 3228 intptr_t live_bytes) { |
3215 if (!FLAG_parallel_compaction) return 1; | 3229 if (!FLAG_parallel_compaction) return 1; |
3216 // Compute the number of needed tasks based on a target compaction time, the | 3230 // Compute the number of needed tasks based on a target compaction time, the |
3217 // profiled compaction speed and marked live memory. | 3231 // profiled compaction speed and marked live memory. |
3218 // | 3232 // |
3219 // The number of parallel compaction tasks is limited by: | 3233 // The number of parallel compaction tasks is limited by: |
(...skipping 27 matching lines...) |
3247 | 3261 |
3248 static const bool NeedSequentialFinalization = true; | 3262 static const bool NeedSequentialFinalization = true; |
3249 | 3263 |
3250 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, | 3264 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, |
3251 MemoryChunk* chunk, PerPageData) { | 3265 MemoryChunk* chunk, PerPageData) { |
3252 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); | 3266 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); |
3253 } | 3267 } |
3254 | 3268 |
3255 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, | 3269 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, |
3256 bool success, PerPageData data) { | 3270 bool success, PerPageData data) { |
3257 if (chunk->InNewSpace()) { | 3271 using Evacuator = MarkCompactCollector::Evacuator; |
3258 DCHECK(success); | 3272 Page* p = static_cast<Page*>(chunk); |
3259 } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { | 3273 switch (Evacuator::ComputeEvacuationMode(p)) { |
3260 DCHECK(success); | 3274 case Evacuator::kPageNewToOld: |
3261 Page* p = static_cast<Page*>(chunk); | 3275 break; |
3262 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); | 3276 case Evacuator::kPageNewToNew: |
3263 p->ForAllFreeListCategories( | 3277 DCHECK(success); |
3264 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); | 3278 break; |
3265 heap->mark_compact_collector()->sweeper().AddLatePage( | 3279 case Evacuator::kObjectsNewToOld: |
3266 p->owner()->identity(), p); | 3280 DCHECK(success); |
3267 } else { | 3281 break; |
3268 Page* p = static_cast<Page*>(chunk); | 3282 case Evacuator::kObjectsOldToOld: |
3269 if (success) { | 3283 if (success) { |
3270 DCHECK(p->IsEvacuationCandidate()); | 3284 DCHECK(p->IsEvacuationCandidate()); |
3271 DCHECK(p->SweepingDone()); | 3285 DCHECK(p->SweepingDone()); |
3272 p->Unlink(); | 3286 p->Unlink(); |
3273 } else { | 3287 } else { |
3274 // We have partially compacted the page, i.e., some objects may have | 3288 // We have partially compacted the page, i.e., some objects may have |
3275 // moved, others are still in place. | 3289 // moved, others are still in place. |
3276 p->ClearEvacuationCandidate(); | 3290 p->ClearEvacuationCandidate(); |
3277 // Slots have already been recorded so we just need to add it to the | 3291 // Slots have already been recorded so we just need to add it to the |
3278 // sweeper. | 3292 // sweeper, which will happen after updating pointers. |
3279 *data += 1; | 3293 *data += 1; |
3280 } | 3294 } |
| 3295 break; |
| 3296 default: |
| 3297 UNREACHABLE(); |
3281 } | 3298 } |
3282 } | 3299 } |
3283 }; | 3300 }; |
3284 | 3301 |
3285 void MarkCompactCollector::EvacuatePagesInParallel() { | 3302 void MarkCompactCollector::EvacuatePagesInParallel() { |
3286 PageParallelJob<EvacuationJobTraits> job( | 3303 PageParallelJob<EvacuationJobTraits> job( |
3287 heap_, heap_->isolate()->cancelable_task_manager(), | 3304 heap_, heap_->isolate()->cancelable_task_manager(), |
3288 &page_parallel_job_semaphore_); | 3305 &page_parallel_job_semaphore_); |
3289 | 3306 |
3290 int abandoned_pages = 0; | 3307 int abandoned_pages = 0; |
3291 intptr_t live_bytes = 0; | 3308 intptr_t live_bytes = 0; |
3292 for (Page* page : evacuation_candidates_) { | 3309 for (Page* page : evacuation_candidates_) { |
3293 live_bytes += page->LiveBytes(); | 3310 live_bytes += page->LiveBytes(); |
3294 job.AddPage(page, &abandoned_pages); | 3311 job.AddPage(page, &abandoned_pages); |
3295 } | 3312 } |
3296 | 3313 |
3297 const Address age_mark = heap()->new_space()->age_mark(); | 3314 const Address age_mark = heap()->new_space()->age_mark(); |
3298 for (Page* page : newspace_evacuation_candidates_) { | 3315 for (Page* page : newspace_evacuation_candidates_) { |
3299 live_bytes += page->LiveBytes(); | 3316 live_bytes += page->LiveBytes(); |
3300 if (!page->NeverEvacuate() && | 3317 if (!page->NeverEvacuate() && |
3301 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) && | 3318 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) && |
3302 page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) && | |
3303 !page->Contains(age_mark)) { | 3319 !page->Contains(age_mark)) { |
3304 EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space()); | 3320 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { |
| 3321 EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space()); |
| 3322 } else { |
| 3323 EvacuateNewSpacePageVisitor::MoveToToSpace(page); |
| 3324 } |
3305 } | 3325 } |
| 3326 |
3306 job.AddPage(page, &abandoned_pages); | 3327 job.AddPage(page, &abandoned_pages); |
3307 } | 3328 } |
3308 DCHECK_GE(job.NumberOfPages(), 1); | 3329 DCHECK_GE(job.NumberOfPages(), 1); |
3309 | 3330 |
3310 // Used for trace summary. | 3331 // Used for trace summary. |
3311 double compaction_speed = 0; | 3332 double compaction_speed = 0; |
3312 if (FLAG_trace_evacuation) { | 3333 if (FLAG_trace_evacuation) { |
3313 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3334 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
3314 } | 3335 } |
3315 | 3336 |
(...skipping 33 matching lines...) |
3349 return map_word.ToForwardingAddress(); | 3370 return map_word.ToForwardingAddress(); |
3350 } | 3371 } |
3351 } | 3372 } |
3352 return object; | 3373 return object; |
3353 } | 3374 } |
3354 }; | 3375 }; |
3355 | 3376 |
3356 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, | 3377 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, |
3357 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, | 3378 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, |
3358 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, | 3379 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, |
| 3380 MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode, |
3359 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> | 3381 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> |
3360 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, | 3382 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, |
3361 ObjectVisitor* v) { | 3383 ObjectVisitor* v) { |
3362 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); | 3384 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); |
3363 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); | 3385 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); |
3364 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, | 3386 DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) || |
3365 space->identity() == CODE_SPACE); | 3387 (skip_list_mode == REBUILD_SKIP_LIST)); |
3366 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); | 3388 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); |
3367 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY); | 3389 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY); |
3368 | 3390 |
3369 // Before we sweep objects on the page, we free dead array buffers, which | 3391 // Before we sweep objects on the page, we free dead array buffers, which |
3370 // requires valid mark bits. | 3392 // requires valid mark bits. |
3371 ArrayBufferTracker::FreeDead(p); | 3393 ArrayBufferTracker::FreeDead(p); |
3372 | 3394 |
3373 Address free_start = p->area_start(); | 3395 Address free_start = p->area_start(); |
3374 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); | 3396 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
3375 | 3397 |
(...skipping 12 matching lines...) |
3388 LiveObjectIterator<kBlackObjects> it(p); | 3410 LiveObjectIterator<kBlackObjects> it(p); |
3389 HeapObject* object = NULL; | 3411 HeapObject* object = NULL; |
3390 while ((object = it.Next()) != NULL) { | 3412 while ((object = it.Next()) != NULL) { |
3391 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3413 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
3392 Address free_end = object->address(); | 3414 Address free_end = object->address(); |
3393 if (free_end != free_start) { | 3415 if (free_end != free_start) { |
3394 int size = static_cast<int>(free_end - free_start); | 3416 int size = static_cast<int>(free_end - free_start); |
3395 if (free_space_mode == ZAP_FREE_SPACE) { | 3417 if (free_space_mode == ZAP_FREE_SPACE) { |
3396 memset(free_start, 0xcc, size); | 3418 memset(free_start, 0xcc, size); |
3397 } | 3419 } |
3398 freed_bytes = space->UnaccountedFree(free_start, size); | 3420 if (free_list_mode == REBUILD_FREE_LIST) { |
3399 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 3421 freed_bytes = space->UnaccountedFree(free_start, size); |
| 3422 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
| 3423 } else { |
| 3424 p->heap()->CreateFillerObjectAt(free_start, size, |
| 3425 ClearRecordedSlots::kNo); |
| 3426 } |
3400 } | 3427 } |
3401 Map* map = object->synchronized_map(); | 3428 Map* map = object->synchronized_map(); |
3402 int size = object->SizeFromMap(map); | 3429 int size = object->SizeFromMap(map); |
3403 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { | 3430 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { |
3404 object->IterateBody(map->instance_type(), size, v); | 3431 object->IterateBody(map->instance_type(), size, v); |
3405 } | 3432 } |
3406 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { | 3433 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { |
3407 int new_region_start = SkipList::RegionNumber(free_end); | 3434 int new_region_start = SkipList::RegionNumber(free_end); |
3408 int new_region_end = | 3435 int new_region_end = |
3409 SkipList::RegionNumber(free_end + size - kPointerSize); | 3436 SkipList::RegionNumber(free_end + size - kPointerSize); |
3410 if (new_region_start != curr_region || new_region_end != curr_region) { | 3437 if (new_region_start != curr_region || new_region_end != curr_region) { |
3411 skip_list->AddObject(free_end, size); | 3438 skip_list->AddObject(free_end, size); |
3412 curr_region = new_region_end; | 3439 curr_region = new_region_end; |
3413 } | 3440 } |
3414 } | 3441 } |
3415 free_start = free_end + size; | 3442 free_start = free_end + size; |
3416 } | 3443 } |
3417 | 3444 |
3418 // Clear the mark bits of that page and reset live bytes count. | 3445 // Clear the mark bits of that page and reset live bytes count. |
3419 Bitmap::Clear(p); | 3446 Bitmap::Clear(p); |
3420 | 3447 |
3421 if (free_start != p->area_end()) { | 3448 if (free_start != p->area_end()) { |
3422 int size = static_cast<int>(p->area_end() - free_start); | 3449 int size = static_cast<int>(p->area_end() - free_start); |
3423 if (free_space_mode == ZAP_FREE_SPACE) { | 3450 if (free_space_mode == ZAP_FREE_SPACE) { |
3424 memset(free_start, 0xcc, size); | 3451 memset(free_start, 0xcc, size); |
3425 } | 3452 } |
3426 freed_bytes = space->UnaccountedFree(free_start, size); | 3453 if (free_list_mode == REBUILD_FREE_LIST) { |
3427 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 3454 freed_bytes = space->UnaccountedFree(free_start, size); |
| 3455 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
| 3456 } else { |
| 3457 p->heap()->CreateFillerObjectAt(free_start, size, |
| 3458 ClearRecordedSlots::kNo); |
| 3459 } |
3428 } | 3460 } |
3429 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3461 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
| 3462 if (free_list_mode == IGNORE_FREE_LIST) return 0; |
3430 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | 3463 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
3431 } | 3464 } |
3432 | 3465 |
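The new free_list_mode parameter appears twice in RawSweep(), once for gaps between live objects and once for the page tail, and the two branches capture the whole difference between sweeping paged spaces and sweeping new space: paged spaces hand dead ranges back to a free list for reuse, while new-space pages (IGNORE_FREE_LIST) only need to stay iterable, so dead ranges become filler. A hedged sketch with stand-in types:

    #include <cstdint>
    #include <cstring>

    using Address = uint8_t*;

    struct FreeListStub {          // hypothetical free-list stand-in
      int64_t reusable_bytes = 0;
      void Free(Address, int size) { reusable_bytes += size; }
    };

    void HandleDeadRange(FreeListStub* free_list, Address start, int size,
                         bool rebuild_free_list) {
      if (rebuild_free_list) {
        free_list->Free(start, size);  // REBUILD_FREE_LIST: memory is reusable
      } else {
        // IGNORE_FREE_LIST: stands in for CreateFillerObjectAt(); the range
        // stays walkable for heap iteration but is never handed out again.
        std::memset(start, 0, size);
      }
    }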
3433 void MarkCompactCollector::InvalidateCode(Code* code) { | 3466 void MarkCompactCollector::InvalidateCode(Code* code) { |
3434 if (heap_->incremental_marking()->IsCompacting() && | 3467 if (heap_->incremental_marking()->IsCompacting() && |
3435 !ShouldSkipEvacuationSlotRecording(code)) { | 3468 !ShouldSkipEvacuationSlotRecording(code)) { |
3436 DCHECK(compacting_); | 3469 DCHECK(compacting_); |
3437 | 3470 |
3438 // If the object is white then no slots were recorded on it yet. | 3471 // If the object is white then no slots were recorded on it yet. |
3439 MarkBit mark_bit = Marking::MarkBitFrom(code); | 3472 MarkBit mark_bit = Marking::MarkBitFrom(code); |
(...skipping 95 matching lines...) |
3535 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 3568 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
3536 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | 3569 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); |
3537 Heap::RelocationLock relocation_lock(heap()); | 3570 Heap::RelocationLock relocation_lock(heap()); |
3538 | 3571 |
3539 { | 3572 { |
3540 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); | 3573 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); |
3541 EvacuationScope evacuation_scope(this); | 3574 EvacuationScope evacuation_scope(this); |
3542 | 3575 |
3543 EvacuateNewSpacePrologue(); | 3576 EvacuateNewSpacePrologue(); |
3544 EvacuatePagesInParallel(); | 3577 EvacuatePagesInParallel(); |
3545 EvacuateNewSpaceEpilogue(); | |
3546 heap()->new_space()->set_age_mark(heap()->new_space()->top()); | 3578 heap()->new_space()->set_age_mark(heap()->new_space()->top()); |
3547 } | 3579 } |
3548 | 3580 |
3549 UpdatePointersAfterEvacuation(); | 3581 UpdatePointersAfterEvacuation(); |
3550 | 3582 |
| 3583 if (!heap()->new_space()->Rebalance()) { |
| 3584 FatalProcessOutOfMemory("NewSpace::Rebalance"); |
| 3585 } |
| 3586 |
3551 // Give pages that are queued to be freed back to the OS. Note that filtering | 3587 // Give pages that are queued to be freed back to the OS. Note that filtering |
3552 // slots only handles old space (for unboxed doubles), and thus map space can | 3588 // slots only handles old space (for unboxed doubles), and thus map space can |
3553 // still contain stale pointers. We only free the chunks after pointer updates | 3589 // still contain stale pointers. We only free the chunks after pointer updates |
3554 // to still have access to page headers. | 3590 // to still have access to page headers. |
3555 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | 3591 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
3556 | 3592 |
3557 { | 3593 { |
3558 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | 3594 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); |
3559 | 3595 |
| 3596 for (Page* p : newspace_evacuation_candidates_) { |
| 3597 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { |
| 3598 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); |
| 3599 sweeper().AddLatePage(p->owner()->identity(), p); |
| 3600 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { |
| 3601 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); |
| 3602 p->ForAllFreeListCategories( |
| 3603 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); |
| 3604 sweeper().AddLatePage(p->owner()->identity(), p); |
| 3605 } |
| 3606 } |
| 3607 newspace_evacuation_candidates_.Rewind(0); |
| 3608 |
3560 for (Page* p : evacuation_candidates_) { | 3609 for (Page* p : evacuation_candidates_) { |
3561 // Important: skip list should be cleared only after roots were updated | 3610 // Important: skip list should be cleared only after roots were updated |
3562 // because root iteration traverses the stack and might have to find | 3611 // because root iteration traverses the stack and might have to find |
3563 // code objects from a non-updated pc pointing into an evacuation candidate. | 3612 // code objects from a non-updated pc pointing into an evacuation candidate. |
3564 SkipList* list = p->skip_list(); | 3613 SkipList* list = p->skip_list(); |
3565 if (list != NULL) list->Clear(); | 3614 if (list != NULL) list->Clear(); |
3566 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 3615 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { |
3567 sweeper().AddLatePage(p->owner()->identity(), p); | 3616 sweeper().AddLatePage(p->owner()->identity(), p); |
3568 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); | 3617 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); |
3569 } | 3618 } |
(...skipping 74 matching lines...) |
3644 // Update the corresponding slot. | 3693 // Update the corresponding slot. |
3645 *slot = map_word.ToForwardingAddress(); | 3694 *slot = map_word.ToForwardingAddress(); |
3646 } | 3695 } |
3647 // If the object was in from space before executing the callback and is | 3696 // If the object was in from space before executing the callback and is |
3648 // in to space afterwards, the object is still live. | 3697 // in to space afterwards, the object is still live. |
3649 // Unfortunately, we do not know about the slot. It could be in a | 3698 // Unfortunately, we do not know about the slot. It could be in a |
3650 // just freed free space object. | 3699 // just freed free space object. |
3651 if (heap->InToSpace(*slot)) { | 3700 if (heap->InToSpace(*slot)) { |
3652 return KEEP_SLOT; | 3701 return KEEP_SLOT; |
3653 } | 3702 } |
| 3703 } else if (heap->InToSpace(*slot)) { |
| 3704 DCHECK(Page::FromAddress(reinterpret_cast<HeapObject*>(*slot)->address()) |
| 3705 ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)); |
| 3706 // Slots can be in "to" space after a page has been moved. Since there is |
| 3707 // no forwarding information present we need to check the markbits to |
| 3708 // determine liveness. |
| 3709 if (Marking::IsBlack( |
| 3710 Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot)))) |
| 3711 return KEEP_SLOT; |
3654 } else { | 3712 } else { |
3655 DCHECK(!heap->InNewSpace(*slot)); | 3713 DCHECK(!heap->InNewSpace(*slot)); |
3656 } | 3714 } |
3657 return REMOVE_SLOT; | 3715 return REMOVE_SLOT; |
3658 } | 3716 } |
3659 }; | 3717 }; |
3660 | 3718 |
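The new to-space branch in CheckAndUpdateOldToNewSlot above is the subtle consequence of new->new page moves: such objects were never copied, so there is no forwarding map word to follow, and the mark bits are the only remaining liveness information. A minimal sketch of just that decision (types and fields are hypothetical stand-ins):

    enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

    struct TargetStub {               // hypothetical view of the pointee
      bool on_new_new_promoted_page;  // page has PAGE_NEW_NEW_PROMOTION
      bool marked_black;              // Marking::IsBlack(mark bit)
    };

    SlotCallbackResult CheckToSpaceSlot(const TargetStub& t) {
      if (t.on_new_new_promoted_page) {
        // No forwarding word exists for pages moved in place; a black mark
        // bit is the only proof the object survived the GC.
        return t.marked_black ? KEEP_SLOT : REMOVE_SLOT;
      }
      return REMOVE_SLOT;             // the real code DCHECKs this can't happen
    }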
3661 int NumberOfPointerUpdateTasks(int pages) { | 3719 int NumberOfPointerUpdateTasks(int pages) { |
3662 if (!FLAG_parallel_pointer_update) return 1; | 3720 if (!FLAG_parallel_pointer_update) return 1; |
3663 const int kMaxTasks = 4; | 3721 const int kMaxTasks = 4; |
(...skipping 12 matching lines...) Expand all Loading... |
3676 job.Run(num_tasks, [](int i) { return 0; }); | 3734 job.Run(num_tasks, [](int i) { return 0; }); |
3677 } | 3735 } |
3678 | 3736 |
3679 class ToSpacePointerUpdateJobTraits { | 3737 class ToSpacePointerUpdateJobTraits { |
3680 public: | 3738 public: |
3681 typedef std::pair<Address, Address> PerPageData; | 3739 typedef std::pair<Address, Address> PerPageData; |
3682 typedef PointersUpdatingVisitor* PerTaskData; | 3740 typedef PointersUpdatingVisitor* PerTaskData; |
3683 | 3741 |
3684 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, | 3742 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, |
3685 MemoryChunk* chunk, PerPageData limits) { | 3743 MemoryChunk* chunk, PerPageData limits) { |
| 3744 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { |
| 3745 // New->new promoted pages contain garbage so they require iteration |
| 3746 // using markbits. |
| 3747 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits); |
| 3748 } else { |
| 3749 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits); |
| 3750 } |
| 3751 return true; |
| 3752 } |
| 3753 |
| 3754 static const bool NeedSequentialFinalization = false; |
| 3755 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
| 3756 } |
| 3757 |
| 3758 private: |
| 3759 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, |
| 3760 MemoryChunk* chunk, |
| 3761 PerPageData limits) { |
3686 for (Address cur = limits.first; cur < limits.second;) { | 3762 for (Address cur = limits.first; cur < limits.second;) { |
3687 HeapObject* object = HeapObject::FromAddress(cur); | 3763 HeapObject* object = HeapObject::FromAddress(cur); |
3688 Map* map = object->map(); | 3764 Map* map = object->map(); |
3689 int size = object->SizeFromMap(map); | 3765 int size = object->SizeFromMap(map); |
3690 object->IterateBody(map->instance_type(), size, visitor); | 3766 object->IterateBody(map->instance_type(), size, visitor); |
3691 cur += size; | 3767 cur += size; |
3692 } | 3768 } |
3693 return true; | |
3694 } | 3769 } |
3695 static const bool NeedSequentialFinalization = false; | 3770 |
3696 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 3771 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, |
| 3772 MemoryChunk* chunk, |
| 3773 PerPageData limits) { |
| 3774 LiveObjectIterator<kBlackObjects> it(chunk); |
| 3775 HeapObject* object = NULL; |
| 3776 while ((object = it.Next()) != NULL) { |
| 3777 Map* map = object->map(); |
| 3778 int size = object->SizeFromMap(map); |
| 3779 object->IterateBody(map->instance_type(), size, visitor); |
| 3780 } |
3697 } | 3781 } |
3698 }; | 3782 }; |
3699 | 3783 |
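The split in ToSpacePointerUpdateJobTraits exists because a normal to-space page is densely packed with live objects, so pointer updating can walk it linearly, hopping by each object's size; a new->new promoted page still has dead objects between the live ones and must be walked via mark bits instead. A toy model of the linear walk (sizes stored inline, purely illustrative):

    #include <cstdio>
    #include <vector>

    int main() {
      // Each live object "starts" with its size; on a dense page the cursor
      // can jump object to object exactly like cur += object->SizeFromMap(map).
      std::vector<int> page = {3, 0, 0, 2, 0, 5, 0, 0, 0, 0};
      for (std::size_t cur = 0; cur < page.size();) {
        const int size = page[cur];
        std::printf("visit object at %zu (size %d)\n", cur, size);
        cur += static_cast<std::size_t>(size);
      }
      return 0;
    }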
3700 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { | 3784 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { |
3701 PageParallelJob<ToSpacePointerUpdateJobTraits> job( | 3785 PageParallelJob<ToSpacePointerUpdateJobTraits> job( |
3702 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 3786 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
3703 Address space_start = heap->new_space()->bottom(); | 3787 Address space_start = heap->new_space()->bottom(); |
3704 Address space_end = heap->new_space()->top(); | 3788 Address space_end = heap->new_space()->top(); |
3705 NewSpacePageIterator it(space_start, space_end); | 3789 NewSpacePageIterator it(space_start, space_end); |
3706 while (it.has_next()) { | 3790 while (it.has_next()) { |
(...skipping 55 matching lines...) |
3762 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | 3846 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
3763 } | 3847 } |
3764 | 3848 |
3765 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, | 3849 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, |
3766 int required_freed_bytes, | 3850 int required_freed_bytes, |
3767 int max_pages) { | 3851 int max_pages) { |
3768 int max_freed = 0; | 3852 int max_freed = 0; |
3769 int pages_freed = 0; | 3853 int pages_freed = 0; |
3770 Page* page = nullptr; | 3854 Page* page = nullptr; |
3771 while ((page = GetSweepingPageSafe(identity)) != nullptr) { | 3855 while ((page = GetSweepingPageSafe(identity)) != nullptr) { |
3772 int freed = ParallelSweepPage(page, heap_->paged_space(identity)); | 3856 int freed = ParallelSweepPage(page, identity); |
3773 pages_freed += 1; | 3857 pages_freed += 1; |
3774 DCHECK_GE(freed, 0); | 3858 DCHECK_GE(freed, 0); |
3775 max_freed = Max(max_freed, freed); | 3859 max_freed = Max(max_freed, freed); |
3776 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes)) | 3860 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes)) |
3777 return max_freed; | 3861 return max_freed; |
3778 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed; | 3862 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed; |
3779 } | 3863 } |
3780 return max_freed; | 3864 return max_freed; |
3781 } | 3865 } |
3782 | 3866 |
3783 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, | 3867 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, |
3784 PagedSpace* space) { | 3868 AllocationSpace identity) { |
3785 int max_freed = 0; | 3869 int max_freed = 0; |
3786 if (page->mutex()->TryLock()) { | 3870 if (page->mutex()->TryLock()) { |
3787 // If this page was already swept in the meantime, we can return here. | 3871 // If this page was already swept in the meantime, we can return here. |
3788 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { | 3872 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { |
3789 page->mutex()->Unlock(); | 3873 page->mutex()->Unlock(); |
3790 return 0; | 3874 return 0; |
3791 } | 3875 } |
3792 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3876 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
3793 if (space->identity() == OLD_SPACE) { | 3877 if (identity == NEW_SPACE) { |
| 3878 RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
| 3879 IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr); |
| 3880 } else if (identity == OLD_SPACE) { |
3794 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3881 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
3795 IGNORE_FREE_SPACE>(space, page, NULL); | 3882 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( |
3796 } else if (space->identity() == CODE_SPACE) { | 3883 heap_->paged_space(identity), page, nullptr); |
| 3884 } else if (identity == CODE_SPACE) { |
3797 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, | 3885 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, |
3798 IGNORE_FREE_SPACE>(space, page, NULL); | 3886 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( |
| 3887 heap_->paged_space(identity), page, nullptr); |
3799 } else { | 3888 } else { |
3800 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3889 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
3801 IGNORE_FREE_SPACE>(space, page, NULL); | 3890 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( |
| 3891 heap_->paged_space(identity), page, nullptr); |
3802 } | 3892 } |
3803 { | 3893 { |
3804 base::LockGuard<base::Mutex> guard(&mutex_); | 3894 base::LockGuard<base::Mutex> guard(&mutex_); |
3805 swept_list_[space->identity()].Add(page); | 3895 swept_list_[identity].Add(page); |
3806 } | 3896 } |
3807 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3897 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
3808 page->mutex()->Unlock(); | 3898 page->mutex()->Unlock(); |
3809 } | 3899 } |
3810 return max_freed; | 3900 return max_freed; |
3811 } | 3901 } |
3812 | 3902 |
3813 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { | 3903 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { |
3814 DCHECK(!sweeping_in_progress_); | 3904 DCHECK(!sweeping_in_progress_); |
3815 PrepareToBeSweptPage(space, page); | 3905 PrepareToBeSweptPage(space, page); |
3816 sweeping_list_[space].push_back(page); | 3906 sweeping_list_[space].push_back(page); |
3817 } | 3907 } |
3818 | 3908 |
3819 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, | 3909 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, |
3820 Page* page) { | 3910 Page* page) { |
3821 DCHECK(sweeping_in_progress_); | 3911 DCHECK(sweeping_in_progress_); |
3822 PrepareToBeSweptPage(space, page); | 3912 PrepareToBeSweptPage(space, page); |
3823 late_pages_ = true; | 3913 late_pages_ = true; |
3824 AddSweepingPageSafe(space, page); | 3914 AddSweepingPageSafe(space, page); |
3825 } | 3915 } |
3826 | 3916 |
3827 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, | 3917 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, |
3828 Page* page) { | 3918 Page* page) { |
3829 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); | 3919 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); |
3830 int to_sweep = page->area_size() - page->LiveBytes(); | 3920 int to_sweep = page->area_size() - page->LiveBytes(); |
3831 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); | 3921 if (space != NEW_SPACE) |
| 3922 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); |
3832 } | 3923 } |
3833 | 3924 |
3834 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( | 3925 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( |
3835 AllocationSpace space) { | 3926 AllocationSpace space) { |
3836 base::LockGuard<base::Mutex> guard(&mutex_); | 3927 base::LockGuard<base::Mutex> guard(&mutex_); |
3837 Page* page = nullptr; | 3928 Page* page = nullptr; |
3838 if (!sweeping_list_[space].empty()) { | 3929 if (!sweeping_list_[space].empty()) { |
3839 page = sweeping_list_[space].front(); | 3930 page = sweeping_list_[space].front(); |
3840 sweeping_list_[space].pop_front(); | 3931 sweeping_list_[space].pop_front(); |
3841 } | 3932 } |
(...skipping 43 matching lines...) |
3885 continue; | 3976 continue; |
3886 } | 3977 } |
3887 | 3978 |
3888 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { | 3979 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { |
3889 // We need to sweep the page to get it into an iterable state again. Note | 3980 // We need to sweep the page to get it into an iterable state again. Note |
3890 // that this adds unusable memory into the free list that is later | 3981 // that this adds unusable memory into the free list that is later |
3891 // dropped from the free list again. Since we only use the flag for | 3982 // dropped from the free list again. Since we only use the flag for |
3892 // testing, this is fine. | 3983 // testing, this is fine. |
3893 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3984 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
3894 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, | 3985 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, |
3895 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>( | 3986 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST, |
3896 space, p, nullptr); | 3987 Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr); |
3897 continue; | 3988 continue; |
3898 } | 3989 } |
3899 | 3990 |
3900 // One unused page is kept, all further are released before sweeping them. | 3991 // One unused page is kept, all further are released before sweeping them. |
3901 if (p->LiveBytes() == 0) { | 3992 if (p->LiveBytes() == 0) { |
3902 if (unused_page_present) { | 3993 if (unused_page_present) { |
3903 if (FLAG_gc_verbose) { | 3994 if (FLAG_gc_verbose) { |
3904 PrintIsolate(isolate(), "sweeping: released page: %p", | 3995 PrintIsolate(isolate(), "sweeping: released page: %p", |
3905 static_cast<void*>(p)); | 3996 static_cast<void*>(p)); |
3906 } | 3997 } |
(...skipping 85 matching lines...) |
3992 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4083 MarkBit mark_bit = Marking::MarkBitFrom(host); |
3993 if (Marking::IsBlack(mark_bit)) { | 4084 if (Marking::IsBlack(mark_bit)) { |
3994 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 4085 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
3995 RecordRelocSlot(host, &rinfo, target); | 4086 RecordRelocSlot(host, &rinfo, target); |
3996 } | 4087 } |
3997 } | 4088 } |
3998 } | 4089 } |
3999 | 4090 |
4000 } // namespace internal | 4091 } // namespace internal |
4001 } // namespace v8 | 4092 } // namespace v8 |