OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 454 matching lines...) |
465 AllocationSpace space_to_start) | 465 AllocationSpace space_to_start) |
466 : sweeper_(sweeper), | 466 : sweeper_(sweeper), |
467 pending_sweeper_tasks_(pending_sweeper_tasks), | 467 pending_sweeper_tasks_(pending_sweeper_tasks), |
468 space_to_start_(space_to_start) {} | 468 space_to_start_(space_to_start) {} |
469 | 469 |
470 virtual ~SweeperTask() {} | 470 virtual ~SweeperTask() {} |
471 | 471 |
472 private: | 472 private: |
473 // v8::Task overrides. | 473 // v8::Task overrides. |
474 void Run() override { | 474 void Run() override { |
475 DCHECK_GE(space_to_start_, FIRST_SPACE); | 475 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); |
476 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); | 476 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); |
477 const int offset = space_to_start_ - FIRST_SPACE; | 477 const int offset = space_to_start_ - FIRST_PAGED_SPACE; |
478 const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1; | 478 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; |
479 for (int i = 0; i < num_spaces; i++) { | 479 for (int i = 0; i < num_spaces; i++) { |
480 const int space_id = FIRST_SPACE + ((i + offset) % num_spaces); | 480 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); |
481 DCHECK_GE(space_id, FIRST_SPACE); | 481 DCHECK_GE(space_id, FIRST_PAGED_SPACE); |
482 DCHECK_LE(space_id, LAST_PAGED_SPACE); | 482 DCHECK_LE(space_id, LAST_PAGED_SPACE); |
483 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); | 483 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); |
484 } | 484 } |
485 pending_sweeper_tasks_->Signal(); | 485 pending_sweeper_tasks_->Signal(); |
486 } | 486 } |
487 | 487 |
488 Sweeper* sweeper_; | 488 Sweeper* sweeper_; |
489 base::Semaphore* pending_sweeper_tasks_; | 489 base::Semaphore* pending_sweeper_tasks_; |
490 AllocationSpace space_to_start_; | 490 AllocationSpace space_to_start_; |
491 | 491 |
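The rotation in SweeperTask::Run() staggers which paged space each concurrent task sweeps first, so parallel tasks start on different spaces and contend less on the shared sweeping lists. A minimal standalone sketch of the same index math (hypothetical enum values, not the V8 definitions):

```cpp
#include <cstdio>

// Hypothetical stand-ins for V8's paged-space enum range.
enum AllocationSpace { OLD_SPACE = 0, CODE_SPACE = 1, MAP_SPACE = 2 };
const int kFirstPagedSpace = OLD_SPACE;
const int kLastPagedSpace = MAP_SPACE;

// Each task starts at its own space and wraps around, mirroring the
// offset/modulo loop in Run().
void PrintSweepOrder(AllocationSpace space_to_start) {
  const int offset = space_to_start - kFirstPagedSpace;
  const int num_spaces = kLastPagedSpace - kFirstPagedSpace + 1;
  for (int i = 0; i < num_spaces; i++) {
    printf("%d ", kFirstPagedSpace + ((i + offset) % num_spaces));
  }
  printf("\n");
}

int main() {
  PrintSweepOrder(OLD_SPACE);   // 0 1 2
  PrintSweepOrder(CODE_SPACE);  // 1 2 0
  PrintSweepOrder(MAP_SPACE);   // 2 0 1
}
```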
(...skipping 17 matching lines...) |
509 void MarkCompactCollector::Sweeper::StartSweepingHelper( | 509 void MarkCompactCollector::Sweeper::StartSweepingHelper( |
510 AllocationSpace space_to_start) { | 510 AllocationSpace space_to_start) { |
511 num_sweeping_tasks_.Increment(1); | 511 num_sweeping_tasks_.Increment(1); |
512 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 512 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
513 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), | 513 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), |
514 v8::Platform::kShortRunningTask); | 514 v8::Platform::kShortRunningTask); |
515 } | 515 } |
516 | 516 |
517 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( | 517 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( |
518 Page* page) { | 518 Page* page) { |
| 519 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); |
519 if (!page->SweepingDone()) { | 520 if (!page->SweepingDone()) { |
520 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); | 521 ParallelSweepPage(page, owner); |
521 ParallelSweepPage(page, owner->identity()); | |
522 if (!page->SweepingDone()) { | 522 if (!page->SweepingDone()) { |
523 // We were not able to sweep that page, i.e., a concurrent | 523 // We were not able to sweep that page, i.e., a concurrent |
524 // sweeper thread currently owns this page. Wait for the sweeper | 524 // sweeper thread currently owns this page. Wait for the sweeper |
525 // thread to be done with this page. | 525 // thread to be done with this page. |
526 page->WaitUntilSweepingCompleted(); | 526 page->WaitUntilSweepingCompleted(); |
527 } | 527 } |
528 } | 528 } |
529 } | 529 } |
530 | 530 |
531 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { | 531 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { |
(...skipping 22 matching lines...) |
554 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); | 554 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); |
555 } | 555 } |
556 | 556 |
557 if (FLAG_concurrent_sweeping) { | 557 if (FLAG_concurrent_sweeping) { |
558 while (num_sweeping_tasks_.Value() > 0) { | 558 while (num_sweeping_tasks_.Value() > 0) { |
559 pending_sweeper_tasks_semaphore_.Wait(); | 559 pending_sweeper_tasks_semaphore_.Wait(); |
560 num_sweeping_tasks_.Increment(-1); | 560 num_sweeping_tasks_.Increment(-1); |
561 } | 561 } |
562 } | 562 } |
563 | 563 |
564 ForAllSweepingSpaces([this](AllocationSpace space) { | 564 ForAllSweepingSpaces( |
565 if (space == NEW_SPACE) { | 565 [this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); }); |
566 swept_list_[NEW_SPACE].Clear(); | |
567 } | |
568 DCHECK(sweeping_list_[space].empty()); | |
569 }); | |
570 late_pages_ = false; | 566 late_pages_ = false; |
571 sweeping_in_progress_ = false; | 567 sweeping_in_progress_ = false; |
572 } | 568 } |
573 | 569 |
574 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() { | |
575 if (!sweeping_in_progress_) return; | |
576 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { | |
577 NewSpacePageIterator pit(heap_->new_space()); | |
578 while (pit.has_next()) { | |
579 Page* page = pit.next(); | |
580 SweepOrWaitUntilSweepingCompleted(page); | |
581 } | |
582 } | |
583 } | |
584 | |
585 void MarkCompactCollector::EnsureSweepingCompleted() { | 570 void MarkCompactCollector::EnsureSweepingCompleted() { |
586 if (!sweeper().sweeping_in_progress()) return; | 571 if (!sweeper().sweeping_in_progress()) return; |
587 | 572 |
588 sweeper().EnsureCompleted(); | 573 sweeper().EnsureCompleted(); |
589 heap()->old_space()->RefillFreeList(); | 574 heap()->old_space()->RefillFreeList(); |
590 heap()->code_space()->RefillFreeList(); | 575 heap()->code_space()->RefillFreeList(); |
591 heap()->map_space()->RefillFreeList(); | 576 heap()->map_space()->RefillFreeList(); |
592 | 577 |
593 #ifdef VERIFY_HEAP | 578 #ifdef VERIFY_HEAP |
594 if (FLAG_verify_heap && !evacuation()) { | 579 if (FLAG_verify_heap && !evacuation()) { |
(...skipping 1284 matching lines...) |
1879 AllocationSpace space_to_allocate_; | 1864 AllocationSpace space_to_allocate_; |
1880 intptr_t promoted_size_; | 1865 intptr_t promoted_size_; |
1881 intptr_t semispace_copied_size_; | 1866 intptr_t semispace_copied_size_; |
1882 base::HashMap* local_pretenuring_feedback_; | 1867 base::HashMap* local_pretenuring_feedback_; |
1883 }; | 1868 }; |
1884 | 1869 |
1885 class MarkCompactCollector::EvacuateNewSpacePageVisitor final | 1870 class MarkCompactCollector::EvacuateNewSpacePageVisitor final |
1886 : public MarkCompactCollector::HeapObjectVisitor { | 1871 : public MarkCompactCollector::HeapObjectVisitor { |
1887 public: | 1872 public: |
1888 explicit EvacuateNewSpacePageVisitor(Heap* heap) | 1873 explicit EvacuateNewSpacePageVisitor(Heap* heap) |
1889 : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {} | 1874 : heap_(heap), promoted_size_(0) {} |
1890 | 1875 |
1891 static void MoveToOldSpace(Page* page, PagedSpace* owner) { | 1876 static void TryMoveToOldSpace(Page* page, PagedSpace* owner) { |
1892 page->Unlink(); | 1877 if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) { |
1893 Page* new_page = Page::ConvertNewToOld(page, owner); | 1878 Page* new_page = Page::ConvertNewToOld(page, owner); |
1894 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); | 1879 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); |
1895 } | 1880 } |
1896 | |
1897 static void MoveToToSpace(Page* page) { | |
1898 page->heap()->new_space()->MovePageFromSpaceToSpace(page); | |
1899 page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION); | |
1900 } | 1881 } |
1901 | 1882 |
1902 inline bool Visit(HeapObject* object) { | 1883 inline bool Visit(HeapObject* object) { |
1903 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); | 1884 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); |
1904 object->IterateBodyFast(&visitor); | 1885 object->IterateBodyFast(&visitor); |
1905 promoted_size_ += object->Size(); | 1886 promoted_size_ += object->Size(); |
1906 return true; | 1887 return true; |
1907 } | 1888 } |
1908 | 1889 |
1909 intptr_t promoted_size() { return promoted_size_; } | 1890 intptr_t promoted_size() { return promoted_size_; } |
1910 intptr_t semispace_copied_size() { return semispace_copied_size_; } | |
1911 | |
1912 void account_semispace_copied(intptr_t copied) { | |
1913 semispace_copied_size_ += copied; | |
1914 } | |
1915 | 1891 |
1916 private: | 1892 private: |
1917 Heap* heap_; | 1893 Heap* heap_; |
1918 intptr_t promoted_size_; | 1894 intptr_t promoted_size_; |
1919 intptr_t semispace_copied_size_; | |
1920 }; | 1895 }; |
1921 | 1896 |
1922 class MarkCompactCollector::EvacuateOldSpaceVisitor final | 1897 class MarkCompactCollector::EvacuateOldSpaceVisitor final |
1923 : public MarkCompactCollector::EvacuateVisitorBase { | 1898 : public MarkCompactCollector::EvacuateVisitorBase { |
1924 public: | 1899 public: |
1925 EvacuateOldSpaceVisitor(Heap* heap, | 1900 EvacuateOldSpaceVisitor(Heap* heap, |
1926 CompactionSpaceCollection* compaction_spaces) | 1901 CompactionSpaceCollection* compaction_spaces) |
1927 : EvacuateVisitorBase(heap, compaction_spaces) {} | 1902 : EvacuateVisitorBase(heap, compaction_spaces) {} |
1928 | 1903 |
1929 inline bool Visit(HeapObject* object) override { | 1904 inline bool Visit(HeapObject* object) override { |
(...skipping 1123 matching lines...) |
3053 NewSpace* new_space = heap()->new_space(); | 3028 NewSpace* new_space = heap()->new_space(); |
3054 NewSpacePageIterator it(new_space->bottom(), new_space->top()); | 3029 NewSpacePageIterator it(new_space->bottom(), new_space->top()); |
3055 // Append the list of new space pages to be processed. | 3030 // Append the list of new space pages to be processed. |
3056 while (it.has_next()) { | 3031 while (it.has_next()) { |
3057 newspace_evacuation_candidates_.Add(it.next()); | 3032 newspace_evacuation_candidates_.Add(it.next()); |
3058 } | 3033 } |
3059 new_space->Flip(); | 3034 new_space->Flip(); |
3060 new_space->ResetAllocationInfo(); | 3035 new_space->ResetAllocationInfo(); |
3061 } | 3036 } |
3062 | 3037 |
| 3038 void MarkCompactCollector::EvacuateNewSpaceEpilogue() { |
| 3039 newspace_evacuation_candidates_.Rewind(0); |
| 3040 } |
| 3041 |
3063 class MarkCompactCollector::Evacuator : public Malloced { | 3042 class MarkCompactCollector::Evacuator : public Malloced { |
3064 public: | 3043 public: |
3065 enum EvacuationMode { | |
3066 kObjectsNewToOld, | |
3067 kPageNewToOld, | |
3068 kObjectsOldToOld, | |
3069 kPageNewToNew, | |
3070 }; | |
3071 | |
3072 static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) { | |
3073 // Note: The order of checks is important in this function. | |
3074 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) | |
3075 return kPageNewToOld; | |
3076 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION)) | |
3077 return kPageNewToNew; | |
3078 if (chunk->InNewSpace()) return kObjectsNewToOld; | |
3079 DCHECK(chunk->IsEvacuationCandidate()); | |
3080 return kObjectsOldToOld; | |
3081 } | |
3082 | |
3083 // NewSpacePages with more live bytes than this threshold qualify for fast | 3044 // NewSpacePages with more live bytes than this threshold qualify for fast |
3084 // evacuation. | 3045 // evacuation. |
3085 static int PageEvacuationThreshold() { | 3046 static int PageEvacuationThreshold() { |
3086 if (FLAG_page_promotion) | 3047 if (FLAG_page_promotion) |
3087 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; | 3048 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; |
3088 return Page::kAllocatableMemory + kPointerSize; | 3049 return Page::kAllocatableMemory + kPointerSize; |
3089 } | 3050 } |
3090 | 3051 |
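For PageEvacuationThreshold(), a worked example may help: with an assumed 70% threshold and an assumed ~500 KB of allocatable memory per page, a page qualifies for fast evacuation once roughly 350 KB of it is live; with page promotion disabled the returned value exceeds any possible LiveBytes(), so no page ever qualifies. A sketch under those assumed constants (not the real Page or FLAG_* values):

```cpp
#include <cstdio>

// Assumed stand-ins; the real values live in Page and the FLAG_* defaults.
const int kAllocatableMemory = 500 * 1024;
const int kPointerSize = 8;

int PageEvacuationThreshold(bool page_promotion, int threshold_percent) {
  if (page_promotion)
    return threshold_percent * kAllocatableMemory / 100;
  // One pointer past the whole page: LiveBytes() can never exceed this,
  // so the fast path is effectively disabled.
  return kAllocatableMemory + kPointerSize;
}

int main() {
  printf("%d\n", PageEvacuationThreshold(true, 70));   // 358400
  printf("%d\n", PageEvacuationThreshold(false, 70));  // 512008
}
```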
3091 explicit Evacuator(MarkCompactCollector* collector) | 3052 explicit Evacuator(MarkCompactCollector* collector) |
3092 : collector_(collector), | 3053 : collector_(collector), |
3093 compaction_spaces_(collector->heap()), | 3054 compaction_spaces_(collector->heap()), |
3094 local_pretenuring_feedback_(base::HashMap::PointersMatch, | 3055 local_pretenuring_feedback_(base::HashMap::PointersMatch, |
3095 kInitialLocalPretenuringFeedbackCapacity), | 3056 kInitialLocalPretenuringFeedbackCapacity), |
3096 new_space_visitor_(collector->heap(), &compaction_spaces_, | 3057 new_space_visitor_(collector->heap(), &compaction_spaces_, |
3097 &local_pretenuring_feedback_), | 3058 &local_pretenuring_feedback_), |
3098 new_space_page_visitor(collector->heap()), | 3059 new_space_page_visitor(collector->heap()), |
3099 old_space_visitor_(collector->heap(), &compaction_spaces_), | 3060 old_space_visitor_(collector->heap(), &compaction_spaces_), |
3100 duration_(0.0), | 3061 duration_(0.0), |
3101 bytes_compacted_(0) {} | 3062 bytes_compacted_(0) {} |
3102 | 3063 |
3103 inline bool EvacuatePage(Page* chunk); | 3064 inline bool EvacuatePage(Page* chunk); |
3104 | 3065 |
3105 // Merge back locally cached info sequentially. Note that this method needs | 3066 // Merge back locally cached info sequentially. Note that this method needs |
3106 // to be called from the main thread. | 3067 // to be called from the main thread. |
3107 inline void Finalize(); | 3068 inline void Finalize(); |
3108 | 3069 |
3109 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } | 3070 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } |
3110 | 3071 |
3111 private: | 3072 private: |
| 3073 enum EvacuationMode { |
| 3074 kObjectsNewToOld, |
| 3075 kPageNewToOld, |
| 3076 kObjectsOldToOld, |
| 3077 }; |
| 3078 |
3112 static const int kInitialLocalPretenuringFeedbackCapacity = 256; | 3079 static const int kInitialLocalPretenuringFeedbackCapacity = 256; |
3113 | 3080 |
3114 inline Heap* heap() { return collector_->heap(); } | 3081 inline Heap* heap() { return collector_->heap(); } |
3115 | 3082 |
| 3083 inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) { |
| 3084 // Note: The order of checks is important in this function. |
| 3085 if (chunk->InNewSpace()) return kObjectsNewToOld; |
| 3086 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) |
| 3087 return kPageNewToOld; |
| 3088 DCHECK(chunk->IsEvacuationCandidate()); |
| 3089 return kObjectsOldToOld; |
| 3090 } |
| 3091 |
3116 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { | 3092 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { |
3117 duration_ += duration; | 3093 duration_ += duration; |
3118 bytes_compacted_ += bytes_compacted; | 3094 bytes_compacted_ += bytes_compacted; |
3119 } | 3095 } |
3120 | 3096 |
| 3097 template <IterationMode mode, class Visitor> |
| 3098 inline bool EvacuateSinglePage(Page* p, Visitor* visitor); |
| 3099 |
3121 MarkCompactCollector* collector_; | 3100 MarkCompactCollector* collector_; |
3122 | 3101 |
3123 // Locally cached collector data. | 3102 // Locally cached collector data. |
3124 CompactionSpaceCollection compaction_spaces_; | 3103 CompactionSpaceCollection compaction_spaces_; |
3125 base::HashMap local_pretenuring_feedback_; | 3104 base::HashMap local_pretenuring_feedback_; |
3126 | 3105 |
3127 // Visitors for the corresponding spaces. | 3106 // Visitors for the corresponding spaces. |
3128 EvacuateNewSpaceVisitor new_space_visitor_; | 3107 EvacuateNewSpaceVisitor new_space_visitor_; |
3129 EvacuateNewSpacePageVisitor new_space_page_visitor; | 3108 EvacuateNewSpacePageVisitor new_space_page_visitor; |
3130 EvacuateOldSpaceVisitor old_space_visitor_; | 3109 EvacuateOldSpaceVisitor old_space_visitor_; |
3131 | 3110 |
3132 // Bookkeeping info. | 3111 // Bookkeeping info. |
3133 double duration_; | 3112 double duration_; |
3134 intptr_t bytes_compacted_; | 3113 intptr_t bytes_compacted_; |
3135 }; | 3114 }; |
3136 | 3115 |
3137 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) { | 3116 template <MarkCompactCollector::IterationMode mode, class Visitor> |
| 3117 bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p, |
| 3118 Visitor* visitor) { |
3138 bool success = false; | 3119 bool success = false; |
3139 DCHECK(page->SweepingDone()); | 3120 DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() || |
3140 int saved_live_bytes = page->LiveBytes(); | 3121 p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)); |
3141 double evacuation_time = 0.0; | 3122 int saved_live_bytes = p->LiveBytes(); |
3142 Heap* heap = page->heap(); | 3123 double evacuation_time; |
3143 { | 3124 { |
3144 AlwaysAllocateScope always_allocate(heap->isolate()); | 3125 AlwaysAllocateScope always_allocate(heap()->isolate()); |
3145 TimedScope timed_scope(&evacuation_time); | 3126 TimedScope timed_scope(&evacuation_time); |
3146 switch (ComputeEvacuationMode(page)) { | 3127 success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode); |
3147 case kObjectsNewToOld: | |
3148 success = collector_->VisitLiveObjects(page, &new_space_visitor_, | |
3149 kClearMarkbits); | |
3150 ArrayBufferTracker::ProcessBuffers( | |
3151 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
3152 DCHECK(success); | |
3153 break; | |
3154 case kPageNewToOld: | |
3155 success = collector_->VisitLiveObjects(page, &new_space_page_visitor, | |
3156 kKeepMarking); | |
3157 // ArrayBufferTracker will be updated during sweeping. | |
3158 DCHECK(success); | |
3159 break; | |
3160 case kPageNewToNew: | |
3161 new_space_page_visitor.account_semispace_copied(page->LiveBytes()); | |
3162 // ArrayBufferTracker will be updated during sweeping. | |
3163 success = true; | |
3164 break; | |
3165 case kObjectsOldToOld: | |
3166 success = collector_->VisitLiveObjects(page, &old_space_visitor_, | |
3167 kClearMarkbits); | |
3168 if (!success) { | |
3169 // Aborted compaction page. We have to record slots here, since we | |
3170 // might not have recorded them in the first place. |
3171 // Note: We mark the page as aborted here to be able to record slots | |
3172 // for code objects in |RecordMigratedSlotVisitor|. | |
3173 page->SetFlag(Page::COMPACTION_WAS_ABORTED); | |
3174 EvacuateRecordOnlyVisitor record_visitor(collector_->heap()); | |
3175 success = | |
3176 collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking); | |
3177 ArrayBufferTracker::ProcessBuffers( | |
3178 page, ArrayBufferTracker::kUpdateForwardedKeepOthers); | |
3179 DCHECK(success); | |
3180 // We need to return failure here to indicate that we want this page | |
3181 // added to the sweeper. | |
3182 success = false; | |
3183 } else { | |
3184 ArrayBufferTracker::ProcessBuffers( | |
3185 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
3186 } | |
3187 break; | |
3188 default: | |
3189 UNREACHABLE(); | |
3190 } | |
3191 } | 3128 } |
3192 ReportCompactionProgress(evacuation_time, saved_live_bytes); | |
3193 if (FLAG_trace_evacuation) { | 3129 if (FLAG_trace_evacuation) { |
3194 PrintIsolate(heap->isolate(), | 3130 const char age_mark_tag = |
3195 "evacuation[%p]: page=%p new_space=%d " | 3131 !p->InNewSpace() |
3196 "page_evacuation=%d executable=%d contains_age_mark=%d " | 3132 ? 'x' |
3197 "live_bytes=%d time=%f\n", | 3133 : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) |
3198 static_cast<void*>(this), static_cast<void*>(page), | 3134 ? '>' |
3199 page->InNewSpace(), | 3135 : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<' |
3200 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) || | 3136 : '#'; |
3201 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION), | 3137 PrintIsolate(heap()->isolate(), |
3202 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE), | 3138 "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c " |
3203 page->Contains(heap->new_space()->age_mark()), | 3139 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n", |
3204 saved_live_bytes, evacuation_time); | 3140 static_cast<void*>(this), static_cast<void*>(p), |
| 3141 p->InNewSpace(), age_mark_tag, |
| 3142 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION), |
| 3143 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes, |
| 3144 evacuation_time); |
| 3145 } |
| 3146 if (success) { |
| 3147 ReportCompactionProgress(evacuation_time, saved_live_bytes); |
3205 } | 3148 } |
3206 return success; | 3149 return success; |
3207 } | 3150 } |
3208 | 3151 |
| 3152 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) { |
| 3153 bool result = false; |
| 3154 DCHECK(page->SweepingDone()); |
| 3155 switch (ComputeEvacuationMode(page)) { |
| 3156 case kObjectsNewToOld: |
| 3157 result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_); |
| 3158 ArrayBufferTracker::ProcessBuffers( |
| 3159 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
| 3160 DCHECK(result); |
| 3161 USE(result); |
| 3162 break; |
| 3163 case kPageNewToOld: |
| 3164 result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor); |
| 3165 // ArrayBufferTracker will be updated during sweeping. |
| 3166 DCHECK(result); |
| 3167 USE(result); |
| 3168 break; |
| 3169 case kObjectsOldToOld: |
| 3170 result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_); |
| 3171 if (!result) { |
| 3172 // Aborted compaction page. We have to record slots here, since we might |
| 3173 // not have recorded them in the first place. |
| 3174 // Note: We mark the page as aborted here to be able to record slots |
| 3175 // for code objects in |RecordMigratedSlotVisitor|. |
| 3176 page->SetFlag(Page::COMPACTION_WAS_ABORTED); |
| 3177 EvacuateRecordOnlyVisitor record_visitor(collector_->heap()); |
| 3178 result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor); |
| 3179 ArrayBufferTracker::ProcessBuffers( |
| 3180 page, ArrayBufferTracker::kUpdateForwardedKeepOthers); |
| 3181 DCHECK(result); |
| 3182 USE(result); |
| 3183 // We need to return failure here to indicate that we want this page |
| 3184 // added to the sweeper. |
| 3185 return false; |
| 3186 } |
| 3187 ArrayBufferTracker::ProcessBuffers( |
| 3188 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
| 3189 |
| 3190 break; |
| 3191 default: |
| 3192 UNREACHABLE(); |
| 3193 } |
| 3194 return result; |
| 3195 } |
| 3196 |
3209 void MarkCompactCollector::Evacuator::Finalize() { | 3197 void MarkCompactCollector::Evacuator::Finalize() { |
3210 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); | 3198 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); |
3211 heap()->code_space()->MergeCompactionSpace( | 3199 heap()->code_space()->MergeCompactionSpace( |
3212 compaction_spaces_.Get(CODE_SPACE)); | 3200 compaction_spaces_.Get(CODE_SPACE)); |
3213 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); | 3201 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); |
3214 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + | 3202 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + |
3215 new_space_page_visitor.promoted_size()); | 3203 new_space_page_visitor.promoted_size()); |
3216 heap()->IncrementSemiSpaceCopiedObjectSize( | 3204 heap()->IncrementSemiSpaceCopiedObjectSize( |
3217 new_space_visitor_.semispace_copied_size() + | 3205 new_space_visitor_.semispace_copied_size()); |
3218 new_space_page_visitor.semispace_copied_size()); | |
3219 heap()->IncrementYoungSurvivorsCounter( | 3206 heap()->IncrementYoungSurvivorsCounter( |
3220 new_space_visitor_.promoted_size() + | 3207 new_space_visitor_.promoted_size() + |
3221 new_space_visitor_.semispace_copied_size() + | 3208 new_space_visitor_.semispace_copied_size() + |
3222 new_space_page_visitor.promoted_size() + | 3209 new_space_page_visitor.promoted_size()); |
3223 new_space_page_visitor.semispace_copied_size()); | |
3224 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); | 3210 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); |
3225 } | 3211 } |
3226 | 3212 |
3227 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, | 3213 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, |
3228 intptr_t live_bytes) { | 3214 intptr_t live_bytes) { |
3229 if (!FLAG_parallel_compaction) return 1; | 3215 if (!FLAG_parallel_compaction) return 1; |
3230 // Compute the number of needed tasks based on a target compaction time, the | 3216 // Compute the number of needed tasks based on a target compaction time, the |
3231 // profiled compaction speed and marked live memory. | 3217 // profiled compaction speed and marked live memory. |
3232 // | 3218 // |
3233 // The number of parallel compaction tasks is limited by: | 3219 // The number of parallel compaction tasks is limited by: |
(...skipping 27 matching lines...) |
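The elided lines spell out the exact limits, which are not reproduced here. As a hedged sketch of the stated idea only — size the task count so that live bytes divided by the profiled compaction speed meets a target time, then clamp — with the clamps and constants below being assumptions rather than the V8 values:

```cpp
#include <algorithm>
#include <cstdio>

// Assumed shape: tasks ~ live_bytes / (target_time * speed), clamped by
// the number of pages to process and the available cores.
int NumberOfCompactionTasks(int pages, long live_bytes,
                            double speed_bytes_per_ms, int cores) {
  const double kTargetMs = 1.0;  // assumed target compaction time
  int tasks = 1;
  if (speed_bytes_per_ms > 0) {
    tasks = static_cast<int>(live_bytes / (kTargetMs * speed_bytes_per_ms)) + 1;
  }
  return std::max(1, std::min({tasks, pages, cores}));
}

int main() {
  // 4 MB live, 1 MB/ms profiled speed, 8 pages, 4 cores -> 4 tasks.
  printf("%d\n", NumberOfCompactionTasks(8, 4 << 20, 1 << 20, 4));
}
```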
3261 | 3247 |
3262 static const bool NeedSequentialFinalization = true; | 3248 static const bool NeedSequentialFinalization = true; |
3263 | 3249 |
3264 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, | 3250 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, |
3265 MemoryChunk* chunk, PerPageData) { | 3251 MemoryChunk* chunk, PerPageData) { |
3266 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); | 3252 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); |
3267 } | 3253 } |
3268 | 3254 |
3269 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, | 3255 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, |
3270 bool success, PerPageData data) { | 3256 bool success, PerPageData data) { |
3271 using Evacuator = MarkCompactCollector::Evacuator; | 3257 if (chunk->InNewSpace()) { |
3272 Page* p = static_cast<Page*>(chunk); | 3258 DCHECK(success); |
3273 switch (Evacuator::ComputeEvacuationMode(p)) { | 3259 } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { |
3274 case Evacuator::kPageNewToOld: | 3260 DCHECK(success); |
3275 break; | 3261 Page* p = static_cast<Page*>(chunk); |
3276 case Evacuator::kPageNewToNew: | 3262 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); |
3277 DCHECK(success); | 3263 p->ForAllFreeListCategories( |
3278 break; | 3264 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); |
3279 case Evacuator::kObjectsNewToOld: | 3265 heap->mark_compact_collector()->sweeper().AddLatePage( |
3280 DCHECK(success); | 3266 p->owner()->identity(), p); |
3281 break; | 3267 } else { |
3282 case Evacuator::kObjectsOldToOld: | 3268 Page* p = static_cast<Page*>(chunk); |
3283 if (success) { | 3269 if (success) { |
3284 DCHECK(p->IsEvacuationCandidate()); | 3270 DCHECK(p->IsEvacuationCandidate()); |
3285 DCHECK(p->SweepingDone()); | 3271 DCHECK(p->SweepingDone()); |
3286 p->Unlink(); | 3272 p->Unlink(); |
3287 } else { | 3273 } else { |
3288 // We have partially compacted the page, i.e., some objects may have | 3274 // We have partially compacted the page, i.e., some objects may have |
3289 // moved, others are still in place. | 3275 // moved, others are still in place. |
3290 p->ClearEvacuationCandidate(); | 3276 p->ClearEvacuationCandidate(); |
3291 // Slots have already been recorded so we just need to add it to the | 3277 // Slots have already been recorded so we just need to add it to the |
3292 // sweeper, which will happen after updating pointers. | 3278 // sweeper. |
3293 *data += 1; | 3279 *data += 1; |
3294 } | 3280 } |
3295 break; | |
3296 default: | |
3297 UNREACHABLE(); | |
3298 } | 3281 } |
3299 } | 3282 } |
3300 }; | 3283 }; |
3301 | 3284 |
3302 void MarkCompactCollector::EvacuatePagesInParallel() { | 3285 void MarkCompactCollector::EvacuatePagesInParallel() { |
3303 PageParallelJob<EvacuationJobTraits> job( | 3286 PageParallelJob<EvacuationJobTraits> job( |
3304 heap_, heap_->isolate()->cancelable_task_manager(), | 3287 heap_, heap_->isolate()->cancelable_task_manager(), |
3305 &page_parallel_job_semaphore_); | 3288 &page_parallel_job_semaphore_); |
3306 | 3289 |
3307 int abandoned_pages = 0; | 3290 int abandoned_pages = 0; |
3308 intptr_t live_bytes = 0; | 3291 intptr_t live_bytes = 0; |
3309 for (Page* page : evacuation_candidates_) { | 3292 for (Page* page : evacuation_candidates_) { |
3310 live_bytes += page->LiveBytes(); | 3293 live_bytes += page->LiveBytes(); |
3311 job.AddPage(page, &abandoned_pages); | 3294 job.AddPage(page, &abandoned_pages); |
3312 } | 3295 } |
3313 | 3296 |
3314 const Address age_mark = heap()->new_space()->age_mark(); | 3297 const Address age_mark = heap()->new_space()->age_mark(); |
3315 for (Page* page : newspace_evacuation_candidates_) { | 3298 for (Page* page : newspace_evacuation_candidates_) { |
3316 live_bytes += page->LiveBytes(); | 3299 live_bytes += page->LiveBytes(); |
3317 if (!page->NeverEvacuate() && | 3300 if (!page->NeverEvacuate() && |
3318 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) && | 3301 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) && |
| 3302 page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) && |
3319 !page->Contains(age_mark)) { | 3303 !page->Contains(age_mark)) { |
3320 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { | 3304 EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space()); |
3321 EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space()); | |
3322 } else { | |
3323 EvacuateNewSpacePageVisitor::MoveToToSpace(page); | |
3324 } | |
3325 } | 3305 } |
3326 | |
3327 job.AddPage(page, &abandoned_pages); | 3306 job.AddPage(page, &abandoned_pages); |
3328 } | 3307 } |
3329 DCHECK_GE(job.NumberOfPages(), 1); | 3308 DCHECK_GE(job.NumberOfPages(), 1); |
3330 | 3309 |
3331 // Used for trace summary. | 3310 // Used for trace summary. |
3332 double compaction_speed = 0; | 3311 double compaction_speed = 0; |
3333 if (FLAG_trace_evacuation) { | 3312 if (FLAG_trace_evacuation) { |
3334 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3313 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
3335 } | 3314 } |
3336 | 3315 |
(...skipping 33 matching lines...) |
3370 return map_word.ToForwardingAddress(); | 3349 return map_word.ToForwardingAddress(); |
3371 } | 3350 } |
3372 } | 3351 } |
3373 return object; | 3352 return object; |
3374 } | 3353 } |
3375 }; | 3354 }; |
3376 | 3355 |
3377 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, | 3356 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, |
3378 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, | 3357 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, |
3379 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, | 3358 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, |
3380 MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode, | |
3381 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> | 3359 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> |
3382 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, | 3360 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, |
3383 ObjectVisitor* v) { | 3361 ObjectVisitor* v) { |
3384 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); | 3362 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); |
3385 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); | 3363 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); |
3386 DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) || | 3364 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, |
3387 (skip_list_mode == REBUILD_SKIP_LIST)); | 3365 space->identity() == CODE_SPACE); |
3388 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); | 3366 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); |
3389 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY); | 3367 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY); |
3390 | 3368 |
3391 // Before we sweep objects on the page, we free dead array buffers which | 3369 // Before we sweep objects on the page, we free dead array buffers which |
3392 // requires valid mark bits. | 3370 // requires valid mark bits. |
3393 ArrayBufferTracker::FreeDead(p); | 3371 ArrayBufferTracker::FreeDead(p); |
3394 | 3372 |
3395 Address free_start = p->area_start(); | 3373 Address free_start = p->area_start(); |
3396 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); | 3374 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
3397 | 3375 |
(...skipping 12 matching lines...) |
3410 LiveObjectIterator<kBlackObjects> it(p); | 3388 LiveObjectIterator<kBlackObjects> it(p); |
3411 HeapObject* object = NULL; | 3389 HeapObject* object = NULL; |
3412 while ((object = it.Next()) != NULL) { | 3390 while ((object = it.Next()) != NULL) { |
3413 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3391 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
3414 Address free_end = object->address(); | 3392 Address free_end = object->address(); |
3415 if (free_end != free_start) { | 3393 if (free_end != free_start) { |
3416 int size = static_cast<int>(free_end - free_start); | 3394 int size = static_cast<int>(free_end - free_start); |
3417 if (free_space_mode == ZAP_FREE_SPACE) { | 3395 if (free_space_mode == ZAP_FREE_SPACE) { |
3418 memset(free_start, 0xcc, size); | 3396 memset(free_start, 0xcc, size); |
3419 } | 3397 } |
3420 if (free_list_mode == REBUILD_FREE_LIST) { | 3398 freed_bytes = space->UnaccountedFree(free_start, size); |
3421 freed_bytes = space->UnaccountedFree(free_start, size); | 3399 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
3422 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
3423 } else { | |
3424 p->heap()->CreateFillerObjectAt(free_start, size, | |
3425 ClearRecordedSlots::kNo); | |
3426 } | |
3427 } | 3400 } |
3428 Map* map = object->synchronized_map(); | 3401 Map* map = object->synchronized_map(); |
3429 int size = object->SizeFromMap(map); | 3402 int size = object->SizeFromMap(map); |
3430 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { | 3403 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { |
3431 object->IterateBody(map->instance_type(), size, v); | 3404 object->IterateBody(map->instance_type(), size, v); |
3432 } | 3405 } |
3433 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { | 3406 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { |
3434 int new_region_start = SkipList::RegionNumber(free_end); | 3407 int new_region_start = SkipList::RegionNumber(free_end); |
3435 int new_region_end = | 3408 int new_region_end = |
3436 SkipList::RegionNumber(free_end + size - kPointerSize); | 3409 SkipList::RegionNumber(free_end + size - kPointerSize); |
3437 if (new_region_start != curr_region || new_region_end != curr_region) { | 3410 if (new_region_start != curr_region || new_region_end != curr_region) { |
3438 skip_list->AddObject(free_end, size); | 3411 skip_list->AddObject(free_end, size); |
3439 curr_region = new_region_end; | 3412 curr_region = new_region_end; |
3440 } | 3413 } |
3441 } | 3414 } |
3442 free_start = free_end + size; | 3415 free_start = free_end + size; |
3443 } | 3416 } |
3444 | 3417 |
3445 // Clear the mark bits of that page and reset live bytes count. | 3418 // Clear the mark bits of that page and reset live bytes count. |
3446 Bitmap::Clear(p); | 3419 Bitmap::Clear(p); |
3447 | 3420 |
3448 if (free_start != p->area_end()) { | 3421 if (free_start != p->area_end()) { |
3449 int size = static_cast<int>(p->area_end() - free_start); | 3422 int size = static_cast<int>(p->area_end() - free_start); |
3450 if (free_space_mode == ZAP_FREE_SPACE) { | 3423 if (free_space_mode == ZAP_FREE_SPACE) { |
3451 memset(free_start, 0xcc, size); | 3424 memset(free_start, 0xcc, size); |
3452 } | 3425 } |
3453 if (free_list_mode == REBUILD_FREE_LIST) { | 3426 freed_bytes = space->UnaccountedFree(free_start, size); |
3454 freed_bytes = space->UnaccountedFree(free_start, size); | 3427 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
3455 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
3456 } else { | |
3457 p->heap()->CreateFillerObjectAt(free_start, size, | |
3458 ClearRecordedSlots::kNo); | |
3459 } | |
3460 } | 3428 } |
3461 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3429 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
3462 if (free_list_mode == IGNORE_FREE_LIST) return 0; | |
3463 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | 3430 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
3464 } | 3431 } |
3465 | 3432 |
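RawSweep's core loop, stripped of V8's types: walk the black (live) objects in address order and hand every gap between them to the free list, tracking the largest freed block. A simplified sketch under assumed stand-in types (not the real LiveObjectIterator or FreeList):

```cpp
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Assumed stand-in for a marked live object; RawSweep derives these from
// the mark bitmap via LiveObjectIterator instead.
struct FakeObject {
  size_t start, size;
};

// Walks live objects in address order and records every gap as free memory.
// Returns the largest contiguous freed block, mirroring max_freed_bytes.
size_t SweepArea(size_t area_start, size_t area_end,
                 const std::vector<FakeObject>& live_sorted,
                 std::vector<std::pair<size_t, size_t>>* free_list) {
  size_t free_start = area_start;
  size_t max_freed = 0;
  for (const FakeObject& obj : live_sorted) {
    if (obj.start != free_start) {  // gap before this live object
      size_t size = obj.start - free_start;
      free_list->push_back({free_start, size});
      max_freed = std::max(max_freed, size);
    }
    free_start = obj.start + obj.size;  // continue past the live object
  }
  if (free_start != area_end) {  // trailing gap after the last object
    size_t size = area_end - free_start;
    free_list->push_back({free_start, size});
    max_freed = std::max(max_freed, size);
  }
  return max_freed;
}
```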
3466 void MarkCompactCollector::InvalidateCode(Code* code) { | 3433 void MarkCompactCollector::InvalidateCode(Code* code) { |
3467 if (heap_->incremental_marking()->IsCompacting() && | 3434 if (heap_->incremental_marking()->IsCompacting() && |
3468 !ShouldSkipEvacuationSlotRecording(code)) { | 3435 !ShouldSkipEvacuationSlotRecording(code)) { |
3469 DCHECK(compacting_); | 3436 DCHECK(compacting_); |
3470 | 3437 |
3471 // If the object is white then no slots were recorded on it yet. | 3438 // If the object is white then no slots were recorded on it yet. |
3472 MarkBit mark_bit = Marking::MarkBitFrom(code); | 3439 MarkBit mark_bit = Marking::MarkBitFrom(code); |
(...skipping 95 matching lines...) |
3568 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 3535 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
3569 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | 3536 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); |
3570 Heap::RelocationLock relocation_lock(heap()); | 3537 Heap::RelocationLock relocation_lock(heap()); |
3571 | 3538 |
3572 { | 3539 { |
3573 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); | 3540 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); |
3574 EvacuationScope evacuation_scope(this); | 3541 EvacuationScope evacuation_scope(this); |
3575 | 3542 |
3576 EvacuateNewSpacePrologue(); | 3543 EvacuateNewSpacePrologue(); |
3577 EvacuatePagesInParallel(); | 3544 EvacuatePagesInParallel(); |
| 3545 EvacuateNewSpaceEpilogue(); |
3578 heap()->new_space()->set_age_mark(heap()->new_space()->top()); | 3546 heap()->new_space()->set_age_mark(heap()->new_space()->top()); |
3579 } | 3547 } |
3580 | 3548 |
3581 UpdatePointersAfterEvacuation(); | 3549 UpdatePointersAfterEvacuation(); |
3582 | 3550 |
3583 if (!heap()->new_space()->Rebalance()) { | |
3584 FatalProcessOutOfMemory("NewSpace::Rebalance"); | |
3585 } | |
3586 | |
3587 // Give pages that are queued to be freed back to the OS. Note that filtering | 3551 // Give pages that are queued to be freed back to the OS. Note that filtering |
3588 // slots only handles old space (for unboxed doubles), and thus map space can | 3552 // slots only handles old space (for unboxed doubles), and thus map space can |
3589 // still contain stale pointers. We only free the chunks after pointer updates | 3553 // still contain stale pointers. We only free the chunks after pointer updates |
3590 // to still have access to page headers. | 3554 // to still have access to page headers. |
3591 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | 3555 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
3592 | 3556 |
3593 { | 3557 { |
3594 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | 3558 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); |
3595 | 3559 |
3596 for (Page* p : newspace_evacuation_candidates_) { | |
3597 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { | |
3598 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); | |
3599 sweeper().AddLatePage(p->owner()->identity(), p); | |
3600 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { | |
3601 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); | |
3602 p->ForAllFreeListCategories( | |
3603 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); | |
3604 sweeper().AddLatePage(p->owner()->identity(), p); | |
3605 } | |
3606 } | |
3607 newspace_evacuation_candidates_.Rewind(0); | |
3608 | |
3609 for (Page* p : evacuation_candidates_) { | 3560 for (Page* p : evacuation_candidates_) { |
3610 // Important: skip list should be cleared only after roots were updated | 3561 // Important: skip list should be cleared only after roots were updated |
3611 // because root iteration traverses the stack and might have to find | 3562 // because root iteration traverses the stack and might have to find |
3612 // code objects from non-updated pc pointing into evacuation candidate. | 3563 // code objects from non-updated pc pointing into evacuation candidate. |
3613 SkipList* list = p->skip_list(); | 3564 SkipList* list = p->skip_list(); |
3614 if (list != NULL) list->Clear(); | 3565 if (list != NULL) list->Clear(); |
3615 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 3566 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { |
3616 sweeper().AddLatePage(p->owner()->identity(), p); | 3567 sweeper().AddLatePage(p->owner()->identity(), p); |
3617 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); | 3568 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); |
3618 } | 3569 } |
(...skipping 74 matching lines...) |
3693 // Update the corresponding slot. | 3644 // Update the corresponding slot. |
3694 *slot = map_word.ToForwardingAddress(); | 3645 *slot = map_word.ToForwardingAddress(); |
3695 } | 3646 } |
3696 // If the object was in from space before executing the callback and is | 3647 // If the object was in from space before executing the callback and is |
3697 // in to space afterwards, the object is still live. | 3648 // in to space afterwards, the object is still live. |
3698 // Unfortunately, we do not know about the slot. It could be in a | 3649 // Unfortunately, we do not know about the slot. It could be in a |
3699 // just freed free space object. | 3650 // just freed free space object. |
3700 if (heap->InToSpace(*slot)) { | 3651 if (heap->InToSpace(*slot)) { |
3701 return KEEP_SLOT; | 3652 return KEEP_SLOT; |
3702 } | 3653 } |
3703 } else if (heap->InToSpace(*slot)) { | |
3704 DCHECK(Page::FromAddress(reinterpret_cast<HeapObject*>(*slot)->address()) | |
3705 ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)); | |
3706 // Slots can be in "to" space after a page has been moved. Since there is | |
3707 // no forwarding information present we need to check the markbits to | |
3708 // determine liveness. | |
3709 if (Marking::IsBlack( | |
3710 Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot)))) | |
3711 return KEEP_SLOT; | |
3712 } else { | 3654 } else { |
3713 DCHECK(!heap->InNewSpace(*slot)); | 3655 DCHECK(!heap->InNewSpace(*slot)); |
3714 } | 3656 } |
3715 return REMOVE_SLOT; | 3657 return REMOVE_SLOT; |
3716 } | 3658 } |
3717 }; | 3659 }; |
3718 | 3660 |
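The pointer-updating pass reduces to: load a slot; if the referenced object was relocated (its header carries a forwarding address), rewrite the slot in place. A sketch with assumed stand-in types rather than V8's MapWord encoding:

```cpp
#include <cstdio>

// Assumed stand-in: a relocated object records where it moved to.
struct FakeHeapObject {
  FakeHeapObject* forwarding = nullptr;  // null when the object did not move
};

// Mirrors the visitor's contract: slots pointing at moved objects are
// rewritten; everything else is left untouched.
void UpdateSlot(FakeHeapObject** slot) {
  FakeHeapObject* obj = *slot;
  if (obj != nullptr && obj->forwarding != nullptr) {
    *slot = obj->forwarding;
  }
}

int main() {
  FakeHeapObject moved, target;
  moved.forwarding = &target;
  FakeHeapObject* slot = &moved;
  UpdateSlot(&slot);
  printf("%s\n", slot == &target ? "updated" : "unchanged");  // updated
}
```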
3719 int NumberOfPointerUpdateTasks(int pages) { | 3661 int NumberOfPointerUpdateTasks(int pages) { |
3720 if (!FLAG_parallel_pointer_update) return 1; | 3662 if (!FLAG_parallel_pointer_update) return 1; |
3721 const int kMaxTasks = 4; | 3663 const int kMaxTasks = 4; |
(...skipping 12 matching lines...) |
3734 job.Run(num_tasks, [](int i) { return 0; }); | 3676 job.Run(num_tasks, [](int i) { return 0; }); |
3735 } | 3677 } |
3736 | 3678 |
3737 class ToSpacePointerUpdateJobTraits { | 3679 class ToSpacePointerUpdateJobTraits { |
3738 public: | 3680 public: |
3739 typedef std::pair<Address, Address> PerPageData; | 3681 typedef std::pair<Address, Address> PerPageData; |
3740 typedef PointersUpdatingVisitor* PerTaskData; | 3682 typedef PointersUpdatingVisitor* PerTaskData; |
3741 | 3683 |
3742 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, | 3684 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, |
3743 MemoryChunk* chunk, PerPageData limits) { | 3685 MemoryChunk* chunk, PerPageData limits) { |
3744 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { | |
3745 // New->new promoted pages contain garbage so they require iteration | |
3746 // using markbits. | |
3747 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits); | |
3748 } else { | |
3749 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits); | |
3750 } | |
3751 return true; | |
3752 } | |
3753 | |
3754 static const bool NeedSequentialFinalization = false; | |
3755 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | |
3756 } | |
3757 | |
3758 private: | |
3759 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, | |
3760 MemoryChunk* chunk, | |
3761 PerPageData limits) { | |
3762 for (Address cur = limits.first; cur < limits.second;) { | 3686 for (Address cur = limits.first; cur < limits.second;) { |
3763 HeapObject* object = HeapObject::FromAddress(cur); | 3687 HeapObject* object = HeapObject::FromAddress(cur); |
3764 Map* map = object->map(); | 3688 Map* map = object->map(); |
3765 int size = object->SizeFromMap(map); | 3689 int size = object->SizeFromMap(map); |
3766 object->IterateBody(map->instance_type(), size, visitor); | 3690 object->IterateBody(map->instance_type(), size, visitor); |
3767 cur += size; | 3691 cur += size; |
3768 } | 3692 } |
| 3693 return true; |
3769 } | 3694 } |
3770 | 3695 static const bool NeedSequentialFinalization = false; |
3771 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, | 3696 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
3772 MemoryChunk* chunk, | |
3773 PerPageData limits) { | |
3774 LiveObjectIterator<kBlackObjects> it(chunk); | |
3775 HeapObject* object = NULL; | |
3776 while ((object = it.Next()) != NULL) { | |
3777 Map* map = object->map(); | |
3778 int size = object->SizeFromMap(map); | |
3779 object->IterateBody(map->instance_type(), size, visitor); | |
3780 } | |
3781 } | 3697 } |
3782 }; | 3698 }; |
3783 | 3699 |
3784 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { | 3700 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { |
3785 PageParallelJob<ToSpacePointerUpdateJobTraits> job( | 3701 PageParallelJob<ToSpacePointerUpdateJobTraits> job( |
3786 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 3702 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
3787 Address space_start = heap->new_space()->bottom(); | 3703 Address space_start = heap->new_space()->bottom(); |
3788 Address space_end = heap->new_space()->top(); | 3704 Address space_end = heap->new_space()->top(); |
3789 NewSpacePageIterator it(space_start, space_end); | 3705 NewSpacePageIterator it(space_start, space_end); |
3790 while (it.has_next()) { | 3706 while (it.has_next()) { |
(...skipping 55 matching lines...) |
3846 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | 3762 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
3847 } | 3763 } |
3848 | 3764 |
3849 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, | 3765 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, |
3850 int required_freed_bytes, | 3766 int required_freed_bytes, |
3851 int max_pages) { | 3767 int max_pages) { |
3852 int max_freed = 0; | 3768 int max_freed = 0; |
3853 int pages_freed = 0; | 3769 int pages_freed = 0; |
3854 Page* page = nullptr; | 3770 Page* page = nullptr; |
3855 while ((page = GetSweepingPageSafe(identity)) != nullptr) { | 3771 while ((page = GetSweepingPageSafe(identity)) != nullptr) { |
3856 int freed = ParallelSweepPage(page, identity); | 3772 int freed = ParallelSweepPage(page, heap_->paged_space(identity)); |
3857 pages_freed += 1; | 3773 pages_freed += 1; |
3858 DCHECK_GE(freed, 0); | 3774 DCHECK_GE(freed, 0); |
3859 max_freed = Max(max_freed, freed); | 3775 max_freed = Max(max_freed, freed); |
3860 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes)) | 3776 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes)) |
3861 return max_freed; | 3777 return max_freed; |
3862 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed; | 3778 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed; |
3863 } | 3779 } |
3864 return max_freed; | 3780 return max_freed; |
3865 } | 3781 } |
3866 | 3782 |
3867 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, | 3783 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, |
3868 AllocationSpace identity) { | 3784 PagedSpace* space) { |
3869 int max_freed = 0; | 3785 int max_freed = 0; |
3870 if (page->mutex()->TryLock()) { | 3786 if (page->mutex()->TryLock()) { |
3871 // If this page was already swept in the meantime, we can return here. | 3787 // If this page was already swept in the meantime, we can return here. |
3872 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { | 3788 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { |
3873 page->mutex()->Unlock(); | 3789 page->mutex()->Unlock(); |
3874 return 0; | 3790 return 0; |
3875 } | 3791 } |
3876 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3792 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
3877 if (identity == NEW_SPACE) { | 3793 if (space->identity() == OLD_SPACE) { |
3878 RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | |
3879 IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr); | |
3880 } else if (identity == OLD_SPACE) { | |
3881 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3794 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
3882 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( | 3795 IGNORE_FREE_SPACE>(space, page, NULL); |
3883 heap_->paged_space(identity), page, nullptr); | 3796 } else if (space->identity() == CODE_SPACE) { |
3884 } else if (identity == CODE_SPACE) { | |
3885 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, | 3797 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, |
3886 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( | 3798 IGNORE_FREE_SPACE>(space, page, NULL); |
3887 heap_->paged_space(identity), page, nullptr); | |
3888 } else { | 3799 } else { |
3889 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3800 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
3890 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( | 3801 IGNORE_FREE_SPACE>(space, page, NULL); |
3891 heap_->paged_space(identity), page, nullptr); | |
3892 } | 3802 } |
3893 { | 3803 { |
3894 base::LockGuard<base::Mutex> guard(&mutex_); | 3804 base::LockGuard<base::Mutex> guard(&mutex_); |
3895 swept_list_[identity].Add(page); | 3805 swept_list_[space->identity()].Add(page); |
3896 } | 3806 } |
3897 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3807 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
3898 page->mutex()->Unlock(); | 3808 page->mutex()->Unlock(); |
3899 } | 3809 } |
3900 return max_freed; | 3810 return max_freed; |
3901 } | 3811 } |
3902 | 3812 |
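ParallelSweepPage's coordination pattern is worth isolating: try-lock the page, back off if another sweeper already claimed or finished it, otherwise sweep, publish the page to the shared swept list under a mutex, and mark it done. A distilled sketch with std::mutex and std::atomic standing in for V8's primitives:

```cpp
#include <atomic>
#include <mutex>
#include <vector>

enum SweepState { kSweepingPending, kSweepingInProgress, kSweepingDone };

struct FakePage {
  std::mutex mutex;
  std::atomic<SweepState> state{kSweepingPending};
};

std::mutex swept_list_mutex;
std::vector<FakePage*> swept_list;

bool TrySweep(FakePage* page) {
  if (!page->mutex.try_lock()) return false;  // another sweeper owns the page
  if (page->state.load() != kSweepingPending) {
    page->mutex.unlock();  // already swept in the meantime
    return false;
  }
  page->state.store(kSweepingInProgress);
  // ... sweep the page here ...
  {
    std::lock_guard<std::mutex> guard(swept_list_mutex);
    swept_list.push_back(page);  // publish for the main thread to refill from
  }
  page->state.store(kSweepingDone);
  page->mutex.unlock();
  return true;
}

int main() {
  FakePage page;
  return TrySweep(&page) ? 0 : 1;
}
```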
3903 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { | 3813 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { |
3904 DCHECK(!sweeping_in_progress_); | 3814 DCHECK(!sweeping_in_progress_); |
3905 PrepareToBeSweptPage(space, page); | 3815 PrepareToBeSweptPage(space, page); |
3906 sweeping_list_[space].push_back(page); | 3816 sweeping_list_[space].push_back(page); |
3907 } | 3817 } |
3908 | 3818 |
3909 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, | 3819 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, |
3910 Page* page) { | 3820 Page* page) { |
3911 DCHECK(sweeping_in_progress_); | 3821 DCHECK(sweeping_in_progress_); |
3912 PrepareToBeSweptPage(space, page); | 3822 PrepareToBeSweptPage(space, page); |
3913 late_pages_ = true; | 3823 late_pages_ = true; |
3914 AddSweepingPageSafe(space, page); | 3824 AddSweepingPageSafe(space, page); |
3915 } | 3825 } |
3916 | 3826 |
3917 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, | 3827 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, |
3918 Page* page) { | 3828 Page* page) { |
3919 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); | 3829 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); |
3920 int to_sweep = page->area_size() - page->LiveBytes(); | 3830 int to_sweep = page->area_size() - page->LiveBytes(); |
3921 if (space != NEW_SPACE) | 3831 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); |
3922 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); | |
3923 } | 3832 } |
3924 | 3833 |
3925 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( | 3834 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( |
3926 AllocationSpace space) { | 3835 AllocationSpace space) { |
3927 base::LockGuard<base::Mutex> guard(&mutex_); | 3836 base::LockGuard<base::Mutex> guard(&mutex_); |
3928 Page* page = nullptr; | 3837 Page* page = nullptr; |
3929 if (!sweeping_list_[space].empty()) { | 3838 if (!sweeping_list_[space].empty()) { |
3930 page = sweeping_list_[space].front(); | 3839 page = sweeping_list_[space].front(); |
3931 sweeping_list_[space].pop_front(); | 3840 sweeping_list_[space].pop_front(); |
3932 } | 3841 } |
(...skipping 43 matching lines...) |
3976 continue; | 3885 continue; |
3977 } | 3886 } |
3978 | 3887 |
3979 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { | 3888 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { |
3980 // We need to sweep the page to get it into an iterable state again. Note | 3889 // We need to sweep the page to get it into an iterable state again. Note |
3981 // that this adds unusable memory to the free list, from which it is | 3890 // that this adds unusable memory to the free list, from which it is |
3982 // later dropped again. Since we only use the flag for | 3891 // later dropped again. Since we only use the flag for |
3983 // testing this is fine. | 3892 // testing this is fine. |
3984 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3893 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
3985 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, | 3894 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, |
3986 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST, | 3895 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>( |
3987 Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr); | 3896 space, p, nullptr); |
3988 continue; | 3897 continue; |
3989 } | 3898 } |
3990 | 3899 |
3991 // One unused page is kept, all further are released before sweeping them. | 3900 // One unused page is kept, all further are released before sweeping them. |
3992 if (p->LiveBytes() == 0) { | 3901 if (p->LiveBytes() == 0) { |
3993 if (unused_page_present) { | 3902 if (unused_page_present) { |
3994 if (FLAG_gc_verbose) { | 3903 if (FLAG_gc_verbose) { |
3995 PrintIsolate(isolate(), "sweeping: released page: %p", | 3904 PrintIsolate(isolate(), "sweeping: released page: %p", |
3996 static_cast<void*>(p)); | 3905 static_cast<void*>(p)); |
3997 } | 3906 } |
(...skipping 85 matching lines...) |
4083 MarkBit mark_bit = Marking::MarkBitFrom(host); | 3992 MarkBit mark_bit = Marking::MarkBitFrom(host); |
4084 if (Marking::IsBlack(mark_bit)) { | 3993 if (Marking::IsBlack(mark_bit)) { |
4085 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 3994 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
4086 RecordRelocSlot(host, &rinfo, target); | 3995 RecordRelocSlot(host, &rinfo, target); |
4087 } | 3996 } |
4088 } | 3997 } |
4089 } | 3998 } |
4090 | 3999 |
4091 } // namespace internal | 4000 } // namespace internal |
4092 } // namespace v8 | 4001 } // namespace v8 |