| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 453 matching lines...) |
| 464 AllocationSpace space_to_start) | 464 AllocationSpace space_to_start) |
| 465 : sweeper_(sweeper), | 465 : sweeper_(sweeper), |
| 466 pending_sweeper_tasks_(pending_sweeper_tasks), | 466 pending_sweeper_tasks_(pending_sweeper_tasks), |
| 467 space_to_start_(space_to_start) {} | 467 space_to_start_(space_to_start) {} |
| 468 | 468 |
| 469 virtual ~SweeperTask() {} | 469 virtual ~SweeperTask() {} |
| 470 | 470 |
| 471 private: | 471 private: |
| 472 // v8::Task overrides. | 472 // v8::Task overrides. |
| 473 void Run() override { | 473 void Run() override { |
| 474 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); | 474 DCHECK_GE(space_to_start_, FIRST_SPACE); |
| 475 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); | 475 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); |
| 476 const int offset = space_to_start_ - FIRST_PAGED_SPACE; | 476 const int offset = space_to_start_ - FIRST_SPACE; |
| 477 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; | 477 const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1; |
| 478 for (int i = 0; i < num_spaces; i++) { | 478 for (int i = 0; i < num_spaces; i++) { |
| 479 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); | 479 const int space_id = FIRST_SPACE + ((i + offset) % num_spaces); |
| 480 DCHECK_GE(space_id, FIRST_PAGED_SPACE); | 480 DCHECK_GE(space_id, FIRST_SPACE); |
| 481 DCHECK_LE(space_id, LAST_PAGED_SPACE); | 481 DCHECK_LE(space_id, LAST_PAGED_SPACE); |
| 482 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); | 482 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); |
| 483 } | 483 } |
| 484 pending_sweeper_tasks_->Signal(); | 484 pending_sweeper_tasks_->Signal(); |
| 485 } | 485 } |
| 486 | 486 |
| 487 Sweeper* sweeper_; | 487 Sweeper* sweeper_; |
| 488 base::Semaphore* pending_sweeper_tasks_; | 488 base::Semaphore* pending_sweeper_tasks_; |
| 489 AllocationSpace space_to_start_; | 489 AllocationSpace space_to_start_; |
| 490 | 490 |
| (...skipping 17 matching lines...) |
| 508 void MarkCompactCollector::Sweeper::StartSweepingHelper( | 508 void MarkCompactCollector::Sweeper::StartSweepingHelper( |
| 509 AllocationSpace space_to_start) { | 509 AllocationSpace space_to_start) { |
| 510 num_sweeping_tasks_.Increment(1); | 510 num_sweeping_tasks_.Increment(1); |
| 511 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 511 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 512 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), | 512 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), |
| 513 v8::Platform::kShortRunningTask); | 513 v8::Platform::kShortRunningTask); |
| 514 } | 514 } |
| 515 | 515 |
| 516 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( | 516 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( |
| 517 Page* page) { | 517 Page* page) { |
| 518 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); | |
| 519 if (!page->SweepingDone()) { | 518 if (!page->SweepingDone()) { |
| 520 ParallelSweepPage(page, owner); | 519 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); |
| 520 ParallelSweepPage(page, owner->identity()); |
| 521 if (!page->SweepingDone()) { | 521 if (!page->SweepingDone()) { |
| 522 // We were not able to sweep that page, i.e., a concurrent | 522 // We were not able to sweep that page, i.e., a concurrent |
| 523 // sweeper thread currently owns this page. Wait for the sweeper | 523 // sweeper thread currently owns this page. Wait for the sweeper |
| 524 // thread to be done with this page. | 524 // thread to be done with this page. |
| 525 page->WaitUntilSweepingCompleted(); | 525 page->WaitUntilSweepingCompleted(); |
| 526 } | 526 } |
| 527 } | 527 } |
| 528 } | 528 } |
| 529 | 529 |
| 530 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { | 530 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { |
| (...skipping 22 matching lines...) |
| 553 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); | 553 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); |
| 554 } | 554 } |
| 555 | 555 |
| 556 if (FLAG_concurrent_sweeping) { | 556 if (FLAG_concurrent_sweeping) { |
| 557 while (num_sweeping_tasks_.Value() > 0) { | 557 while (num_sweeping_tasks_.Value() > 0) { |
| 558 pending_sweeper_tasks_semaphore_.Wait(); | 558 pending_sweeper_tasks_semaphore_.Wait(); |
| 559 num_sweeping_tasks_.Increment(-1); | 559 num_sweeping_tasks_.Increment(-1); |
| 560 } | 560 } |
| 561 } | 561 } |
| 562 | 562 |
| 563 ForAllSweepingSpaces( | 563 ForAllSweepingSpaces([this](AllocationSpace space) { |
| 564 [this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); }); | 564 if (space == NEW_SPACE) { |
| 565 swept_list_[NEW_SPACE].Clear(); |
| 566 } |
| 567 DCHECK(sweeping_list_[space].empty()); |
| 568 }); |
| 565 late_pages_ = false; | 569 late_pages_ = false; |
| 566 sweeping_in_progress_ = false; | 570 sweeping_in_progress_ = false; |
| 567 } | 571 } |
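
EnsureCompleted() relies on a simple handshake: every SweeperTask signals the semaphore exactly once, and the main thread waits once per task it started, so no sweeper can still be running when the sweeping lists are inspected. A compile-anywhere sketch of that pattern, with base::Semaphore and AtomicNumber modeled on standard-library primitives (names assumed):

    #include <condition_variable>
    #include <mutex>
    #include <thread>
    #include <vector>

    class Semaphore {
     public:
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++count_;
        cv_.notify_one();
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_ = 0;
    };

    int main() {
      Semaphore pending_sweeper_tasks;
      int num_sweeping_tasks = 3;  // incremented once per started task
      std::vector<std::thread> tasks;
      for (int i = 0; i < num_sweeping_tasks; i++) {
        tasks.emplace_back([&pending_sweeper_tasks] {
          // ... ParallelSweepSpace(...) for each space would run here ...
          pending_sweeper_tasks.Signal();  // exactly one Signal per task
        });
      }
      // Mirrors EnsureCompleted(): one Wait per started task.
      while (num_sweeping_tasks > 0) {
        pending_sweeper_tasks.Wait();
        --num_sweeping_tasks;
      }
      for (auto& t : tasks) t.join();
      return 0;
    }
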
| 568 | 572 |
| 573 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() { |
| 574 if (!sweeping_in_progress_) return; |
| 575 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { |
| 576 NewSpacePageIterator pit(heap_->new_space()); |
| 577 while (pit.has_next()) { |
| 578 Page* page = pit.next(); |
| 579 SweepOrWaitUntilSweepingCompleted(page); |
| 580 } |
| 581 } |
| 582 } |
| 583 |
| 569 void MarkCompactCollector::EnsureSweepingCompleted() { | 584 void MarkCompactCollector::EnsureSweepingCompleted() { |
| 570 if (!sweeper().sweeping_in_progress()) return; | 585 if (!sweeper().sweeping_in_progress()) return; |
| 571 | 586 |
| 572 sweeper().EnsureCompleted(); | 587 sweeper().EnsureCompleted(); |
| 573 heap()->old_space()->RefillFreeList(); | 588 heap()->old_space()->RefillFreeList(); |
| 574 heap()->code_space()->RefillFreeList(); | 589 heap()->code_space()->RefillFreeList(); |
| 575 heap()->map_space()->RefillFreeList(); | 590 heap()->map_space()->RefillFreeList(); |
| 576 | 591 |
| 577 #ifdef VERIFY_HEAP | 592 #ifdef VERIFY_HEAP |
| 578 if (FLAG_verify_heap && !evacuation()) { | 593 if (FLAG_verify_heap && !evacuation()) { |
| (...skipping 1293 matching lines...) |
| 1872 AllocationSpace space_to_allocate_; | 1887 AllocationSpace space_to_allocate_; |
| 1873 intptr_t promoted_size_; | 1888 intptr_t promoted_size_; |
| 1874 intptr_t semispace_copied_size_; | 1889 intptr_t semispace_copied_size_; |
| 1875 base::HashMap* local_pretenuring_feedback_; | 1890 base::HashMap* local_pretenuring_feedback_; |
| 1876 }; | 1891 }; |
| 1877 | 1892 |
| 1878 class MarkCompactCollector::EvacuateNewSpacePageVisitor final | 1893 class MarkCompactCollector::EvacuateNewSpacePageVisitor final |
| 1879 : public MarkCompactCollector::HeapObjectVisitor { | 1894 : public MarkCompactCollector::HeapObjectVisitor { |
| 1880 public: | 1895 public: |
| 1881 explicit EvacuateNewSpacePageVisitor(Heap* heap) | 1896 explicit EvacuateNewSpacePageVisitor(Heap* heap) |
| 1882 : heap_(heap), promoted_size_(0) {} | 1897 : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {} |
| 1883 | 1898 |
| 1884 static void TryMoveToOldSpace(Page* page, PagedSpace* owner) { | 1899 static void MoveToOldSpace(Page* page, PagedSpace* owner) { |
| 1885 if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) { | 1900 page->Unlink(); |
| 1886 Page* new_page = Page::ConvertNewToOld(page, owner); | 1901 Page* new_page = Page::ConvertNewToOld(page, owner); |
| 1887 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); | 1902 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); |
| 1888 } | 1903 } |
| 1904 |
| 1905 static void MoveToToSpace(Page* page) { |
| 1906 page->heap()->new_space()->MovePageFromSpaceToSpace(page); |
| 1907 page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION); |
| 1889 } | 1908 } |
| 1890 | 1909 |
| 1891 inline bool Visit(HeapObject* object) { | 1910 inline bool Visit(HeapObject* object) { |
| 1892 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); | 1911 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); |
| 1893 object->IterateBodyFast(&visitor); | 1912 object->IterateBodyFast(&visitor); |
| 1894 promoted_size_ += object->Size(); | 1913 promoted_size_ += object->Size(); |
| 1895 return true; | 1914 return true; |
| 1896 } | 1915 } |
| 1897 | 1916 |
| 1898 intptr_t promoted_size() { return promoted_size_; } | 1917 intptr_t promoted_size() { return promoted_size_; } |
| 1918 intptr_t semispace_copied_size() { return semispace_copied_size_; } |
| 1919 |
| 1920 void account_semispace_copied(intptr_t copied) { |
| 1921 semispace_copied_size_ += copied; |
| 1922 } |
| 1899 | 1923 |
| 1900 private: | 1924 private: |
| 1901 Heap* heap_; | 1925 Heap* heap_; |
| 1902 intptr_t promoted_size_; | 1926 intptr_t promoted_size_; |
| 1927 intptr_t semispace_copied_size_; |
| 1903 }; | 1928 }; |
| 1904 | 1929 |
| 1905 class MarkCompactCollector::EvacuateOldSpaceVisitor final | 1930 class MarkCompactCollector::EvacuateOldSpaceVisitor final |
| 1906 : public MarkCompactCollector::EvacuateVisitorBase { | 1931 : public MarkCompactCollector::EvacuateVisitorBase { |
| 1907 public: | 1932 public: |
| 1908 EvacuateOldSpaceVisitor(Heap* heap, | 1933 EvacuateOldSpaceVisitor(Heap* heap, |
| 1909 CompactionSpaceCollection* compaction_spaces) | 1934 CompactionSpaceCollection* compaction_spaces) |
| 1910 : EvacuateVisitorBase(heap, compaction_spaces) {} | 1935 : EvacuateVisitorBase(heap, compaction_spaces) {} |
| 1911 | 1936 |
| 1912 inline bool Visit(HeapObject* object) override { | 1937 inline bool Visit(HeapObject* object) override { |
| (...skipping 1119 matching lines...) |
| 3032 NewSpace* new_space = heap()->new_space(); | 3057 NewSpace* new_space = heap()->new_space(); |
| 3033 NewSpacePageIterator it(new_space->bottom(), new_space->top()); | 3058 NewSpacePageIterator it(new_space->bottom(), new_space->top()); |
| 3034 // Append the list of new space pages to be processed. | 3059 // Append the list of new space pages to be processed. |
| 3035 while (it.has_next()) { | 3060 while (it.has_next()) { |
| 3036 newspace_evacuation_candidates_.Add(it.next()); | 3061 newspace_evacuation_candidates_.Add(it.next()); |
| 3037 } | 3062 } |
| 3038 new_space->Flip(); | 3063 new_space->Flip(); |
| 3039 new_space->ResetAllocationInfo(); | 3064 new_space->ResetAllocationInfo(); |
| 3040 } | 3065 } |
| 3041 | 3066 |
| 3042 void MarkCompactCollector::EvacuateNewSpaceEpilogue() { | |
| 3043 newspace_evacuation_candidates_.Rewind(0); | |
| 3044 } | |
| 3045 | |
| 3046 class MarkCompactCollector::Evacuator : public Malloced { | 3067 class MarkCompactCollector::Evacuator : public Malloced { |
| 3047 public: | 3068 public: |
| 3069 enum EvacuationMode { |
| 3070 kObjectsNewToOld, |
| 3071 kPageNewToOld, |
| 3072 kObjectsOldToOld, |
| 3073 kPageNewToNew, |
| 3074 }; |
| 3075 |
| 3076 static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) { |
| 3077 // Note: The order of checks is important in this function. |
| 3078 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) |
| 3079 return kPageNewToOld; |
| 3080 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION)) |
| 3081 return kPageNewToNew; |
| 3082 if (chunk->InNewSpace()) return kObjectsNewToOld; |
| 3083 DCHECK(chunk->IsEvacuationCandidate()); |
| 3084 return kObjectsOldToOld; |
| 3085 } |
| 3086 |
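
The ordering note in ComputeEvacuationMode() is load-bearing: a page promoted within new space still reports InNewSpace() as true, so the promotion flags have to be tested first. A self-contained sketch, with flag bits assumed for illustration:

    #include <cassert>
    #include <cstdint>

    enum Flag : uint32_t {
      PAGE_NEW_OLD_PROMOTION = 1 << 0,
      PAGE_NEW_NEW_PROMOTION = 1 << 1,
      IN_NEW_SPACE           = 1 << 2,
      EVACUATION_CANDIDATE   = 1 << 3,
    };
    enum Mode { kObjectsNewToOld, kPageNewToOld, kObjectsOldToOld, kPageNewToNew };

    Mode ComputeMode(uint32_t flags) {
      // Promotion flags are tested first, mirroring the diff's ordering.
      if (flags & PAGE_NEW_OLD_PROMOTION) return kPageNewToOld;
      if (flags & PAGE_NEW_NEW_PROMOTION) return kPageNewToNew;
      if (flags & IN_NEW_SPACE) return kObjectsNewToOld;
      assert(flags & EVACUATION_CANDIDATE);
      return kObjectsOldToOld;
    }

    int main() {
      // A new->new promoted page still sits in new space; checking
      // InNewSpace() first would wrongly yield kObjectsNewToOld.
      assert(ComputeMode(PAGE_NEW_NEW_PROMOTION | IN_NEW_SPACE) == kPageNewToNew);
      assert(ComputeMode(IN_NEW_SPACE) == kObjectsNewToOld);
      return 0;
    }
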
| 3048 // NewSpacePages with more live bytes than this threshold qualify for fast | 3087 // NewSpacePages with more live bytes than this threshold qualify for fast |
| 3049 // evacuation. | 3088 // evacuation. |
| 3050 static int PageEvacuationThreshold() { | 3089 static int PageEvacuationThreshold() { |
| 3051 if (FLAG_page_promotion) | 3090 if (FLAG_page_promotion) |
| 3052 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; | 3091 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; |
| 3053 return Page::kAllocatableMemory + kPointerSize; | 3092 return Page::kAllocatableMemory + kPointerSize; |
| 3054 } | 3093 } |
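
A worked example of the threshold arithmetic; the allocatable page size and the flag default below are assumptions for illustration, not values taken from this diff:

    #include <cstdio>

    int main() {
      const int kAllocatableMemory = 500 * 1024;  // assumed page payload size
      const int kThresholdPercent = 70;           // assumed flag default
      // Pages with more live bytes than this move wholesale.
      printf("promote pages with live bytes > %d\n",
             kThresholdPercent * kAllocatableMemory / 100);  // 358400
      // With --page_promotion off, the threshold is kAllocatableMemory +
      // kPointerSize, which LiveBytes() can never exceed, so whole-page
      // promotion is effectively disabled.
      return 0;
    }
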
| 3055 | 3094 |
| 3056 explicit Evacuator(MarkCompactCollector* collector) | 3095 explicit Evacuator(MarkCompactCollector* collector) |
| 3057 : collector_(collector), | 3096 : collector_(collector), |
| 3058 compaction_spaces_(collector->heap()), | 3097 compaction_spaces_(collector->heap()), |
| 3059 local_pretenuring_feedback_(base::HashMap::PointersMatch, | 3098 local_pretenuring_feedback_(base::HashMap::PointersMatch, |
| 3060 kInitialLocalPretenuringFeedbackCapacity), | 3099 kInitialLocalPretenuringFeedbackCapacity), |
| 3061 new_space_visitor_(collector->heap(), &compaction_spaces_, | 3100 new_space_visitor_(collector->heap(), &compaction_spaces_, |
| 3062 &local_pretenuring_feedback_), | 3101 &local_pretenuring_feedback_), |
| 3063 new_space_page_visitor(collector->heap()), | 3102 new_space_page_visitor(collector->heap()), |
| 3064 old_space_visitor_(collector->heap(), &compaction_spaces_), | 3103 old_space_visitor_(collector->heap(), &compaction_spaces_), |
| 3065 duration_(0.0), | 3104 duration_(0.0), |
| 3066 bytes_compacted_(0) {} | 3105 bytes_compacted_(0) {} |
| 3067 | 3106 |
| 3068 inline bool EvacuatePage(Page* chunk); | 3107 inline bool EvacuatePage(Page* chunk); |
| 3069 | 3108 |
| 3070 // Merge back locally cached info sequentially. Note that this method needs | 3109 // Merge back locally cached info sequentially. Note that this method needs |
| 3071 // to be called from the main thread. | 3110 // to be called from the main thread. |
| 3072 inline void Finalize(); | 3111 inline void Finalize(); |
| 3073 | 3112 |
| 3074 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } | 3113 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } |
| 3075 | 3114 |
| 3076 private: | 3115 private: |
| 3077 enum EvacuationMode { | |
| 3078 kObjectsNewToOld, | |
| 3079 kPageNewToOld, | |
| 3080 kObjectsOldToOld, | |
| 3081 }; | |
| 3082 | |
| 3083 static const int kInitialLocalPretenuringFeedbackCapacity = 256; | 3116 static const int kInitialLocalPretenuringFeedbackCapacity = 256; |
| 3084 | 3117 |
| 3085 inline Heap* heap() { return collector_->heap(); } | 3118 inline Heap* heap() { return collector_->heap(); } |
| 3086 | 3119 |
| 3087 inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) { | |
| 3088 // Note: The order of checks is important in this function. | |
| 3089 if (chunk->InNewSpace()) return kObjectsNewToOld; | |
| 3090 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) | |
| 3091 return kPageNewToOld; | |
| 3092 DCHECK(chunk->IsEvacuationCandidate()); | |
| 3093 return kObjectsOldToOld; | |
| 3094 } | |
| 3095 | |
| 3096 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { | 3120 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { |
| 3097 duration_ += duration; | 3121 duration_ += duration; |
| 3098 bytes_compacted_ += bytes_compacted; | 3122 bytes_compacted_ += bytes_compacted; |
| 3099 } | 3123 } |
| 3100 | 3124 |
| 3101 template <IterationMode mode, class Visitor> | |
| 3102 inline bool EvacuateSinglePage(Page* p, Visitor* visitor); | |
| 3103 | |
| 3104 MarkCompactCollector* collector_; | 3125 MarkCompactCollector* collector_; |
| 3105 | 3126 |
| 3106 // Locally cached collector data. | 3127 // Locally cached collector data. |
| 3107 CompactionSpaceCollection compaction_spaces_; | 3128 CompactionSpaceCollection compaction_spaces_; |
| 3108 base::HashMap local_pretenuring_feedback_; | 3129 base::HashMap local_pretenuring_feedback_; |
| 3109 | 3130 |
| 3110 // Visitors for the corresponding spaces. | 3131 // Visitors for the corresponding spaces. |
| 3111 EvacuateNewSpaceVisitor new_space_visitor_; | 3132 EvacuateNewSpaceVisitor new_space_visitor_; |
| 3112 EvacuateNewSpacePageVisitor new_space_page_visitor; | 3133 EvacuateNewSpacePageVisitor new_space_page_visitor; |
| 3113 EvacuateOldSpaceVisitor old_space_visitor_; | 3134 EvacuateOldSpaceVisitor old_space_visitor_; |
| 3114 | 3135 |
| 3115 // Bookkeeping info. | 3136 // Bookkeeping info. |
| 3116 double duration_; | 3137 double duration_; |
| 3117 intptr_t bytes_compacted_; | 3138 intptr_t bytes_compacted_; |
| 3118 }; | 3139 }; |
| 3119 | 3140 |
| 3120 template <MarkCompactCollector::IterationMode mode, class Visitor> | 3141 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) { |
| 3121 bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p, | |
| 3122 Visitor* visitor) { | |
| 3123 bool success = false; | 3142 bool success = false; |
| 3124 DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() || | 3143 DCHECK(page->SweepingDone()); |
| 3125 p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)); | 3144 int saved_live_bytes = page->LiveBytes(); |
| 3126 int saved_live_bytes = p->LiveBytes(); | 3145 double evacuation_time = 0.0; |
| 3127 double evacuation_time; | 3146 Heap* heap = page->heap(); |
| 3128 { | 3147 { |
| 3129 AlwaysAllocateScope always_allocate(heap()->isolate()); | 3148 AlwaysAllocateScope always_allocate(heap->isolate()); |
| 3130 TimedScope timed_scope(&evacuation_time); | 3149 TimedScope timed_scope(&evacuation_time); |
| 3131 success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode); | 3150 switch (ComputeEvacuationMode(page)) { |
| 3151 case kObjectsNewToOld: |
| 3152 success = collector_->VisitLiveObjects(page, &new_space_visitor_, |
| 3153 kClearMarkbits); |
| 3154 ArrayBufferTracker::ProcessBuffers( |
| 3155 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
| 3156 DCHECK(success); |
| 3157 break; |
| 3158 case kPageNewToOld: |
| 3159 success = collector_->VisitLiveObjects(page, &new_space_page_visitor, |
| 3160 kKeepMarking); |
| 3161 // ArrayBufferTracker will be updated during sweeping. |
| 3162 DCHECK(success); |
| 3163 break; |
| 3164 case kPageNewToNew: |
| 3165 new_space_page_visitor.account_semispace_copied(page->LiveBytes()); |
| 3166 // ArrayBufferTracker will be updated during sweeping. |
| 3167 success = true; |
| 3168 break; |
| 3169 case kObjectsOldToOld: |
| 3170 success = collector_->VisitLiveObjects(page, &old_space_visitor_, |
| 3171 kClearMarkbits); |
| 3172 if (!success) { |
| 3173 // Aborted compaction page. We have to record slots here, since we |
| 3174 // might not have recorded them in the first place. |
| 3175 // Note: We mark the page as aborted here to be able to record slots |
| 3176 // for code objects in |RecordMigratedSlotVisitor|. |
| 3177 page->SetFlag(Page::COMPACTION_WAS_ABORTED); |
| 3178 EvacuateRecordOnlyVisitor record_visitor(collector_->heap()); |
| 3179 success = |
| 3180 collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking); |
| 3181 ArrayBufferTracker::ProcessBuffers( |
| 3182 page, ArrayBufferTracker::kUpdateForwardedKeepOthers); |
| 3183 DCHECK(success); |
| 3184 // We need to return failure here to indicate that we want this page |
| 3185 // added to the sweeper. |
| 3186 success = false; |
| 3187 } else { |
| 3188 ArrayBufferTracker::ProcessBuffers( |
| 3189 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); |
| 3190 } |
| 3191 break; |
| 3192 default: |
| 3193 UNREACHABLE(); |
| 3194 } |
| 3132 } | 3195 } |
| 3196 ReportCompactionProgress(evacuation_time, saved_live_bytes); |
| 3133 if (FLAG_trace_evacuation) { | 3197 if (FLAG_trace_evacuation) { |
| 3134 const char age_mark_tag = | 3198 PrintIsolate(heap->isolate(), |
| 3135 !p->InNewSpace() | 3199 "evacuation[%p]: page=%p new_space=%d " |
| 3136 ? 'x' | 3200 "page_evacuation=%d executable=%d contains_age_mark=%d " |
| 3137 : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) | 3201 "live_bytes=%d time=%f\n", |
| 3138 ? '>' | 3202 static_cast<void*>(this), static_cast<void*>(page), |
| 3139 : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<' | 3203 page->InNewSpace(), |
| 3140 : '#'; | 3204 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) || |
| 3141 PrintIsolate(heap()->isolate(), | 3205 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION), |
| 3142 "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c " | 3206 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE), |
| 3143 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n", | 3207 page->Contains(heap->new_space()->age_mark()), |
| 3144 static_cast<void*>(this), static_cast<void*>(p), | 3208 saved_live_bytes, evacuation_time); |
| 3145 p->InNewSpace(), age_mark_tag, | |
| 3146 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION), | |
| 3147 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes, | |
| 3148 evacuation_time); | |
| 3149 } | |
| 3150 if (success) { | |
| 3151 ReportCompactionProgress(evacuation_time, saved_live_bytes); | |
| 3152 } | 3209 } |
| 3153 return success; | 3210 return success; |
| 3154 } | 3211 } |
| 3155 | 3212 |
| 3156 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) { | |
| 3157 bool result = false; | |
| 3158 DCHECK(page->SweepingDone()); | |
| 3159 switch (ComputeEvacuationMode(page)) { | |
| 3160 case kObjectsNewToOld: | |
| 3161 result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_); | |
| 3162 ArrayBufferTracker::ProcessBuffers( | |
| 3163 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
| 3164 DCHECK(result); | |
| 3165 USE(result); | |
| 3166 break; | |
| 3167 case kPageNewToOld: | |
| 3168 result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor); | |
| 3169 // ArrayBufferTracker will be updated during sweeping. | |
| 3170 DCHECK(result); | |
| 3171 USE(result); | |
| 3172 break; | |
| 3173 case kObjectsOldToOld: | |
| 3174 result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_); | |
| 3175 if (!result) { | |
| 3176 // Aborted compaction page. We have to record slots here, since we might | |
| 3177 // not have recorded them in the first place. | |
| 3178 // Note: We mark the page as aborted here to be able to record slots | |
| 3179 // for code objects in |RecordMigratedSlotVisitor|. | |
| 3180 page->SetFlag(Page::COMPACTION_WAS_ABORTED); | |
| 3181 EvacuateRecordOnlyVisitor record_visitor(collector_->heap()); | |
| 3182 result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor); | |
| 3183 ArrayBufferTracker::ProcessBuffers( | |
| 3184 page, ArrayBufferTracker::kUpdateForwardedKeepOthers); | |
| 3185 DCHECK(result); | |
| 3186 USE(result); | |
| 3187 // We need to return failure here to indicate that we want this page | |
| 3188 // added to the sweeper. | |
| 3189 return false; | |
| 3190 } | |
| 3191 ArrayBufferTracker::ProcessBuffers( | |
| 3192 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); | |
| 3193 | |
| 3194 break; | |
| 3195 default: | |
| 3196 UNREACHABLE(); | |
| 3197 } | |
| 3198 return result; | |
| 3199 } | |
| 3200 | |
| 3201 void MarkCompactCollector::Evacuator::Finalize() { | 3213 void MarkCompactCollector::Evacuator::Finalize() { |
| 3202 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); | 3214 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); |
| 3203 heap()->code_space()->MergeCompactionSpace( | 3215 heap()->code_space()->MergeCompactionSpace( |
| 3204 compaction_spaces_.Get(CODE_SPACE)); | 3216 compaction_spaces_.Get(CODE_SPACE)); |
| 3205 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); | 3217 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); |
| 3206 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + | 3218 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + |
| 3207 new_space_page_visitor.promoted_size()); | 3219 new_space_page_visitor.promoted_size()); |
| 3208 heap()->IncrementSemiSpaceCopiedObjectSize( | 3220 heap()->IncrementSemiSpaceCopiedObjectSize( |
| 3209 new_space_visitor_.semispace_copied_size()); | 3221 new_space_visitor_.semispace_copied_size() + |
| 3222 new_space_page_visitor.semispace_copied_size()); |
| 3210 heap()->IncrementYoungSurvivorsCounter( | 3223 heap()->IncrementYoungSurvivorsCounter( |
| 3211 new_space_visitor_.promoted_size() + | 3224 new_space_visitor_.promoted_size() + |
| 3212 new_space_visitor_.semispace_copied_size() + | 3225 new_space_visitor_.semispace_copied_size() + |
| 3213 new_space_page_visitor.promoted_size()); | 3226 new_space_page_visitor.promoted_size() + |
| 3227 new_space_page_visitor.semispace_copied_size()); |
| 3214 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); | 3228 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); |
| 3215 } | 3229 } |
| 3216 | 3230 |
| 3217 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, | 3231 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, |
| 3218 intptr_t live_bytes) { | 3232 intptr_t live_bytes) { |
| 3219 if (!FLAG_parallel_compaction) return 1; | 3233 if (!FLAG_parallel_compaction) return 1; |
| 3220 // Compute the number of needed tasks based on a target compaction time, the | 3234 // Compute the number of needed tasks based on a target compaction time, the |
| 3221 // profiled compaction speed and marked live memory. | 3235 // profiled compaction speed and marked live memory. |
| 3222 // | 3236 // |
| 3223 // The number of parallel compaction tasks is limited by: | 3237 // The number of parallel compaction tasks is limited by: |
| (...skipping 27 matching lines...) |
| 3251 | 3265 |
| 3252 static const bool NeedSequentialFinalization = true; | 3266 static const bool NeedSequentialFinalization = true; |
| 3253 | 3267 |
| 3254 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, | 3268 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, |
| 3255 MemoryChunk* chunk, PerPageData) { | 3269 MemoryChunk* chunk, PerPageData) { |
| 3256 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); | 3270 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); |
| 3257 } | 3271 } |
| 3258 | 3272 |
| 3259 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, | 3273 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, |
| 3260 bool success, PerPageData data) { | 3274 bool success, PerPageData data) { |
| 3261 if (chunk->InNewSpace()) { | 3275 using Evacuator = MarkCompactCollector::Evacuator; |
| 3262 DCHECK(success); | 3276 Page* p = static_cast<Page*>(chunk); |
| 3263 } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { | 3277 switch (Evacuator::ComputeEvacuationMode(p)) { |
| 3264 DCHECK(success); | 3278 case Evacuator::kPageNewToOld: |
| 3265 Page* p = static_cast<Page*>(chunk); | 3279 break; |
| 3266 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); | 3280 case Evacuator::kPageNewToNew: |
| 3267 p->ForAllFreeListCategories( | 3281 DCHECK(success); |
| 3268 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); | 3282 break; |
| 3269 heap->mark_compact_collector()->sweeper().AddLatePage( | 3283 case Evacuator::kObjectsNewToOld: |
| 3270 p->owner()->identity(), p); | 3284 DCHECK(success); |
| 3271 } else { | 3285 break; |
| 3272 Page* p = static_cast<Page*>(chunk); | 3286 case Evacuator::kObjectsOldToOld: |
| 3273 if (success) { | 3287 if (success) { |
| 3274 DCHECK(p->IsEvacuationCandidate()); | 3288 DCHECK(p->IsEvacuationCandidate()); |
| 3275 DCHECK(p->SweepingDone()); | 3289 DCHECK(p->SweepingDone()); |
| 3276 p->Unlink(); | 3290 p->Unlink(); |
| 3277 } else { | 3291 } else { |
| 3278 // We have partially compacted the page, i.e., some objects may have | 3292 // We have partially compacted the page, i.e., some objects may have |
| 3279 // moved, others are still in place. | 3293 // moved, others are still in place. |
| 3280 p->ClearEvacuationCandidate(); | 3294 p->ClearEvacuationCandidate(); |
| 3281 // Slots have already been recorded so we just need to add it to the | 3295 // Slots have already been recorded so we just need to add it to the |
| 3282 // sweeper. | 3296 // sweeper, which will happen after updating pointers. |
| 3283 *data += 1; | 3297 *data += 1; |
| 3284 } | 3298 } |
| 3299 break; |
| 3300 default: |
| 3301 UNREACHABLE(); |
| 3285 } | 3302 } |
| 3286 } | 3303 } |
| 3287 }; | 3304 }; |
| 3288 | 3305 |
| 3289 void MarkCompactCollector::EvacuatePagesInParallel() { | 3306 void MarkCompactCollector::EvacuatePagesInParallel() { |
| 3290 PageParallelJob<EvacuationJobTraits> job( | 3307 PageParallelJob<EvacuationJobTraits> job( |
| 3291 heap_, heap_->isolate()->cancelable_task_manager(), | 3308 heap_, heap_->isolate()->cancelable_task_manager(), |
| 3292 &page_parallel_job_semaphore_); | 3309 &page_parallel_job_semaphore_); |
| 3293 | 3310 |
| 3294 int abandoned_pages = 0; | 3311 int abandoned_pages = 0; |
| 3295 intptr_t live_bytes = 0; | 3312 intptr_t live_bytes = 0; |
| 3296 for (Page* page : evacuation_candidates_) { | 3313 for (Page* page : evacuation_candidates_) { |
| 3297 live_bytes += page->LiveBytes(); | 3314 live_bytes += page->LiveBytes(); |
| 3298 job.AddPage(page, &abandoned_pages); | 3315 job.AddPage(page, &abandoned_pages); |
| 3299 } | 3316 } |
| 3300 | 3317 |
| 3301 const Address age_mark = heap()->new_space()->age_mark(); | 3318 const Address age_mark = heap()->new_space()->age_mark(); |
| 3302 for (Page* page : newspace_evacuation_candidates_) { | 3319 for (Page* page : newspace_evacuation_candidates_) { |
| 3303 live_bytes += page->LiveBytes(); | 3320 live_bytes += page->LiveBytes(); |
| 3304 if (!page->NeverEvacuate() && | 3321 if (!page->NeverEvacuate() && |
| 3305 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) && | 3322 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) && |
| 3306 page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) && | |
| 3307 !page->Contains(age_mark)) { | 3323 !page->Contains(age_mark)) { |
| 3308 EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space()); | 3324 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { |
| 3325 EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space()); |
| 3326 } else { |
| 3327 EvacuateNewSpacePageVisitor::MoveToToSpace(page); |
| 3328 } |
| 3309 } | 3329 } |
| 3330 |
| 3310 job.AddPage(page, &abandoned_pages); | 3331 job.AddPage(page, &abandoned_pages); |
| 3311 } | 3332 } |
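
The loop above now splits qualifying new-space pages two ways: a dense page fully below the age mark is promoted wholesale to old space, while a younger dense page is moved within new space. A hypothetical distillation of the branch, with the page predicates passed in as booleans:

    #include <cassert>

    enum Decision { kEvacuateObjects, kMovePageToOldSpace, kMovePageWithinNewSpace };

    Decision Choose(bool never_evacuate, int live_bytes, int threshold,
                    bool contains_age_mark, bool below_age_mark) {
      if (never_evacuate || live_bytes <= threshold || contains_age_mark)
        return kEvacuateObjects;  // default object-by-object copying
      // Dense page: move it wholesale. A page entirely below the age mark
      // has already survived a cycle and goes straight to old space;
      // younger pages are re-linked within new space.
      return below_age_mark ? kMovePageToOldSpace : kMovePageWithinNewSpace;
    }

    int main() {
      assert(Choose(false, 400, 300, false, true) == kMovePageToOldSpace);
      assert(Choose(false, 400, 300, false, false) == kMovePageWithinNewSpace);
      assert(Choose(false, 200, 300, false, true) == kEvacuateObjects);
      return 0;
    }
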
| 3312 DCHECK_GE(job.NumberOfPages(), 1); | 3333 DCHECK_GE(job.NumberOfPages(), 1); |
| 3313 | 3334 |
| 3314 // Used for trace summary. | 3335 // Used for trace summary. |
| 3315 double compaction_speed = 0; | 3336 double compaction_speed = 0; |
| 3316 if (FLAG_trace_evacuation) { | 3337 if (FLAG_trace_evacuation) { |
| 3317 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3338 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
| 3318 } | 3339 } |
| 3319 | 3340 |
| (...skipping 33 matching lines...) |
| 3353 return map_word.ToForwardingAddress(); | 3374 return map_word.ToForwardingAddress(); |
| 3354 } | 3375 } |
| 3355 } | 3376 } |
| 3356 return object; | 3377 return object; |
| 3357 } | 3378 } |
| 3358 }; | 3379 }; |
| 3359 | 3380 |
| 3360 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, | 3381 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, |
| 3361 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, | 3382 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, |
| 3362 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, | 3383 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, |
| 3384 MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode, |
| 3363 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> | 3385 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> |
| 3364 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, | 3386 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, |
| 3365 ObjectVisitor* v) { | 3387 ObjectVisitor* v) { |
| 3366 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); | 3388 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); |
| 3367 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); | 3389 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); |
| 3368 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, | 3390 DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) || |
| 3369 space->identity() == CODE_SPACE); | 3391 (skip_list_mode == REBUILD_SKIP_LIST)); |
| 3370 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); | 3392 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); |
| 3371 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY); | 3393 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY); |
| 3372 | 3394 |
| 3373 // Before we sweep objects on the page, we free dead array buffers, which | 3395 // Before we sweep objects on the page, we free dead array buffers, which |
| 3374 // requires valid mark bits. | 3396 // requires valid mark bits. |
| 3375 ArrayBufferTracker::FreeDead(p); | 3397 ArrayBufferTracker::FreeDead(p); |
| 3376 | 3398 |
| 3377 Address free_start = p->area_start(); | 3399 Address free_start = p->area_start(); |
| 3378 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); | 3400 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
| 3379 | 3401 |
| (...skipping 12 matching lines...) |
| 3392 LiveObjectIterator<kBlackObjects> it(p); | 3414 LiveObjectIterator<kBlackObjects> it(p); |
| 3393 HeapObject* object = NULL; | 3415 HeapObject* object = NULL; |
| 3394 while ((object = it.Next()) != NULL) { | 3416 while ((object = it.Next()) != NULL) { |
| 3395 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3417 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 3396 Address free_end = object->address(); | 3418 Address free_end = object->address(); |
| 3397 if (free_end != free_start) { | 3419 if (free_end != free_start) { |
| 3398 int size = static_cast<int>(free_end - free_start); | 3420 int size = static_cast<int>(free_end - free_start); |
| 3399 if (free_space_mode == ZAP_FREE_SPACE) { | 3421 if (free_space_mode == ZAP_FREE_SPACE) { |
| 3400 memset(free_start, 0xcc, size); | 3422 memset(free_start, 0xcc, size); |
| 3401 } | 3423 } |
| 3402 freed_bytes = space->UnaccountedFree(free_start, size); | 3424 if (free_list_mode == REBUILD_FREE_LIST) { |
| 3403 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 3425 freed_bytes = space->UnaccountedFree(free_start, size); |
| 3426 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
| 3427 } else { |
| 3428 p->heap()->CreateFillerObjectAt(free_start, size, |
| 3429 ClearRecordedSlots::kNo); |
| 3430 } |
| 3404 } | 3431 } |
| 3405 Map* map = object->synchronized_map(); | 3432 Map* map = object->synchronized_map(); |
| 3406 int size = object->SizeFromMap(map); | 3433 int size = object->SizeFromMap(map); |
| 3407 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { | 3434 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { |
| 3408 object->IterateBody(map->instance_type(), size, v); | 3435 object->IterateBody(map->instance_type(), size, v); |
| 3409 } | 3436 } |
| 3410 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { | 3437 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { |
| 3411 int new_region_start = SkipList::RegionNumber(free_end); | 3438 int new_region_start = SkipList::RegionNumber(free_end); |
| 3412 int new_region_end = | 3439 int new_region_end = |
| 3413 SkipList::RegionNumber(free_end + size - kPointerSize); | 3440 SkipList::RegionNumber(free_end + size - kPointerSize); |
| 3414 if (new_region_start != curr_region || new_region_end != curr_region) { | 3441 if (new_region_start != curr_region || new_region_end != curr_region) { |
| 3415 skip_list->AddObject(free_end, size); | 3442 skip_list->AddObject(free_end, size); |
| 3416 curr_region = new_region_end; | 3443 curr_region = new_region_end; |
| 3417 } | 3444 } |
| 3418 } | 3445 } |
| 3419 free_start = free_end + size; | 3446 free_start = free_end + size; |
| 3420 } | 3447 } |
| 3421 | 3448 |
| 3422 // Clear the mark bits of that page and reset live bytes count. | 3449 // Clear the mark bits of that page and reset live bytes count. |
| 3423 Bitmap::Clear(p); | 3450 Bitmap::Clear(p); |
| 3424 | 3451 |
| 3425 if (free_start != p->area_end()) { | 3452 if (free_start != p->area_end()) { |
| 3426 int size = static_cast<int>(p->area_end() - free_start); | 3453 int size = static_cast<int>(p->area_end() - free_start); |
| 3427 if (free_space_mode == ZAP_FREE_SPACE) { | 3454 if (free_space_mode == ZAP_FREE_SPACE) { |
| 3428 memset(free_start, 0xcc, size); | 3455 memset(free_start, 0xcc, size); |
| 3429 } | 3456 } |
| 3430 freed_bytes = space->UnaccountedFree(free_start, size); | 3457 if (free_list_mode == REBUILD_FREE_LIST) { |
| 3431 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 3458 freed_bytes = space->UnaccountedFree(free_start, size); |
| 3459 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
| 3460 } else { |
| 3461 p->heap()->CreateFillerObjectAt(free_start, size, |
| 3462 ClearRecordedSlots::kNo); |
| 3463 } |
| 3432 } | 3464 } |
| 3433 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3465 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
| 3466 if (free_list_mode == IGNORE_FREE_LIST) return 0; |
| 3434 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | 3467 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
| 3435 } | 3468 } |
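
The new free_list_mode template parameter gives RawSweep() two treatments for a dead range: paged spaces return the range to the free list and report freed bytes, while new-space pages only need to stay iterable, so a filler object is written and nothing becomes allocatable. A sketch with stand-in types (the real code works on Address ranges and a FreeList):

    #include <cstdio>
    #include <vector>

    enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
    struct Range { int start, size; };

    int SweepFreeRange(FreeListRebuildingMode mode, Range r,
                       std::vector<Range>* free_list) {
      if (mode == REBUILD_FREE_LIST) {
        free_list->push_back(r);  // range becomes allocatable again
        return r.size;            // feeds max_freed_bytes in the diff
      }
      // IGNORE_FREE_LIST (new space): write a filler so a heap walk can
      // step over the range; stands in for CreateFillerObjectAt().
      printf("filler at %d, %d bytes\n", r.start, r.size);
      return 0;
    }

    int main() {
      std::vector<Range> free_list;
      int freed = SweepFreeRange(REBUILD_FREE_LIST, {0, 128}, &free_list);
      freed += SweepFreeRange(IGNORE_FREE_LIST, {256, 64}, &free_list);
      return freed == 128 ? 0 : 1;
    }
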
| 3436 | 3469 |
| 3437 void MarkCompactCollector::InvalidateCode(Code* code) { | 3470 void MarkCompactCollector::InvalidateCode(Code* code) { |
| 3438 if (heap_->incremental_marking()->IsCompacting() && | 3471 if (heap_->incremental_marking()->IsCompacting() && |
| 3439 !ShouldSkipEvacuationSlotRecording(code)) { | 3472 !ShouldSkipEvacuationSlotRecording(code)) { |
| 3440 DCHECK(compacting_); | 3473 DCHECK(compacting_); |
| 3441 | 3474 |
| 3442 // If the object is white then no slots were recorded on it yet. | 3475 // If the object is white then no slots were recorded on it yet. |
| 3443 MarkBit mark_bit = Marking::MarkBitFrom(code); | 3476 MarkBit mark_bit = Marking::MarkBitFrom(code); |
| (...skipping 95 matching lines...) |
| 3539 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 3572 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
| 3540 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | 3573 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); |
| 3541 Heap::RelocationLock relocation_lock(heap()); | 3574 Heap::RelocationLock relocation_lock(heap()); |
| 3542 | 3575 |
| 3543 { | 3576 { |
| 3544 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); | 3577 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); |
| 3545 EvacuationScope evacuation_scope(this); | 3578 EvacuationScope evacuation_scope(this); |
| 3546 | 3579 |
| 3547 EvacuateNewSpacePrologue(); | 3580 EvacuateNewSpacePrologue(); |
| 3548 EvacuatePagesInParallel(); | 3581 EvacuatePagesInParallel(); |
| 3549 EvacuateNewSpaceEpilogue(); | |
| 3550 heap()->new_space()->set_age_mark(heap()->new_space()->top()); | 3582 heap()->new_space()->set_age_mark(heap()->new_space()->top()); |
| 3551 } | 3583 } |
| 3552 | 3584 |
| 3553 UpdatePointersAfterEvacuation(); | 3585 UpdatePointersAfterEvacuation(); |
| 3554 | 3586 |
| 3587 if (!heap()->new_space()->Rebalance()) { |
| 3588 FatalProcessOutOfMemory("NewSpace::Rebalance"); |
| 3589 } |
| 3590 |
| 3555 // Give pages that are queued to be freed back to the OS. Note that filtering | 3591 // Give pages that are queued to be freed back to the OS. Note that filtering |
| 3556 // slots only handles old space (for unboxed doubles), and thus map space can | 3592 // slots only handles old space (for unboxed doubles), and thus map space can |
| 3557 // still contain stale pointers. We only free the chunks after pointer updates | 3593 // still contain stale pointers. We only free the chunks after pointer updates |
| 3558 // to still have access to page headers. | 3594 // to still have access to page headers. |
| 3559 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | 3595 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
| 3560 | 3596 |
| 3561 { | 3597 { |
| 3562 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | 3598 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); |
| 3563 | 3599 |
| 3600 for (Page* p : newspace_evacuation_candidates_) { |
| 3601 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { |
| 3602 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); |
| 3603 sweeper().AddLatePage(p->owner()->identity(), p); |
| 3604 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { |
| 3605 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); |
| 3606 p->ForAllFreeListCategories( |
| 3607 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); |
| 3608 sweeper().AddLatePage(p->owner()->identity(), p); |
| 3609 } |
| 3610 } |
| 3611 newspace_evacuation_candidates_.Rewind(0); |
| 3612 |
| 3564 for (Page* p : evacuation_candidates_) { | 3613 for (Page* p : evacuation_candidates_) { |
| 3565 // Important: skip list should be cleared only after roots were updated | 3614 // Important: skip list should be cleared only after roots were updated |
| 3566 // because root iteration traverses the stack and might have to find | 3615 // because root iteration traverses the stack and might have to find |
| 3567 // code objects from a non-updated pc pointing into an evacuation candidate. | 3616 // code objects from a non-updated pc pointing into an evacuation candidate. |
| 3568 SkipList* list = p->skip_list(); | 3617 SkipList* list = p->skip_list(); |
| 3569 if (list != NULL) list->Clear(); | 3618 if (list != NULL) list->Clear(); |
| 3570 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 3619 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { |
| 3571 sweeper().AddLatePage(p->owner()->identity(), p); | 3620 sweeper().AddLatePage(p->owner()->identity(), p); |
| 3572 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); | 3621 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); |
| 3573 } | 3622 } |
| (...skipping 74 matching lines...) |
| 3648 // Update the corresponding slot. | 3697 // Update the corresponding slot. |
| 3649 *slot = map_word.ToForwardingAddress(); | 3698 *slot = map_word.ToForwardingAddress(); |
| 3650 } | 3699 } |
| 3651 // If the object was in from space before the callback ran and is in | 3700 // If the object was in from space before the callback ran and is in |
| 3652 // to space afterwards, the object is still live. | 3701 // to space afterwards, the object is still live. |
| 3653 // Unfortunately, we do not know about the slot. It could be in a | 3702 // Unfortunately, we do not know about the slot. It could be in a |
| 3654 // just freed free space object. | 3703 // just freed free space object. |
| 3655 if (heap->InToSpace(*slot)) { | 3704 if (heap->InToSpace(*slot)) { |
| 3656 return KEEP_SLOT; | 3705 return KEEP_SLOT; |
| 3657 } | 3706 } |
| 3707 } else if (heap->InToSpace(*slot)) { |
| 3708 DCHECK(Page::FromAddress(reinterpret_cast<HeapObject*>(*slot)->address()) |
| 3709 ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)); |
| 3710 // Slots can be in "to" space after a page has been moved. Since there is |
| 3711 // no forwarding information present we need to check the markbits to |
| 3712 // determine liveness. |
| 3713 if (Marking::IsBlack( |
| 3714 Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot)))) |
| 3715 return KEEP_SLOT; |
| 3658 } else { | 3716 } else { |
| 3659 DCHECK(!heap->InNewSpace(*slot)); | 3717 DCHECK(!heap->InNewSpace(*slot)); |
| 3660 } | 3718 } |
| 3661 return REMOVE_SLOT; | 3719 return REMOVE_SLOT; |
| 3662 } | 3720 } |
| 3663 }; | 3721 }; |
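
The added branch handles slots pointing into a page that was moved new->new: no forwarding pointer exists for such objects, so liveness has to come from the mark bits. A hypothetical boiled-down classifier of the three cases, with booleans standing in for the heap queries:

    #include <cassert>

    enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

    SlotCallbackResult Classify(bool in_from_space, bool forwarded_to_to_space,
                                bool in_to_space, bool marked_black) {
      if (in_from_space) {
        // Forwarded objects had the slot updated to the to-space copy and
        // the slot must be kept; unforwarded ones are dead.
        return forwarded_to_to_space ? KEEP_SLOT : REMOVE_SLOT;
      }
      if (in_to_space) {
        // Page moved new->new: no forwarding information, so the mark bits
        // are the only liveness information available.
        return marked_black ? KEEP_SLOT : REMOVE_SLOT;
      }
      return REMOVE_SLOT;  // target left new space; this entry is stale
    }

    int main() {
      assert(Classify(false, false, true, true) == KEEP_SLOT);
      assert(Classify(false, false, true, false) == REMOVE_SLOT);
      assert(Classify(true, true, false, false) == KEEP_SLOT);
      return 0;
    }
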
| 3664 | 3722 |
| 3665 int NumberOfPointerUpdateTasks(int pages) { | 3723 int NumberOfPointerUpdateTasks(int pages) { |
| 3666 if (!FLAG_parallel_pointer_update) return 1; | 3724 if (!FLAG_parallel_pointer_update) return 1; |
| 3667 const int kMaxTasks = 4; | 3725 const int kMaxTasks = 4; |
| (...skipping 12 matching lines...) Expand all Loading... |
| 3680 job.Run(num_tasks, [](int i) { return 0; }); | 3738 job.Run(num_tasks, [](int i) { return 0; }); |
| 3681 } | 3739 } |
| 3682 | 3740 |
| 3683 class ToSpacePointerUpdateJobTraits { | 3741 class ToSpacePointerUpdateJobTraits { |
| 3684 public: | 3742 public: |
| 3685 typedef std::pair<Address, Address> PerPageData; | 3743 typedef std::pair<Address, Address> PerPageData; |
| 3686 typedef PointersUpdatingVisitor* PerTaskData; | 3744 typedef PointersUpdatingVisitor* PerTaskData; |
| 3687 | 3745 |
| 3688 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, | 3746 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, |
| 3689 MemoryChunk* chunk, PerPageData limits) { | 3747 MemoryChunk* chunk, PerPageData limits) { |
| 3748 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { |
| 3749 // New->new promoted pages contain garbage so they require iteration |
| 3750 // using markbits. |
| 3751 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits); |
| 3752 } else { |
| 3753 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits); |
| 3754 } |
| 3755 return true; |
| 3756 } |
| 3757 |
| 3758 static const bool NeedSequentialFinalization = false; |
| 3759 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { |
| 3760 } |
| 3761 |
| 3762 private: |
| 3763 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor, |
| 3764 MemoryChunk* chunk, |
| 3765 PerPageData limits) { |
| 3690 for (Address cur = limits.first; cur < limits.second;) { | 3766 for (Address cur = limits.first; cur < limits.second;) { |
| 3691 HeapObject* object = HeapObject::FromAddress(cur); | 3767 HeapObject* object = HeapObject::FromAddress(cur); |
| 3692 Map* map = object->map(); | 3768 Map* map = object->map(); |
| 3693 int size = object->SizeFromMap(map); | 3769 int size = object->SizeFromMap(map); |
| 3694 object->IterateBody(map->instance_type(), size, visitor); | 3770 object->IterateBody(map->instance_type(), size, visitor); |
| 3695 cur += size; | 3771 cur += size; |
| 3696 } | 3772 } |
| 3697 return true; | |
| 3698 } | 3773 } |
| 3699 static const bool NeedSequentialFinalization = false; | 3774 |
| 3700 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | 3775 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor, |
| 3776 MemoryChunk* chunk, |
| 3777 PerPageData limits) { |
| 3778 LiveObjectIterator<kBlackObjects> it(chunk); |
| 3779 HeapObject* object = NULL; |
| 3780 while ((object = it.Next()) != NULL) { |
| 3781 Map* map = object->map(); |
| 3782 int size = object->SizeFromMap(map); |
| 3783 object->IterateBody(map->instance_type(), size, visitor); |
| 3784 } |
| 3701 } | 3785 } |
| 3702 }; | 3786 }; |
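
The split exists because an ordinary to-space page can be walked linearly up to the allocation limit, while a page moved wholesale within new space still contains dead objects between the live ones; only black-marked objects may be visited there. A tiny model with assumed types, reducing IterateBody to summing object sizes:

    #include <vector>

    struct Obj { int size; bool marked_black; };

    int VisitPage(const std::vector<Obj>& page, bool moved_new_to_new) {
      int visited_bytes = 0;
      for (const Obj& o : page) {
        // On a moved page, unmarked objects are garbage and must be
        // skipped; on a normal to-space page everything up to top() is live.
        if (moved_new_to_new && !o.marked_black) continue;
        visited_bytes += o.size;  // stands in for object->IterateBody(...)
      }
      return visited_bytes;
    }

    int main() {
      std::vector<Obj> page = {{16, true}, {24, false}, {8, true}};
      return VisitPage(page, true) == 24 ? 0 : 1;
    }
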
| 3703 | 3787 |
| 3704 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { | 3788 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { |
| 3705 PageParallelJob<ToSpacePointerUpdateJobTraits> job( | 3789 PageParallelJob<ToSpacePointerUpdateJobTraits> job( |
| 3706 heap, heap->isolate()->cancelable_task_manager(), semaphore); | 3790 heap, heap->isolate()->cancelable_task_manager(), semaphore); |
| 3707 Address space_start = heap->new_space()->bottom(); | 3791 Address space_start = heap->new_space()->bottom(); |
| 3708 Address space_end = heap->new_space()->top(); | 3792 Address space_end = heap->new_space()->top(); |
| 3709 NewSpacePageIterator it(space_start, space_end); | 3793 NewSpacePageIterator it(space_start, space_end); |
| 3710 while (it.has_next()) { | 3794 while (it.has_next()) { |
| (...skipping 55 matching lines...) |
| 3766 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | 3850 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
| 3767 } | 3851 } |
| 3768 | 3852 |
| 3769 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, | 3853 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, |
| 3770 int required_freed_bytes, | 3854 int required_freed_bytes, |
| 3771 int max_pages) { | 3855 int max_pages) { |
| 3772 int max_freed = 0; | 3856 int max_freed = 0; |
| 3773 int pages_freed = 0; | 3857 int pages_freed = 0; |
| 3774 Page* page = nullptr; | 3858 Page* page = nullptr; |
| 3775 while ((page = GetSweepingPageSafe(identity)) != nullptr) { | 3859 while ((page = GetSweepingPageSafe(identity)) != nullptr) { |
| 3776 int freed = ParallelSweepPage(page, heap_->paged_space(identity)); | 3860 int freed = ParallelSweepPage(page, identity); |
| 3777 pages_freed += 1; | 3861 pages_freed += 1; |
| 3778 DCHECK_GE(freed, 0); | 3862 DCHECK_GE(freed, 0); |
| 3779 max_freed = Max(max_freed, freed); | 3863 max_freed = Max(max_freed, freed); |
| 3780 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes)) | 3864 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes)) |
| 3781 return max_freed; | 3865 return max_freed; |
| 3782 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed; | 3866 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed; |
| 3783 } | 3867 } |
| 3784 return max_freed; | 3868 return max_freed; |
| 3785 } | 3869 } |
| 3786 | 3870 |
| 3787 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, | 3871 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, |
| 3788 PagedSpace* space) { | 3872 AllocationSpace identity) { |
| 3789 int max_freed = 0; | 3873 int max_freed = 0; |
| 3790 if (page->mutex()->TryLock()) { | 3874 if (page->mutex()->TryLock()) { |
| 3791 // If this page was already swept in the meantime, we can return here. | 3875 // If this page was already swept in the meantime, we can return here. |
| 3792 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { | 3876 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { |
| 3793 page->mutex()->Unlock(); | 3877 page->mutex()->Unlock(); |
| 3794 return 0; | 3878 return 0; |
| 3795 } | 3879 } |
| 3796 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3880 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
| 3797 if (space->identity() == OLD_SPACE) { | 3881 if (identity == NEW_SPACE) { |
| 3882 RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
| 3883 IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr); |
| 3884 } else if (identity == OLD_SPACE) { |
| 3798 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3885 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
| 3799 IGNORE_FREE_SPACE>(space, page, NULL); | 3886 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( |
| 3800 } else if (space->identity() == CODE_SPACE) { | 3887 heap_->paged_space(identity), page, nullptr); |
| 3888 } else if (identity == CODE_SPACE) { |
| 3801 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, | 3889 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, |
| 3802 IGNORE_FREE_SPACE>(space, page, NULL); | 3890 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( |
| 3891 heap_->paged_space(identity), page, nullptr); |
| 3803 } else { | 3892 } else { |
| 3804 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3893 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
| 3805 IGNORE_FREE_SPACE>(space, page, NULL); | 3894 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( |
| 3895 heap_->paged_space(identity), page, nullptr); |
| 3806 } | 3896 } |
| 3807 { | 3897 { |
| 3808 base::LockGuard<base::Mutex> guard(&mutex_); | 3898 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3809 swept_list_[space->identity()].Add(page); | 3899 swept_list_[identity].Add(page); |
| 3810 } | 3900 } |
| 3811 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3901 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
| 3812 page->mutex()->Unlock(); | 3902 page->mutex()->Unlock(); |
| 3813 } | 3903 } |
| 3814 return max_freed; | 3904 return max_freed; |
| 3815 } | 3905 } |
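
A minimal model of the TryLock gate at the top of ParallelSweepPage(), with std::mutex standing in for the page mutex and a boolean for the kSweepingPending state: whichever thread wins the lock sweeps the page, losers return 0 immediately, and a caller that must have the page swept (SweepOrWaitUntilSweepingCompleted above) waits for the owner to finish instead.

    #include <mutex>

    int SweepIfOwner(std::mutex* page_mutex, bool* pending) {
      if (!page_mutex->try_lock()) return 0;  // another sweeper owns the page
      int max_freed = 0;
      if (*pending) {
        *pending = false;  // claim the page; models kSweepingInProgress
        max_freed = 128;   // ... RawSweep(...) would run here ...
      }
      page_mutex->unlock();
      return max_freed;
    }

    int main() {
      std::mutex page_mutex;
      bool pending = true;
      return SweepIfOwner(&page_mutex, &pending) > 0 ? 0 : 1;
    }
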
| 3816 | 3906 |
| 3817 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { | 3907 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { |
| 3818 DCHECK(!sweeping_in_progress_); | 3908 DCHECK(!sweeping_in_progress_); |
| 3819 PrepareToBeSweptPage(space, page); | 3909 PrepareToBeSweptPage(space, page); |
| 3820 sweeping_list_[space].push_back(page); | 3910 sweeping_list_[space].push_back(page); |
| 3821 } | 3911 } |
| 3822 | 3912 |
| 3823 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, | 3913 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, |
| 3824 Page* page) { | 3914 Page* page) { |
| 3825 DCHECK(sweeping_in_progress_); | 3915 DCHECK(sweeping_in_progress_); |
| 3826 PrepareToBeSweptPage(space, page); | 3916 PrepareToBeSweptPage(space, page); |
| 3827 late_pages_ = true; | 3917 late_pages_ = true; |
| 3828 AddSweepingPageSafe(space, page); | 3918 AddSweepingPageSafe(space, page); |
| 3829 } | 3919 } |
| 3830 | 3920 |
| 3831 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, | 3921 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, |
| 3832 Page* page) { | 3922 Page* page) { |
| 3833 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); | 3923 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); |
| 3834 int to_sweep = page->area_size() - page->LiveBytes(); | 3924 int to_sweep = page->area_size() - page->LiveBytes(); |
| 3835 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); | 3925 if (space != NEW_SPACE) |
| 3926 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); |
| 3836 } | 3927 } |
| 3837 | 3928 |
| 3838 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( | 3929 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( |
| 3839 AllocationSpace space) { | 3930 AllocationSpace space) { |
| 3840 base::LockGuard<base::Mutex> guard(&mutex_); | 3931 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3841 Page* page = nullptr; | 3932 Page* page = nullptr; |
| 3842 if (!sweeping_list_[space].empty()) { | 3933 if (!sweeping_list_[space].empty()) { |
| 3843 page = sweeping_list_[space].front(); | 3934 page = sweeping_list_[space].front(); |
| 3844 sweeping_list_[space].pop_front(); | 3935 sweeping_list_[space].pop_front(); |
| 3845 } | 3936 } |
| (...skipping 43 matching lines...) |
| 3889 continue; | 3980 continue; |
| 3890 } | 3981 } |
| 3891 | 3982 |
| 3892 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { | 3983 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { |
| 3893 // We need to sweep the page to get it into an iterable state again. Note | 3984 // We need to sweep the page to get it into an iterable state again. Note |
| 3894 // that this adds unusable memory to the free list that is later | 3985 // that this adds unusable memory to the free list that is later |
| 3895 // dropped from the free list again. Since we only use the flag for | 3986 // dropped from the free list again. Since we only use the flag for |
| 3896 // testing, this is fine. | 3987 // testing, this is fine. |
| 3897 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3988 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
| 3898 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, | 3989 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, |
| 3899 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>( | 3990 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST, |
| 3900 space, p, nullptr); | 3991 Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr); |
| 3901 continue; | 3992 continue; |
| 3902 } | 3993 } |
| 3903 | 3994 |
| 3904 // One unused page is kept; all further ones are released before sweeping. | 3995 // One unused page is kept; all further ones are released before sweeping. |
| 3905 if (p->LiveBytes() == 0) { | 3996 if (p->LiveBytes() == 0) { |
| 3906 if (unused_page_present) { | 3997 if (unused_page_present) { |
| 3907 if (FLAG_gc_verbose) { | 3998 if (FLAG_gc_verbose) { |
| 3908 PrintIsolate(isolate(), "sweeping: released page: %p", | 3999 PrintIsolate(isolate(), "sweeping: released page: %p", |
| 3909 static_cast<void*>(p)); | 4000 static_cast<void*>(p)); |
| 3910 } | 4001 } |
| (...skipping 85 matching lines...) |
| 3996 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4087 MarkBit mark_bit = Marking::MarkBitFrom(host); |
| 3997 if (Marking::IsBlack(mark_bit)) { | 4088 if (Marking::IsBlack(mark_bit)) { |
| 3998 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 4089 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
| 3999 RecordRelocSlot(host, &rinfo, target); | 4090 RecordRelocSlot(host, &rinfo, target); |
| 4000 } | 4091 } |
| 4001 } | 4092 } |
| 4002 } | 4093 } |
| 4003 | 4094 |
| 4004 } // namespace internal | 4095 } // namespace internal |
| 4005 } // namespace v8 | 4096 } // namespace v8 |