Chromium Code Reviews

Side by Side Diff: src/heap/mark-compact.cc

Issue 1957323003: [heap] Add page evacuation mode for new->new (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: rebase after sweeper fix on ToT (created 4 years, 6 months ago)
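
As a quick orientation before the diff: this patch set adds a fourth page evacuation mode, new->new. A new-space page with enough live bytes that does not contain the age mark is evacuated wholesale instead of having its objects copied one by one: if the page lies entirely below the age mark it is converted to an old-space page (PAGE_NEW_OLD_PROMOTION, which already existed), otherwise it is re-linked into to-space and flagged PAGE_NEW_NEW_PROMOTION (new in this patch). The following is a minimal, self-contained sketch of that decision, not V8 code; PageModel and the two constants are illustrative assumptions standing in for Page, FLAG_page_promotion_threshold, and Page::kAllocatableMemory.

#include <iostream>

struct PageModel {                 // hypothetical stand-in for v8::internal::Page
  int live_bytes = 0;
  bool never_evacuate = false;     // NEVER_EVACUATE
  bool below_age_mark = false;     // NEW_SPACE_BELOW_AGE_MARK
  bool contains_age_mark = false;  // page->Contains(new_space->age_mark())
};

enum class PagePromotion { kNone, kNewToOld, kNewToNew };

constexpr int kAllocatableMemory = 500 * 1024;  // illustrative page payload size
constexpr int kPromotionThresholdPercent = 70;  // assumed FLAG_page_promotion_threshold

PagePromotion ChoosePagePromotion(const PageModel& page) {
  const int threshold = kAllocatableMemory * kPromotionThresholdPercent / 100;
  if (page.never_evacuate || page.live_bytes <= threshold ||
      page.contains_age_mark) {
    return PagePromotion::kNone;  // objects are copied individually, as before
  }
  // Pages that survived a previous scavenge move out of new space entirely;
  // younger, densely live pages stay in new space as whole-page moves.
  return page.below_age_mark ? PagePromotion::kNewToOld   // PAGE_NEW_OLD_PROMOTION
                             : PagePromotion::kNewToNew;  // PAGE_NEW_NEW_PROMOTION
}

int main() {
  PageModel mostly_live_young_page;
  mostly_live_young_page.live_bytes = 450 * 1024;
  std::cout << (ChoosePagePromotion(mostly_live_young_page) ==
                PagePromotion::kNewToNew)
            << "\n";  // prints 1: the page is flagged for new->new promotion
}
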
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/mark-compact.h" 5 #include "src/heap/mark-compact.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h" 9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h" 10 #include "src/code-stubs.h"
(...skipping 454 matching lines...)
465 AllocationSpace space_to_start) 465 AllocationSpace space_to_start)
466 : sweeper_(sweeper), 466 : sweeper_(sweeper),
467 pending_sweeper_tasks_(pending_sweeper_tasks), 467 pending_sweeper_tasks_(pending_sweeper_tasks),
468 space_to_start_(space_to_start) {} 468 space_to_start_(space_to_start) {}
469 469
470 virtual ~SweeperTask() {} 470 virtual ~SweeperTask() {}
471 471
472 private: 472 private:
473 // v8::Task overrides. 473 // v8::Task overrides.
474 void Run() override { 474 void Run() override {
475 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); 475 DCHECK_GE(space_to_start_, FIRST_SPACE);
476 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); 476 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
477 const int offset = space_to_start_ - FIRST_PAGED_SPACE; 477 const int offset = space_to_start_ - FIRST_SPACE;
478 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; 478 const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
479 for (int i = 0; i < num_spaces; i++) { 479 for (int i = 0; i < num_spaces; i++) {
480 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); 480 const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
481 DCHECK_GE(space_id, FIRST_PAGED_SPACE); 481 DCHECK_GE(space_id, FIRST_SPACE);
482 DCHECK_LE(space_id, LAST_PAGED_SPACE); 482 DCHECK_LE(space_id, LAST_PAGED_SPACE);
483 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); 483 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
484 } 484 }
485 pending_sweeper_tasks_->Signal(); 485 pending_sweeper_tasks_->Signal();
486 } 486 }
487 487
488 Sweeper* sweeper_; 488 Sweeper* sweeper_;
489 base::Semaphore* pending_sweeper_tasks_; 489 base::Semaphore* pending_sweeper_tasks_;
490 AllocationSpace space_to_start_; 490 AllocationSpace space_to_start_;
491 491
(...skipping 17 matching lines...)
509 void MarkCompactCollector::Sweeper::StartSweepingHelper( 509 void MarkCompactCollector::Sweeper::StartSweepingHelper(
510 AllocationSpace space_to_start) { 510 AllocationSpace space_to_start) {
511 num_sweeping_tasks_.Increment(1); 511 num_sweeping_tasks_.Increment(1);
512 V8::GetCurrentPlatform()->CallOnBackgroundThread( 512 V8::GetCurrentPlatform()->CallOnBackgroundThread(
513 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), 513 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
514 v8::Platform::kShortRunningTask); 514 v8::Platform::kShortRunningTask);
515 } 515 }
516 516
517 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( 517 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
518 Page* page) { 518 Page* page) {
519 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
520 if (!page->SweepingDone()) { 519 if (!page->SweepingDone()) {
521 ParallelSweepPage(page, owner); 520 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
521 ParallelSweepPage(page, owner->identity());
522 if (!page->SweepingDone()) { 522 if (!page->SweepingDone()) {
523 // We were not able to sweep that page, i.e., a concurrent 523 // We were not able to sweep that page, i.e., a concurrent
524 // sweeper thread currently owns this page. Wait for the sweeper 524 // sweeper thread currently owns this page. Wait for the sweeper
525 // thread to be done with this page. 525 // thread to be done with this page.
526 page->WaitUntilSweepingCompleted(); 526 page->WaitUntilSweepingCompleted();
527 } 527 }
528 } 528 }
529 } 529 }
530 530
531 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { 531 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
(...skipping 22 matching lines...)
554 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); 554 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
555 } 555 }
556 556
557 if (FLAG_concurrent_sweeping) { 557 if (FLAG_concurrent_sweeping) {
558 while (num_sweeping_tasks_.Value() > 0) { 558 while (num_sweeping_tasks_.Value() > 0) {
559 pending_sweeper_tasks_semaphore_.Wait(); 559 pending_sweeper_tasks_semaphore_.Wait();
560 num_sweeping_tasks_.Increment(-1); 560 num_sweeping_tasks_.Increment(-1);
561 } 561 }
562 } 562 }
563 563
564 ForAllSweepingSpaces( 564 ForAllSweepingSpaces([this](AllocationSpace space) {
565 [this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); }); 565 if (space == NEW_SPACE) {
566 swept_list_[NEW_SPACE].Clear();
567 }
568 DCHECK(sweeping_list_[space].empty());
569 });
566 late_pages_ = false; 570 late_pages_ = false;
567 sweeping_in_progress_ = false; 571 sweeping_in_progress_ = false;
568 } 572 }
569 573
574 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
575 if (!sweeping_in_progress_) return;
576 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
577 NewSpacePageIterator pit(heap_->new_space());
578 while (pit.has_next()) {
579 Page* page = pit.next();
580 SweepOrWaitUntilSweepingCompleted(page);
581 }
582 }
583 }
584
570 void MarkCompactCollector::EnsureSweepingCompleted() { 585 void MarkCompactCollector::EnsureSweepingCompleted() {
571 if (!sweeper().sweeping_in_progress()) return; 586 if (!sweeper().sweeping_in_progress()) return;
572 587
573 sweeper().EnsureCompleted(); 588 sweeper().EnsureCompleted();
574 heap()->old_space()->RefillFreeList(); 589 heap()->old_space()->RefillFreeList();
575 heap()->code_space()->RefillFreeList(); 590 heap()->code_space()->RefillFreeList();
576 heap()->map_space()->RefillFreeList(); 591 heap()->map_space()->RefillFreeList();
577 592
578 #ifdef VERIFY_HEAP 593 #ifdef VERIFY_HEAP
579 if (FLAG_verify_heap && !evacuation()) { 594 if (FLAG_verify_heap && !evacuation()) {
(...skipping 1284 matching lines...)
1864 AllocationSpace space_to_allocate_; 1879 AllocationSpace space_to_allocate_;
1865 intptr_t promoted_size_; 1880 intptr_t promoted_size_;
1866 intptr_t semispace_copied_size_; 1881 intptr_t semispace_copied_size_;
1867 HashMap* local_pretenuring_feedback_; 1882 HashMap* local_pretenuring_feedback_;
1868 }; 1883 };
1869 1884
1870 class MarkCompactCollector::EvacuateNewSpacePageVisitor final 1885 class MarkCompactCollector::EvacuateNewSpacePageVisitor final
1871 : public MarkCompactCollector::HeapObjectVisitor { 1886 : public MarkCompactCollector::HeapObjectVisitor {
1872 public: 1887 public:
1873 explicit EvacuateNewSpacePageVisitor(Heap* heap) 1888 explicit EvacuateNewSpacePageVisitor(Heap* heap)
1874 : heap_(heap), promoted_size_(0) {} 1889 : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
1875 1890
1876 static void TryMoveToOldSpace(Page* page, PagedSpace* owner) { 1891 static void TryMoveToOldSpace(Page* page, PagedSpace* owner) {
1877 if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) { 1892 if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) {
1878 Page* new_page = Page::ConvertNewToOld(page, owner); 1893 Page* new_page = Page::ConvertNewToOld(page, owner);
1879 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); 1894 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1880 } 1895 }
1881 } 1896 }
1882 1897
1898 static void MoveToToSpace(Page* page, Space* owner) {
ulan 2016/06/10 12:58:54 Nit: the name seems a bit misleading since it hide
Michael Lippautz 2016/06/10 15:18:15 Done.
1899 page->heap()->new_space()->AddPageToToSpace(page);
1900 page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1901 }
1902
1883 inline bool Visit(HeapObject* object) { 1903 inline bool Visit(HeapObject* object) {
1884 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); 1904 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1885 object->IterateBodyFast(&visitor); 1905 object->IterateBodyFast(&visitor);
1886 promoted_size_ += object->Size(); 1906 promoted_size_ += object->Size();
1887 return true; 1907 return true;
1888 } 1908 }
1889 1909
1890 intptr_t promoted_size() { return promoted_size_; } 1910 intptr_t promoted_size() { return promoted_size_; }
1911 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1912
1913 void account_semispace_copied(intptr_t copied) {
1914 semispace_copied_size_ += copied;
1915 }
1891 1916
1892 private: 1917 private:
1893 Heap* heap_; 1918 Heap* heap_;
1894 intptr_t promoted_size_; 1919 intptr_t promoted_size_;
1920 intptr_t semispace_copied_size_;
1895 }; 1921 };
1896 1922
1897 class MarkCompactCollector::EvacuateOldSpaceVisitor final 1923 class MarkCompactCollector::EvacuateOldSpaceVisitor final
1898 : public MarkCompactCollector::EvacuateVisitorBase { 1924 : public MarkCompactCollector::EvacuateVisitorBase {
1899 public: 1925 public:
1900 EvacuateOldSpaceVisitor(Heap* heap, 1926 EvacuateOldSpaceVisitor(Heap* heap,
1901 CompactionSpaceCollection* compaction_spaces) 1927 CompactionSpaceCollection* compaction_spaces)
1902 : EvacuateVisitorBase(heap, compaction_spaces) {} 1928 : EvacuateVisitorBase(heap, compaction_spaces) {}
1903 1929
1904 inline bool Visit(HeapObject* object) override { 1930 inline bool Visit(HeapObject* object) override {
(...skipping 1123 matching lines...)
3028 NewSpace* new_space = heap()->new_space(); 3054 NewSpace* new_space = heap()->new_space();
3029 NewSpacePageIterator it(new_space->bottom(), new_space->top()); 3055 NewSpacePageIterator it(new_space->bottom(), new_space->top());
3030 // Append the list of new space pages to be processed. 3056 // Append the list of new space pages to be processed.
3031 while (it.has_next()) { 3057 while (it.has_next()) {
3032 newspace_evacuation_candidates_.Add(it.next()); 3058 newspace_evacuation_candidates_.Add(it.next());
3033 } 3059 }
3034 new_space->Flip(); 3060 new_space->Flip();
3035 new_space->ResetAllocationInfo(); 3061 new_space->ResetAllocationInfo();
3036 } 3062 }
3037 3063
3038 void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
3039 newspace_evacuation_candidates_.Rewind(0);
3040 }
3041
3042 class MarkCompactCollector::Evacuator : public Malloced { 3064 class MarkCompactCollector::Evacuator : public Malloced {
3043 public: 3065 public:
3066 enum EvacuationMode {
3067 kObjectsNewToOld,
3068 kPageNewToOld,
3069 kObjectsOldToOld,
3070 kPageNewToNew,
3071 };
3072
3073 static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
3074 // Note: The order of checks is important in this function.
3075 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
3076 return kPageNewToOld;
3077 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
3078 return kPageNewToNew;
3079 if (chunk->InNewSpace()) return kObjectsNewToOld;
3080 DCHECK(chunk->IsEvacuationCandidate());
3081 return kObjectsOldToOld;
3082 }
3083
3044 // NewSpacePages with more live bytes than this threshold qualify for fast 3084 // NewSpacePages with more live bytes than this threshold qualify for fast
3045 // evacuation. 3085 // evacuation.
3046 static int PageEvacuationThreshold() { 3086 static int PageEvacuationThreshold() {
3047 if (FLAG_page_promotion) 3087 if (FLAG_page_promotion)
3048 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; 3088 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
3049 return Page::kAllocatableMemory + kPointerSize; 3089 return Page::kAllocatableMemory + kPointerSize;
3050 } 3090 }
3051 3091
3052 explicit Evacuator(MarkCompactCollector* collector) 3092 explicit Evacuator(MarkCompactCollector* collector)
3053 : collector_(collector), 3093 : collector_(collector),
3054 compaction_spaces_(collector->heap()), 3094 compaction_spaces_(collector->heap()),
3055 local_pretenuring_feedback_(HashMap::PointersMatch, 3095 local_pretenuring_feedback_(HashMap::PointersMatch,
3056 kInitialLocalPretenuringFeedbackCapacity), 3096 kInitialLocalPretenuringFeedbackCapacity),
3057 new_space_visitor_(collector->heap(), &compaction_spaces_, 3097 new_space_visitor_(collector->heap(), &compaction_spaces_,
3058 &local_pretenuring_feedback_), 3098 &local_pretenuring_feedback_),
3059 new_space_page_visitor(collector->heap()), 3099 new_space_page_visitor(collector->heap()),
3060 old_space_visitor_(collector->heap(), &compaction_spaces_), 3100 old_space_visitor_(collector->heap(), &compaction_spaces_),
3061 duration_(0.0), 3101 duration_(0.0),
3062 bytes_compacted_(0) {} 3102 bytes_compacted_(0) {}
3063 3103
3064 inline bool EvacuatePage(Page* chunk); 3104 inline bool EvacuatePage(Page* chunk);
3065 3105
3066 // Merge back locally cached info sequentially. Note that this method needs 3106 // Merge back locally cached info sequentially. Note that this method needs
3067 // to be called from the main thread. 3107 // to be called from the main thread.
3068 inline void Finalize(); 3108 inline void Finalize();
3069 3109
3070 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } 3110 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
3071 3111
3072 private: 3112 private:
3073 enum EvacuationMode {
3074 kObjectsNewToOld,
3075 kPageNewToOld,
3076 kObjectsOldToOld,
3077 };
3078
3079 static const int kInitialLocalPretenuringFeedbackCapacity = 256; 3113 static const int kInitialLocalPretenuringFeedbackCapacity = 256;
3080 3114
3081 inline Heap* heap() { return collector_->heap(); } 3115 inline Heap* heap() { return collector_->heap(); }
3082 3116
3083 inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
3084 // Note: The order of checks is important in this function.
3085 if (chunk->InNewSpace()) return kObjectsNewToOld;
3086 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
3087 return kPageNewToOld;
3088 DCHECK(chunk->IsEvacuationCandidate());
3089 return kObjectsOldToOld;
3090 }
3091
3092 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { 3117 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3093 duration_ += duration; 3118 duration_ += duration;
3094 bytes_compacted_ += bytes_compacted; 3119 bytes_compacted_ += bytes_compacted;
3095 } 3120 }
3096 3121
3097 template <IterationMode mode, class Visitor>
3098 inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
3099
3100 MarkCompactCollector* collector_; 3122 MarkCompactCollector* collector_;
3101 3123
3102 // Locally cached collector data. 3124 // Locally cached collector data.
3103 CompactionSpaceCollection compaction_spaces_; 3125 CompactionSpaceCollection compaction_spaces_;
3104 HashMap local_pretenuring_feedback_; 3126 HashMap local_pretenuring_feedback_;
3105 3127
3106 // Visitors for the corresponding spaces. 3128 // Visitors for the corresponding spaces.
3107 EvacuateNewSpaceVisitor new_space_visitor_; 3129 EvacuateNewSpaceVisitor new_space_visitor_;
3108 EvacuateNewSpacePageVisitor new_space_page_visitor; 3130 EvacuateNewSpacePageVisitor new_space_page_visitor;
3109 EvacuateOldSpaceVisitor old_space_visitor_; 3131 EvacuateOldSpaceVisitor old_space_visitor_;
3110 3132
3111 // Book keeping info. 3133 // Book keeping info.
3112 double duration_; 3134 double duration_;
3113 intptr_t bytes_compacted_; 3135 intptr_t bytes_compacted_;
3114 }; 3136 };
3115 3137
3116 template <MarkCompactCollector::IterationMode mode, class Visitor> 3138 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
3117 bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
3118 Visitor* visitor) {
3119 bool success = false; 3139 bool success = false;
3120 DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() || 3140 DCHECK(page->SweepingDone());
3121 p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)); 3141 int saved_live_bytes = page->LiveBytes();
3122 int saved_live_bytes = p->LiveBytes(); 3142 double evacuation_time = 0.0;
3123 double evacuation_time; 3143 Heap* heap = page->heap();
3124 { 3144 {
3125 AlwaysAllocateScope always_allocate(heap()->isolate()); 3145 AlwaysAllocateScope always_allocate(heap->isolate());
3126 TimedScope timed_scope(&evacuation_time); 3146 TimedScope timed_scope(&evacuation_time);
3127 success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode); 3147 switch (ComputeEvacuationMode(page)) {
3148 case kObjectsNewToOld:
3149 success = collector_->VisitLiveObjects(page, &new_space_visitor_,
3150 kClearMarkbits);
3151 ArrayBufferTracker::ProcessBuffers(
3152 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3153 DCHECK(success);
3154 break;
3155 case kPageNewToOld:
3156 success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
3157 kKeepMarking);
3158 // ArrayBufferTracker will be updated during sweeping.
3159 DCHECK(success);
3160 break;
3161 case kPageNewToNew:
3162 new_space_page_visitor.account_semispace_copied(page->LiveBytes());
3163 // ArrayBufferTracker will be updated during sweeping.
3164 success = true;
3165 break;
3166 case kObjectsOldToOld:
3167 success = collector_->VisitLiveObjects(page, &old_space_visitor_,
3168 kClearMarkbits);
3169 if (!success) {
3170 // Aborted compaction page. We have to record slots here, since we
3171 // might not have recorded them in first place.
3172 // Note: We mark the page as aborted here to be able to record slots
3173 // for code objects in |RecordMigratedSlotVisitor|.
3174 page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3175 EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
3176 success =
3177 collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
3178 ArrayBufferTracker::ProcessBuffers(
3179 page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3180 DCHECK(success);
3181 // We need to return failure here to indicate that we want this page
3182 // added to the sweeper.
3183 success = false;
3184 } else {
3185 ArrayBufferTracker::ProcessBuffers(
3186 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3187 }
3188 break;
3189 default:
3190 UNREACHABLE();
3191 }
3128 } 3192 }
3193 ReportCompactionProgress(evacuation_time, saved_live_bytes);
3129 if (FLAG_trace_evacuation) { 3194 if (FLAG_trace_evacuation) {
3130 const char age_mark_tag = 3195 PrintIsolate(heap->isolate(),
3131 !p->InNewSpace() 3196 "evacuation[%p]: page=%p new_space=%d "
3132 ? 'x' 3197 "page_evacuation=%d executable=%d contains_age_mark=%d "
3133 : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) 3198 "live_bytes=%d time=%f\n",
3134 ? '>' 3199 static_cast<void*>(this), static_cast<void*>(page),
3135 : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<' 3200 page->InNewSpace(),
3136 : '#'; 3201 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
3137 PrintIsolate(heap()->isolate(), 3202 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
3138 "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c " 3203 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
3139 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n", 3204 page->Contains(heap->new_space()->age_mark()),
3140 static_cast<void*>(this), static_cast<void*>(p), 3205 saved_live_bytes, evacuation_time);
3141 p->InNewSpace(), age_mark_tag,
3142 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
3143 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
3144 evacuation_time);
3145 }
3146 if (success) {
3147 ReportCompactionProgress(evacuation_time, saved_live_bytes);
3148 } 3206 }
3149 return success; 3207 return success;
3150 } 3208 }
3151 3209
3152 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
3153 bool result = false;
3154 DCHECK(page->SweepingDone());
3155 switch (ComputeEvacuationMode(page)) {
3156 case kObjectsNewToOld:
3157 result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
3158 ArrayBufferTracker::ProcessBuffers(
3159 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3160 DCHECK(result);
3161 USE(result);
3162 break;
3163 case kPageNewToOld:
3164 result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
3165 // ArrayBufferTracker will be updated during sweeping.
3166 DCHECK(result);
3167 USE(result);
3168 break;
3169 case kObjectsOldToOld:
3170 result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
3171 if (!result) {
3172 // Aborted compaction page. We have to record slots here, since we might
3173 // not have recorded them in first place.
3174 // Note: We mark the page as aborted here to be able to record slots
3175 // for code objects in |RecordMigratedSlotVisitor|.
3176 page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3177 EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
3178 result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
3179 ArrayBufferTracker::ProcessBuffers(
3180 page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3181 DCHECK(result);
3182 USE(result);
3183 // We need to return failure here to indicate that we want this page
3184 // added to the sweeper.
3185 return false;
3186 }
3187 ArrayBufferTracker::ProcessBuffers(
3188 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3189
3190 break;
3191 default:
3192 UNREACHABLE();
3193 }
3194 return result;
3195 }
3196
3197 void MarkCompactCollector::Evacuator::Finalize() { 3210 void MarkCompactCollector::Evacuator::Finalize() {
3198 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); 3211 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
3199 heap()->code_space()->MergeCompactionSpace( 3212 heap()->code_space()->MergeCompactionSpace(
3200 compaction_spaces_.Get(CODE_SPACE)); 3213 compaction_spaces_.Get(CODE_SPACE));
3201 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); 3214 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
3202 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + 3215 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
3203 new_space_page_visitor.promoted_size()); 3216 new_space_page_visitor.promoted_size());
3204 heap()->IncrementSemiSpaceCopiedObjectSize( 3217 heap()->IncrementSemiSpaceCopiedObjectSize(
3205 new_space_visitor_.semispace_copied_size()); 3218 new_space_visitor_.semispace_copied_size() +
3219 new_space_page_visitor.semispace_copied_size());
3206 heap()->IncrementYoungSurvivorsCounter( 3220 heap()->IncrementYoungSurvivorsCounter(
3207 new_space_visitor_.promoted_size() + 3221 new_space_visitor_.promoted_size() +
3208 new_space_visitor_.semispace_copied_size() + 3222 new_space_visitor_.semispace_copied_size() +
3209 new_space_page_visitor.promoted_size()); 3223 new_space_page_visitor.promoted_size() +
3224 new_space_page_visitor.semispace_copied_size());
3210 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); 3225 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
3211 } 3226 }
3212 3227
3213 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, 3228 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
3214 intptr_t live_bytes) { 3229 intptr_t live_bytes) {
3215 if (!FLAG_parallel_compaction) return 1; 3230 if (!FLAG_parallel_compaction) return 1;
3216 // Compute the number of needed tasks based on a target compaction time, the 3231 // Compute the number of needed tasks based on a target compaction time, the
3217 // profiled compaction speed and marked live memory. 3232 // profiled compaction speed and marked live memory.
3218 // 3233 //
3219 // The number of parallel compaction tasks is limited by: 3234 // The number of parallel compaction tasks is limited by:
(...skipping 27 matching lines...)
3247 3262
3248 static const bool NeedSequentialFinalization = true; 3263 static const bool NeedSequentialFinalization = true;
3249 3264
3250 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, 3265 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
3251 MemoryChunk* chunk, PerPageData) { 3266 MemoryChunk* chunk, PerPageData) {
3252 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); 3267 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
3253 } 3268 }
3254 3269
3255 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, 3270 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
3256 bool success, PerPageData data) { 3271 bool success, PerPageData data) {
3257 if (chunk->InNewSpace()) { 3272 using Evacuator = MarkCompactCollector::Evacuator;
3258 DCHECK(success); 3273 Page* p = static_cast<Page*>(chunk);
3259 } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { 3274 switch (Evacuator::ComputeEvacuationMode(p)) {
3260 DCHECK(success); 3275 case Evacuator::kPageNewToOld:
3261 Page* p = static_cast<Page*>(chunk); 3276 break;
3262 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); 3277 case Evacuator::kPageNewToNew:
3263 p->ForAllFreeListCategories( 3278 DCHECK(success);
3264 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); 3279 break;
3265 heap->mark_compact_collector()->sweeper().AddLatePage( 3280 case Evacuator::kObjectsNewToOld:
3266 p->owner()->identity(), p); 3281 DCHECK(success);
3267 } else { 3282 break;
3268 Page* p = static_cast<Page*>(chunk); 3283 case Evacuator::kObjectsOldToOld:
3269 if (success) { 3284 if (success) {
3270 DCHECK(p->IsEvacuationCandidate()); 3285 DCHECK(p->IsEvacuationCandidate());
3271 DCHECK(p->SweepingDone()); 3286 DCHECK(p->SweepingDone());
3272 p->Unlink(); 3287 p->Unlink();
3273 } else { 3288 } else {
3274 // We have partially compacted the page, i.e., some objects may have 3289 // We have partially compacted the page, i.e., some objects may have
3275 // moved, others are still in place. 3290 // moved, others are still in place.
3276 p->ClearEvacuationCandidate(); 3291 p->ClearEvacuationCandidate();
3277 // Slots have already been recorded so we just need to add it to the 3292 // Slots have already been recorded so we just need to add it to the
3278 // sweeper. 3293 // sweeper, which will happen after updating pointers.
3279 *data += 1; 3294 *data += 1;
3280 } 3295 }
3296 break;
3297 default:
3298 UNREACHABLE();
3281 } 3299 }
3282 } 3300 }
3283 }; 3301 };
3284 3302
3285 void MarkCompactCollector::EvacuatePagesInParallel() { 3303 void MarkCompactCollector::EvacuatePagesInParallel() {
3286 PageParallelJob<EvacuationJobTraits> job( 3304 PageParallelJob<EvacuationJobTraits> job(
3287 heap_, heap_->isolate()->cancelable_task_manager(), 3305 heap_, heap_->isolate()->cancelable_task_manager(),
3288 &page_parallel_job_semaphore_); 3306 &page_parallel_job_semaphore_);
3289 3307
3290 int abandoned_pages = 0; 3308 int abandoned_pages = 0;
3291 intptr_t live_bytes = 0; 3309 intptr_t live_bytes = 0;
3292 for (Page* page : evacuation_candidates_) { 3310 for (Page* page : evacuation_candidates_) {
3293 live_bytes += page->LiveBytes(); 3311 live_bytes += page->LiveBytes();
3294 job.AddPage(page, &abandoned_pages); 3312 job.AddPage(page, &abandoned_pages);
3295 } 3313 }
3296 3314
3297 const Address age_mark = heap()->new_space()->age_mark(); 3315 const Address age_mark = heap()->new_space()->age_mark();
3298 for (Page* page : newspace_evacuation_candidates_) { 3316 for (Page* page : newspace_evacuation_candidates_) {
3299 live_bytes += page->LiveBytes(); 3317 live_bytes += page->LiveBytes();
3300 if (!page->NeverEvacuate() && 3318 if (!page->NeverEvacuate() &&
3301 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) && 3319 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
3302 page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
3303 !page->Contains(age_mark)) { 3320 !page->Contains(age_mark)) {
3304 EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space()); 3321 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
3322 EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page,
3323 heap()->old_space());
3324 } else {
3325 EvacuateNewSpacePageVisitor::MoveToToSpace(page, heap()->new_space());
3326 }
3305 } 3327 }
3328
3306 job.AddPage(page, &abandoned_pages); 3329 job.AddPage(page, &abandoned_pages);
3307 } 3330 }
3308 DCHECK_GE(job.NumberOfPages(), 1); 3331 DCHECK_GE(job.NumberOfPages(), 1);
3309 3332
3310 // Used for trace summary. 3333 // Used for trace summary.
3311 double compaction_speed = 0; 3334 double compaction_speed = 0;
3312 if (FLAG_trace_evacuation) { 3335 if (FLAG_trace_evacuation) {
3313 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); 3336 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3314 } 3337 }
3315 3338
(...skipping 33 matching lines...)
3349 return map_word.ToForwardingAddress(); 3372 return map_word.ToForwardingAddress();
3350 } 3373 }
3351 } 3374 }
3352 return object; 3375 return object;
3353 } 3376 }
3354 }; 3377 };
3355 3378
3356 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, 3379 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode,
3357 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, 3380 MarkCompactCollector::Sweeper::SweepingParallelism parallelism,
3358 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, 3381 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode,
3382 MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode,
3359 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> 3383 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode>
3360 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, 3384 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
3361 ObjectVisitor* v) { 3385 ObjectVisitor* v) {
3362 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); 3386 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
3363 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); 3387 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
3364 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, 3388 DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) ||
3365 space->identity() == CODE_SPACE); 3389 (skip_list_mode == REBUILD_SKIP_LIST));
3366 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); 3390 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3367 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY); 3391 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY);
3368 3392
3369 // Before we sweep objects on the page, we free dead array buffers which 3393 // Before we sweep objects on the page, we free dead array buffers which
3370 // requires valid mark bits. 3394 // requires valid mark bits.
3371 ArrayBufferTracker::FreeDead(p); 3395 ArrayBufferTracker::FreeDead(p);
3372 3396
3373 Address free_start = p->area_start(); 3397 Address free_start = p->area_start();
3374 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); 3398 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3375 3399
(...skipping 12 matching lines...)
3388 LiveObjectIterator<kBlackObjects> it(p); 3412 LiveObjectIterator<kBlackObjects> it(p);
3389 HeapObject* object = NULL; 3413 HeapObject* object = NULL;
3390 while ((object = it.Next()) != NULL) { 3414 while ((object = it.Next()) != NULL) {
3391 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); 3415 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3392 Address free_end = object->address(); 3416 Address free_end = object->address();
3393 if (free_end != free_start) { 3417 if (free_end != free_start) {
3394 int size = static_cast<int>(free_end - free_start); 3418 int size = static_cast<int>(free_end - free_start);
3395 if (free_space_mode == ZAP_FREE_SPACE) { 3419 if (free_space_mode == ZAP_FREE_SPACE) {
3396 memset(free_start, 0xcc, size); 3420 memset(free_start, 0xcc, size);
3397 } 3421 }
3398 freed_bytes = space->UnaccountedFree(free_start, size); 3422 if (free_list_mode == REBUILD_FREE_LIST) {
3399 max_freed_bytes = Max(freed_bytes, max_freed_bytes); 3423 freed_bytes = space->UnaccountedFree(free_start, size);
3424 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3425 } else {
3426 p->heap()->CreateFillerObjectAt(free_start, size,
3427 ClearRecordedSlots::kNo);
3428 }
3400 } 3429 }
3401 Map* map = object->synchronized_map(); 3430 Map* map = object->synchronized_map();
3402 int size = object->SizeFromMap(map); 3431 int size = object->SizeFromMap(map);
3403 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { 3432 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3404 object->IterateBody(map->instance_type(), size, v); 3433 object->IterateBody(map->instance_type(), size, v);
3405 } 3434 }
3406 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { 3435 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3407 int new_region_start = SkipList::RegionNumber(free_end); 3436 int new_region_start = SkipList::RegionNumber(free_end);
3408 int new_region_end = 3437 int new_region_end =
3409 SkipList::RegionNumber(free_end + size - kPointerSize); 3438 SkipList::RegionNumber(free_end + size - kPointerSize);
3410 if (new_region_start != curr_region || new_region_end != curr_region) { 3439 if (new_region_start != curr_region || new_region_end != curr_region) {
3411 skip_list->AddObject(free_end, size); 3440 skip_list->AddObject(free_end, size);
3412 curr_region = new_region_end; 3441 curr_region = new_region_end;
3413 } 3442 }
3414 } 3443 }
3415 free_start = free_end + size; 3444 free_start = free_end + size;
3416 } 3445 }
3417 3446
3418 // Clear the mark bits of that page and reset live bytes count. 3447 // Clear the mark bits of that page and reset live bytes count.
3419 Bitmap::Clear(p); 3448 Bitmap::Clear(p);
3420 3449
3421 if (free_start != p->area_end()) { 3450 if (free_start != p->area_end()) {
3422 int size = static_cast<int>(p->area_end() - free_start); 3451 int size = static_cast<int>(p->area_end() - free_start);
3423 if (free_space_mode == ZAP_FREE_SPACE) { 3452 if (free_space_mode == ZAP_FREE_SPACE) {
3424 memset(free_start, 0xcc, size); 3453 memset(free_start, 0xcc, size);
3425 } 3454 }
3426 freed_bytes = space->UnaccountedFree(free_start, size); 3455 if (free_list_mode == REBUILD_FREE_LIST) {
3427 max_freed_bytes = Max(freed_bytes, max_freed_bytes); 3456 freed_bytes = space->UnaccountedFree(free_start, size);
3457 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3458 } else {
3459 p->heap()->CreateFillerObjectAt(free_start, size,
3460 ClearRecordedSlots::kNo);
3461 }
3428 } 3462 }
3429 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); 3463 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3464 if (free_list_mode == IGNORE_FREE_LIST) return 0;
3430 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); 3465 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3431 } 3466 }
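
The new FreeListRebuildingMode template parameter is what makes RawSweep usable for new-space pages: with IGNORE_FREE_LIST a dead range is not handed to any free list (new space has none) but is overwritten with a filler object so the page stays iterable, and the function returns 0 instead of a largest-freed-block estimate. Below is a minimal, stand-alone model of that branch; it is not V8 code, and Range, FreeListModel, and PageModel are illustrative stand-ins.

#include <cstddef>
#include <iostream>
#include <vector>

enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };

struct Range { size_t start; size_t size; };

struct FreeListModel { std::vector<Range> entries; };  // stand-in for a paged space's free list
struct PageModel { std::vector<Range> fillers; };      // stand-in for a heap page

// Mirrors how RawSweep treats one dead range under the two modes.
size_t HandleDeadRange(FreeListRebuildingMode mode, const Range& dead,
                       FreeListModel* free_list, PageModel* page) {
  if (mode == REBUILD_FREE_LIST) {
    free_list->entries.push_back(dead);  // like space->UnaccountedFree(free_start, size)
    return dead.size;                    // feeds max_freed_bytes
  }
  page->fillers.push_back(dead);         // like CreateFillerObjectAt(...): keeps the page iterable
  return 0;                              // IGNORE_FREE_LIST sweeps report no freed bytes
}

int main() {
  FreeListModel old_space_free_list;
  PageModel new_space_page;
  Range dead{0, 128};
  std::cout << HandleDeadRange(REBUILD_FREE_LIST, dead, &old_space_free_list,
                               &new_space_page) << "\n";  // 128
  std::cout << HandleDeadRange(IGNORE_FREE_LIST, dead, &old_space_free_list,
                               &new_space_page) << "\n";  // 0
}
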
3432 3467
3433 void MarkCompactCollector::InvalidateCode(Code* code) { 3468 void MarkCompactCollector::InvalidateCode(Code* code) {
3434 if (heap_->incremental_marking()->IsCompacting() && 3469 if (heap_->incremental_marking()->IsCompacting() &&
3435 !ShouldSkipEvacuationSlotRecording(code)) { 3470 !ShouldSkipEvacuationSlotRecording(code)) {
3436 DCHECK(compacting_); 3471 DCHECK(compacting_);
3437 3472
3438 // If the object is white than no slots were recorded on it yet. 3473 // If the object is white than no slots were recorded on it yet.
3439 MarkBit mark_bit = Marking::MarkBitFrom(code); 3474 MarkBit mark_bit = Marking::MarkBitFrom(code);
(...skipping 95 matching lines...)
3535 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { 3570 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3536 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); 3571 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
3537 Heap::RelocationLock relocation_lock(heap()); 3572 Heap::RelocationLock relocation_lock(heap());
3538 3573
3539 { 3574 {
3540 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); 3575 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
3541 EvacuationScope evacuation_scope(this); 3576 EvacuationScope evacuation_scope(this);
3542 3577
3543 EvacuateNewSpacePrologue(); 3578 EvacuateNewSpacePrologue();
3544 EvacuatePagesInParallel(); 3579 EvacuatePagesInParallel();
3545 EvacuateNewSpaceEpilogue();
3546 heap()->new_space()->set_age_mark(heap()->new_space()->top()); 3580 heap()->new_space()->set_age_mark(heap()->new_space()->top());
3547 } 3581 }
3548 3582
3549 UpdatePointersAfterEvacuation(); 3583 UpdatePointersAfterEvacuation();
3550 3584
3551 // Give pages that are queued to be freed back to the OS. Note that filtering 3585 // Give pages that are queued to be freed back to the OS. Note that filtering
3552 // slots only handles old space (for unboxed doubles), and thus map space can 3586 // slots only handles old space (for unboxed doubles), and thus map space can
3553 // still contain stale pointers. We only free the chunks after pointer updates 3587 // still contain stale pointers. We only free the chunks after pointer updates
3554 // to still have access to page headers. 3588 // to still have access to page headers.
3555 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); 3589 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3556 3590
3557 { 3591 {
3558 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); 3592 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3559 3593
3594 for (Page* p : newspace_evacuation_candidates_) {
3595 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3596 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
3597 sweeper().AddLatePage(p->owner()->identity(), p);
3598 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
3599 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
3600 p->ForAllFreeListCategories(
3601 [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
3602 sweeper().AddLatePage(p->owner()->identity(), p);
3603 }
3604 }
3605 newspace_evacuation_candidates_.Rewind(0);
3606
3560 for (Page* p : evacuation_candidates_) { 3607 for (Page* p : evacuation_candidates_) {
3561 // Important: skip list should be cleared only after roots were updated 3608 // Important: skip list should be cleared only after roots were updated
3562 // because root iteration traverses the stack and might have to find 3609 // because root iteration traverses the stack and might have to find
3563 // code objects from non-updated pc pointing into evacuation candidate. 3610 // code objects from non-updated pc pointing into evacuation candidate.
3564 SkipList* list = p->skip_list(); 3611 SkipList* list = p->skip_list();
3565 if (list != NULL) list->Clear(); 3612 if (list != NULL) list->Clear();
3566 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { 3613 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3567 sweeper().AddLatePage(p->owner()->identity(), p); 3614 sweeper().AddLatePage(p->owner()->identity(), p);
3568 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); 3615 p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
3569 } 3616 }
(...skipping 74 matching lines...)
3644 // Update the corresponding slot. 3691 // Update the corresponding slot.
3645 *slot = map_word.ToForwardingAddress(); 3692 *slot = map_word.ToForwardingAddress();
3646 } 3693 }
3647 // If the object was in from space before and is after executing the 3694 // If the object was in from space before and is after executing the
3648 // callback in to space, the object is still live. 3695 // callback in to space, the object is still live.
3649 // Unfortunately, we do not know about the slot. It could be in a 3696 // Unfortunately, we do not know about the slot. It could be in a
3650 // just freed free space object. 3697 // just freed free space object.
3651 if (heap->InToSpace(*slot)) { 3698 if (heap->InToSpace(*slot)) {
3652 return KEEP_SLOT; 3699 return KEEP_SLOT;
3653 } 3700 }
3701 } else if (heap->InToSpace(*slot)) {
3702 // Slots can be in "to" space after a page has been moved. Since there is
ulan 2016/06/10 12:58:54 DCHECK that the page has new_to_new promotion flag
Michael Lippautz 2016/06/10 15:18:15 Done.
3703 // no forwarding information present we need to check the markbits to
3704 // determine liveness.
3705 if (Marking::IsBlack(
3706 Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot))))
3707 return KEEP_SLOT;
3654 } else { 3708 } else {
3655 DCHECK(!heap->InNewSpace(*slot)); 3709 DCHECK(!heap->InNewSpace(*slot));
3656 } 3710 }
3657 return REMOVE_SLOT; 3711 return REMOVE_SLOT;
3658 } 3712 }
3659 }; 3713 };
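
The new InToSpace branch above exists because a page moved new->new still contains its original objects: nothing was copied, so there is no forwarding map word to consult, and slot liveness has to be read from the mark bits (the reviewer additionally asked for a DCHECK that such a page carries the new->new promotion flag). Here is a minimal illustrative model of that check, not the V8 API; MarkingModel is an assumed stand-in for the per-page mark bitmap.

#include <cstdint>
#include <iostream>
#include <unordered_set>

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

struct MarkingModel {  // hypothetical stand-in for the mark bitmap
  std::unordered_set<uintptr_t> black;
  bool IsBlack(uintptr_t object) const { return black.count(object) != 0; }
};

// For a slot that still points into to-space after a new->new page move,
// liveness comes from the mark bits rather than a forwarding map word.
SlotCallbackResult CheckToSpaceSlot(const MarkingModel& marking,
                                    uintptr_t target) {
  return marking.IsBlack(target) ? KEEP_SLOT : REMOVE_SLOT;
}

int main() {
  MarkingModel marking;
  marking.black.insert(0x1000);
  std::cout << (CheckToSpaceSlot(marking, 0x1000) == KEEP_SLOT) << "\n";  // 1: live, keep the slot
  std::cout << (CheckToSpaceSlot(marking, 0x2000) == KEEP_SLOT) << "\n";  // 0: dead, remove it
}
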
3660 3714
3661 int NumberOfPointerUpdateTasks(int pages) { 3715 int NumberOfPointerUpdateTasks(int pages) {
3662 if (!FLAG_parallel_pointer_update) return 1; 3716 if (!FLAG_parallel_pointer_update) return 1;
3663 const int kMaxTasks = 4; 3717 const int kMaxTasks = 4;
(...skipping 12 matching lines...)
3676 job.Run(num_tasks, [](int i) { return 0; }); 3730 job.Run(num_tasks, [](int i) { return 0; });
3677 } 3731 }
3678 3732
3679 class ToSpacePointerUpdateJobTraits { 3733 class ToSpacePointerUpdateJobTraits {
3680 public: 3734 public:
3681 typedef std::pair<Address, Address> PerPageData; 3735 typedef std::pair<Address, Address> PerPageData;
3682 typedef PointersUpdatingVisitor* PerTaskData; 3736 typedef PointersUpdatingVisitor* PerTaskData;
3683 3737
3684 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, 3738 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
3685 MemoryChunk* chunk, PerPageData limits) { 3739 MemoryChunk* chunk, PerPageData limits) {
3740 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3741 // New->new promoted pages contain garbage so they require iteration
3742 // using markbits.
3743 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
3744 } else {
3745 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
3746 }
3747 return true;
3748 }
3749
3750 static const bool NeedSequentialFinalization = false;
3751 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
3752 }
3753
3754 private:
3755 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
3756 MemoryChunk* chunk,
3757 PerPageData limits) {
3686 for (Address cur = limits.first; cur < limits.second;) { 3758 for (Address cur = limits.first; cur < limits.second;) {
3687 HeapObject* object = HeapObject::FromAddress(cur); 3759 HeapObject* object = HeapObject::FromAddress(cur);
3688 Map* map = object->map(); 3760 Map* map = object->map();
3689 int size = object->SizeFromMap(map); 3761 int size = object->SizeFromMap(map);
3690 object->IterateBody(map->instance_type(), size, visitor); 3762 object->IterateBody(map->instance_type(), size, visitor);
3691 cur += size; 3763 cur += size;
3692 } 3764 }
3693 return true;
3694 } 3765 }
3695 static const bool NeedSequentialFinalization = false; 3766
3696 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { 3767 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
3768 MemoryChunk* chunk,
3769 PerPageData limits) {
3770 LiveObjectIterator<kBlackObjects> it(chunk);
3771 HeapObject* object = NULL;
3772 while ((object = it.Next()) != NULL) {
3773 Map* map = object->map();
3774 int size = object->SizeFromMap(map);
3775 object->IterateBody(map->instance_type(), size, visitor);
3776 }
3697 } 3777 }
3698 }; 3778 };
3699 3779
3700 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { 3780 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
3701 PageParallelJob<ToSpacePointerUpdateJobTraits> job( 3781 PageParallelJob<ToSpacePointerUpdateJobTraits> job(
3702 heap, heap->isolate()->cancelable_task_manager(), semaphore); 3782 heap, heap->isolate()->cancelable_task_manager(), semaphore);
3703 Address space_start = heap->new_space()->bottom(); 3783 Address space_start = heap->new_space()->bottom();
3704 Address space_end = heap->new_space()->top(); 3784 Address space_end = heap->new_space()->top();
3705 NewSpacePageIterator it(space_start, space_end); 3785 NewSpacePageIterator it(space_start, space_end);
3706 while (it.has_next()) { 3786 while (it.has_next()) {
(...skipping 55 matching lines...)
3762 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); 3842 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3763 } 3843 }
3764 3844
3765 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, 3845 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
3766 int required_freed_bytes, 3846 int required_freed_bytes,
3767 int max_pages) { 3847 int max_pages) {
3768 int max_freed = 0; 3848 int max_freed = 0;
3769 int pages_freed = 0; 3849 int pages_freed = 0;
3770 Page* page = nullptr; 3850 Page* page = nullptr;
3771 while ((page = GetSweepingPageSafe(identity)) != nullptr) { 3851 while ((page = GetSweepingPageSafe(identity)) != nullptr) {
3772 int freed = ParallelSweepPage(page, heap_->paged_space(identity)); 3852 int freed = ParallelSweepPage(page, identity);
3773 pages_freed += 1; 3853 pages_freed += 1;
3774 DCHECK_GE(freed, 0); 3854 DCHECK_GE(freed, 0);
3775 max_freed = Max(max_freed, freed); 3855 max_freed = Max(max_freed, freed);
3776 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes)) 3856 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
3777 return max_freed; 3857 return max_freed;
3778 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed; 3858 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
3779 } 3859 }
3780 return max_freed; 3860 return max_freed;
3781 } 3861 }
3782 3862
3783 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, 3863 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
3784 PagedSpace* space) { 3864 AllocationSpace identity) {
3785 int max_freed = 0; 3865 int max_freed = 0;
3786 if (page->mutex()->TryLock()) { 3866 if (page->mutex()->TryLock()) {
3787 // If this page was already swept in the meantime, we can return here. 3867 // If this page was already swept in the meantime, we can return here.
3788 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { 3868 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
3789 page->mutex()->Unlock(); 3869 page->mutex()->Unlock();
3790 return 0; 3870 return 0;
3791 } 3871 }
3792 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); 3872 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3793 if (space->identity() == OLD_SPACE) { 3873 if (identity == NEW_SPACE) {
3874 RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3875 IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr);
3876 } else if (identity == OLD_SPACE) {
3794 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, 3877 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3795 IGNORE_FREE_SPACE>(space, page, NULL); 3878 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
3796 } else if (space->identity() == CODE_SPACE) { 3879 heap_->paged_space(identity), page, nullptr);
3880 } else if (identity == CODE_SPACE) {
3797 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, 3881 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
3798 IGNORE_FREE_SPACE>(space, page, NULL); 3882 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
3883 heap_->paged_space(identity), page, nullptr);
3799 } else { 3884 } else {
3800 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, 3885 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3801 IGNORE_FREE_SPACE>(space, page, NULL); 3886 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
3887 heap_->paged_space(identity), page, nullptr);
3802 } 3888 }
3803 { 3889 {
3804 base::LockGuard<base::Mutex> guard(&mutex_); 3890 base::LockGuard<base::Mutex> guard(&mutex_);
3805 swept_list_[space->identity()].Add(page); 3891 swept_list_[identity].Add(page);
3806 } 3892 }
3807 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); 3893 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3808 page->mutex()->Unlock(); 3894 page->mutex()->Unlock();
3809 } 3895 }
3810 return max_freed; 3896 return max_freed;
3811 } 3897 }
3812 3898
3813 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { 3899 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
3814 DCHECK(!sweeping_in_progress_); 3900 DCHECK(!sweeping_in_progress_);
3815 PrepareToBeSweptPage(space, page); 3901 PrepareToBeSweptPage(space, page);
3816 sweeping_list_[space].push_back(page); 3902 sweeping_list_[space].push_back(page);
3817 } 3903 }
3818 3904
3819 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, 3905 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
3820 Page* page) { 3906 Page* page) {
3821 DCHECK(sweeping_in_progress_); 3907 DCHECK(sweeping_in_progress_);
3822 PrepareToBeSweptPage(space, page); 3908 PrepareToBeSweptPage(space, page);
3823 late_pages_ = true; 3909 late_pages_ = true;
3824 AddSweepingPageSafe(space, page); 3910 AddSweepingPageSafe(space, page);
3825 } 3911 }
3826 3912
3827 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, 3913 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
3828 Page* page) { 3914 Page* page) {
3829 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); 3915 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
3830 int to_sweep = page->area_size() - page->LiveBytes(); 3916 int to_sweep = page->area_size() - page->LiveBytes();
3831 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); 3917 if (space != NEW_SPACE)
ulan 2016/06/10 12:58:54 So we are accounting the free space in new->new pa
Michael Lippautz 2016/06/10 15:18:15 Should not be the case, because SizeOfObjects in N
3918 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
3832 } 3919 }
3833 3920
3834 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( 3921 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
3835 AllocationSpace space) { 3922 AllocationSpace space) {
3836 base::LockGuard<base::Mutex> guard(&mutex_); 3923 base::LockGuard<base::Mutex> guard(&mutex_);
3837 Page* page = nullptr; 3924 Page* page = nullptr;
3838 if (!sweeping_list_[space].empty()) { 3925 if (!sweeping_list_[space].empty()) {
3839 page = sweeping_list_[space].front(); 3926 page = sweeping_list_[space].front();
3840 sweeping_list_[space].pop_front(); 3927 sweeping_list_[space].pop_front();
3841 } 3928 }
(...skipping 43 matching lines...)
3885 continue; 3972 continue;
3886 } 3973 }
3887 3974
3888 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { 3975 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3889 // We need to sweep the page to get it into an iterable state again. Note 3976 // We need to sweep the page to get it into an iterable state again. Note
3890 // that this adds unusable memory into the free list that is later on 3977 // that this adds unusable memory into the free list that is later on
3891 // (in the free list) dropped again. Since we only use the flag for 3978 // (in the free list) dropped again. Since we only use the flag for
3892 // testing this is fine. 3979 // testing this is fine.
3893 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); 3980 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3894 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, 3981 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
3895 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>( 3982 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST,
3896 space, p, nullptr); 3983 Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr);
3897 continue; 3984 continue;
3898 } 3985 }
3899 3986
3900 // One unused page is kept, all further are released before sweeping them. 3987 // One unused page is kept, all further are released before sweeping them.
3901 if (p->LiveBytes() == 0) { 3988 if (p->LiveBytes() == 0) {
3902 if (unused_page_present) { 3989 if (unused_page_present) {
3903 if (FLAG_gc_verbose) { 3990 if (FLAG_gc_verbose) {
3904 PrintIsolate(isolate(), "sweeping: released page: %p", 3991 PrintIsolate(isolate(), "sweeping: released page: %p",
3905 static_cast<void*>(p)); 3992 static_cast<void*>(p));
3906 } 3993 }
(...skipping 85 matching lines...)
3992 MarkBit mark_bit = Marking::MarkBitFrom(host); 4079 MarkBit mark_bit = Marking::MarkBitFrom(host);
3993 if (Marking::IsBlack(mark_bit)) { 4080 if (Marking::IsBlack(mark_bit)) {
3994 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); 4081 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
3995 RecordRelocSlot(host, &rinfo, target); 4082 RecordRelocSlot(host, &rinfo, target);
3996 } 4083 }
3997 } 4084 }
3998 } 4085 }
3999 4086
4000 } // namespace internal 4087 } // namespace internal
4001 } // namespace v8 4088 } // namespace v8