Chromium Code Reviews

Side by Side Diff: src/heap/mark-compact.cc

Issue 2078863002: Reland "[heap] Add page evacuation mode for new->new" (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Add test to test/cctest/BUILD.gn Created 4 years, 6 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/mark-compact.h" 5 #include "src/heap/mark-compact.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h" 9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h" 10 #include "src/code-stubs.h"
(...skipping 453 matching lines...)
464 AllocationSpace space_to_start) 464 AllocationSpace space_to_start)
465 : sweeper_(sweeper), 465 : sweeper_(sweeper),
466 pending_sweeper_tasks_(pending_sweeper_tasks), 466 pending_sweeper_tasks_(pending_sweeper_tasks),
467 space_to_start_(space_to_start) {} 467 space_to_start_(space_to_start) {}
468 468
469 virtual ~SweeperTask() {} 469 virtual ~SweeperTask() {}
470 470
471 private: 471 private:
472 // v8::Task overrides. 472 // v8::Task overrides.
473 void Run() override { 473 void Run() override {
474 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); 474 DCHECK_GE(space_to_start_, FIRST_SPACE);
475 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); 475 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
476 const int offset = space_to_start_ - FIRST_PAGED_SPACE; 476 const int offset = space_to_start_ - FIRST_SPACE;
477 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; 477 const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
478 for (int i = 0; i < num_spaces; i++) { 478 for (int i = 0; i < num_spaces; i++) {
479 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); 479 const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
480 DCHECK_GE(space_id, FIRST_PAGED_SPACE); 480 DCHECK_GE(space_id, FIRST_SPACE);
481 DCHECK_LE(space_id, LAST_PAGED_SPACE); 481 DCHECK_LE(space_id, LAST_PAGED_SPACE);
482 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); 482 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
483 } 483 }
484 pending_sweeper_tasks_->Signal(); 484 pending_sweeper_tasks_->Signal();
485 } 485 }
486 486
487 Sweeper* sweeper_; 487 Sweeper* sweeper_;
488 base::Semaphore* pending_sweeper_tasks_; 488 base::Semaphore* pending_sweeper_tasks_;
489 AllocationSpace space_to_start_; 489 AllocationSpace space_to_start_;
490 490
(...skipping 17 matching lines...)
508 void MarkCompactCollector::Sweeper::StartSweepingHelper( 508 void MarkCompactCollector::Sweeper::StartSweepingHelper(
509 AllocationSpace space_to_start) { 509 AllocationSpace space_to_start) {
510 num_sweeping_tasks_.Increment(1); 510 num_sweeping_tasks_.Increment(1);
511 V8::GetCurrentPlatform()->CallOnBackgroundThread( 511 V8::GetCurrentPlatform()->CallOnBackgroundThread(
512 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), 512 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
513 v8::Platform::kShortRunningTask); 513 v8::Platform::kShortRunningTask);
514 } 514 }
515 515
516 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( 516 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
517 Page* page) { 517 Page* page) {
518 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
519 if (!page->SweepingDone()) { 518 if (!page->SweepingDone()) {
520 ParallelSweepPage(page, owner); 519 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
520 ParallelSweepPage(page, owner->identity());
521 if (!page->SweepingDone()) { 521 if (!page->SweepingDone()) {
522 // We were not able to sweep that page, i.e., a concurrent 522 // We were not able to sweep that page, i.e., a concurrent
523 // sweeper thread currently owns this page. Wait for the sweeper 523 // sweeper thread currently owns this page. Wait for the sweeper
524 // thread to be done with this page. 524 // thread to be done with this page.
525 page->WaitUntilSweepingCompleted(); 525 page->WaitUntilSweepingCompleted();
526 } 526 }
527 } 527 }
528 } 528 }
529 529
530 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { 530 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
(...skipping 22 matching lines...)
553 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); 553 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
554 } 554 }
555 555
556 if (FLAG_concurrent_sweeping) { 556 if (FLAG_concurrent_sweeping) {
557 while (num_sweeping_tasks_.Value() > 0) { 557 while (num_sweeping_tasks_.Value() > 0) {
558 pending_sweeper_tasks_semaphore_.Wait(); 558 pending_sweeper_tasks_semaphore_.Wait();
559 num_sweeping_tasks_.Increment(-1); 559 num_sweeping_tasks_.Increment(-1);
560 } 560 }
561 } 561 }
562 562
563 ForAllSweepingSpaces( 563 ForAllSweepingSpaces([this](AllocationSpace space) {
564 [this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); }); 564 if (space == NEW_SPACE) {
565 swept_list_[NEW_SPACE].Clear();
566 }
567 DCHECK(sweeping_list_[space].empty());
568 });
565 late_pages_ = false; 569 late_pages_ = false;
566 sweeping_in_progress_ = false; 570 sweeping_in_progress_ = false;
567 } 571 }
568 572
573 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
574 if (!sweeping_in_progress_) return;
575 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
576 NewSpacePageIterator pit(heap_->new_space());
577 while (pit.has_next()) {
578 Page* page = pit.next();
579 SweepOrWaitUntilSweepingCompleted(page);
580 }
581 }
582 }
583
569 void MarkCompactCollector::EnsureSweepingCompleted() { 584 void MarkCompactCollector::EnsureSweepingCompleted() {
570 if (!sweeper().sweeping_in_progress()) return; 585 if (!sweeper().sweeping_in_progress()) return;
571 586
572 sweeper().EnsureCompleted(); 587 sweeper().EnsureCompleted();
573 heap()->old_space()->RefillFreeList(); 588 heap()->old_space()->RefillFreeList();
574 heap()->code_space()->RefillFreeList(); 589 heap()->code_space()->RefillFreeList();
575 heap()->map_space()->RefillFreeList(); 590 heap()->map_space()->RefillFreeList();
576 591
577 #ifdef VERIFY_HEAP 592 #ifdef VERIFY_HEAP
578 if (FLAG_verify_heap && !evacuation()) { 593 if (FLAG_verify_heap && !evacuation()) {
(...skipping 1297 matching lines...)
1876 AllocationSpace space_to_allocate_; 1891 AllocationSpace space_to_allocate_;
1877 intptr_t promoted_size_; 1892 intptr_t promoted_size_;
1878 intptr_t semispace_copied_size_; 1893 intptr_t semispace_copied_size_;
1879 base::HashMap* local_pretenuring_feedback_; 1894 base::HashMap* local_pretenuring_feedback_;
1880 }; 1895 };
1881 1896
1882 class MarkCompactCollector::EvacuateNewSpacePageVisitor final 1897 class MarkCompactCollector::EvacuateNewSpacePageVisitor final
1883 : public MarkCompactCollector::HeapObjectVisitor { 1898 : public MarkCompactCollector::HeapObjectVisitor {
1884 public: 1899 public:
1885 explicit EvacuateNewSpacePageVisitor(Heap* heap) 1900 explicit EvacuateNewSpacePageVisitor(Heap* heap)
1886 : heap_(heap), promoted_size_(0) {} 1901 : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
1887 1902
1888 static void TryMoveToOldSpace(Page* page, PagedSpace* owner) { 1903 static void MoveToOldSpace(Page* page, PagedSpace* owner) {
1889 if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) { 1904 page->Unlink();
1890 Page* new_page = Page::ConvertNewToOld(page, owner); 1905 Page* new_page = Page::ConvertNewToOld(page, owner);
1891 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); 1906 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1892 } 1907 }
1908
1909 static void MoveToToSpace(Page* page) {
1910 page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1911 page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1893 } 1912 }
1894 1913
1895 inline bool Visit(HeapObject* object) { 1914 inline bool Visit(HeapObject* object) {
1896 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector()); 1915 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1897 object->IterateBodyFast(&visitor); 1916 object->IterateBodyFast(&visitor);
1898 promoted_size_ += object->Size(); 1917 promoted_size_ += object->Size();
1899 return true; 1918 return true;
1900 } 1919 }
1901 1920
1902 intptr_t promoted_size() { return promoted_size_; } 1921 intptr_t promoted_size() { return promoted_size_; }
1922 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1923
1924 void account_semispace_copied(intptr_t copied) {
1925 semispace_copied_size_ += copied;
1926 }
1903 1927
1904 private: 1928 private:
1905 Heap* heap_; 1929 Heap* heap_;
1906 intptr_t promoted_size_; 1930 intptr_t promoted_size_;
1931 intptr_t semispace_copied_size_;
1907 }; 1932 };
1908 1933
1909 class MarkCompactCollector::EvacuateOldSpaceVisitor final 1934 class MarkCompactCollector::EvacuateOldSpaceVisitor final
1910 : public MarkCompactCollector::EvacuateVisitorBase { 1935 : public MarkCompactCollector::EvacuateVisitorBase {
1911 public: 1936 public:
1912 EvacuateOldSpaceVisitor(Heap* heap, 1937 EvacuateOldSpaceVisitor(Heap* heap,
1913 CompactionSpaceCollection* compaction_spaces) 1938 CompactionSpaceCollection* compaction_spaces)
1914 : EvacuateVisitorBase(heap, compaction_spaces) {} 1939 : EvacuateVisitorBase(heap, compaction_spaces) {}
1915 1940
1916 inline bool Visit(HeapObject* object) override { 1941 inline bool Visit(HeapObject* object) override {
(...skipping 1119 matching lines...)
3036 NewSpace* new_space = heap()->new_space(); 3061 NewSpace* new_space = heap()->new_space();
3037 NewSpacePageIterator it(new_space->bottom(), new_space->top()); 3062 NewSpacePageIterator it(new_space->bottom(), new_space->top());
3038 // Append the list of new space pages to be processed. 3063 // Append the list of new space pages to be processed.
3039 while (it.has_next()) { 3064 while (it.has_next()) {
3040 newspace_evacuation_candidates_.Add(it.next()); 3065 newspace_evacuation_candidates_.Add(it.next());
3041 } 3066 }
3042 new_space->Flip(); 3067 new_space->Flip();
3043 new_space->ResetAllocationInfo(); 3068 new_space->ResetAllocationInfo();
3044 } 3069 }
3045 3070
3046 void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
3047 newspace_evacuation_candidates_.Rewind(0);
3048 }
3049
3050 class MarkCompactCollector::Evacuator : public Malloced { 3071 class MarkCompactCollector::Evacuator : public Malloced {
3051 public: 3072 public:
3073 enum EvacuationMode {
3074 kObjectsNewToOld,
3075 kPageNewToOld,
3076 kObjectsOldToOld,
3077 kPageNewToNew,
3078 };
3079
3080 static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
3081 // Note: The order of checks is important in this function.
3082 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
3083 return kPageNewToOld;
3084 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
3085 return kPageNewToNew;
3086 if (chunk->InNewSpace()) return kObjectsNewToOld;
3087 DCHECK(chunk->IsEvacuationCandidate());
3088 return kObjectsOldToOld;
3089 }
3090
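The order of checks in ComputeEvacuationMode above matters because a page promoted within new space still answers InNewSpace(); testing the promotion flags first keeps such a page from being treated as ordinary object-wise new->old evacuation. A minimal standalone sketch of the same priority, using plain booleans in place of the real MemoryChunk queries (names here are illustrative, not part of the patch):

enum class Mode { kObjectsNewToOld, kPageNewToOld, kObjectsOldToOld, kPageNewToNew };

// Simplified stand-in for ComputeEvacuationMode: promotion flags win over
// InNewSpace(), because a new->new promoted page is still in new space.
Mode ComputeModeSketch(bool new_old_flag, bool new_new_flag, bool in_new_space) {
  if (new_old_flag) return Mode::kPageNewToOld;
  if (new_new_flag) return Mode::kPageNewToNew;
  if (in_new_space) return Mode::kObjectsNewToOld;
  return Mode::kObjectsOldToOld;  // otherwise it must be an evacuation candidate
}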
3052 // NewSpacePages with more live bytes than this threshold qualify for fast 3091 // NewSpacePages with more live bytes than this threshold qualify for fast
3053 // evacuation. 3092 // evacuation.
3054 static int PageEvacuationThreshold() { 3093 static int PageEvacuationThreshold() {
3055 if (FLAG_page_promotion) 3094 if (FLAG_page_promotion)
3056 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100; 3095 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
3057 return Page::kAllocatableMemory + kPointerSize; 3096 return Page::kAllocatableMemory + kPointerSize;
3058 } 3097 }
3059 3098
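For a rough sense of the numbers: assuming, purely for illustration, a page promotion threshold flag of 70 percent and about 500 KB of allocatable memory per page, a new-space page qualifies for page-wise evacuation once it holds roughly 350 KB of live bytes; with FLAG_page_promotion disabled the threshold sits one pointer above the page capacity, so no page can ever qualify. A tiny sketch of that arithmetic under those assumed values:

// Illustration only; the flag value and the page capacity are assumptions.
constexpr int kAssumedAllocatableMemory = 500 * 1024;  // ~500 KB per page
constexpr int kAssumedPromotionThresholdPercent = 70;  // assumed flag value

constexpr int PageEvacuationThresholdSketch(bool page_promotion_enabled) {
  return page_promotion_enabled
             ? kAssumedPromotionThresholdPercent * kAssumedAllocatableMemory / 100
             : kAssumedAllocatableMemory + static_cast<int>(sizeof(void*));
}
// With these numbers a page qualifies above ~350 KB of live bytes; with the
// flag off the threshold exceeds the page size, so no page ever qualifies.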
3060 explicit Evacuator(MarkCompactCollector* collector) 3099 explicit Evacuator(MarkCompactCollector* collector)
3061 : collector_(collector), 3100 : collector_(collector),
3062 compaction_spaces_(collector->heap()), 3101 compaction_spaces_(collector->heap()),
3063 local_pretenuring_feedback_(base::HashMap::PointersMatch, 3102 local_pretenuring_feedback_(base::HashMap::PointersMatch,
3064 kInitialLocalPretenuringFeedbackCapacity), 3103 kInitialLocalPretenuringFeedbackCapacity),
3065 new_space_visitor_(collector->heap(), &compaction_spaces_, 3104 new_space_visitor_(collector->heap(), &compaction_spaces_,
3066 &local_pretenuring_feedback_), 3105 &local_pretenuring_feedback_),
3067 new_space_page_visitor(collector->heap()), 3106 new_space_page_visitor(collector->heap()),
3068 old_space_visitor_(collector->heap(), &compaction_spaces_), 3107 old_space_visitor_(collector->heap(), &compaction_spaces_),
3069 duration_(0.0), 3108 duration_(0.0),
3070 bytes_compacted_(0) {} 3109 bytes_compacted_(0) {}
3071 3110
3072 inline bool EvacuatePage(Page* chunk); 3111 inline bool EvacuatePage(Page* chunk);
3073 3112
3074 // Merge back locally cached info sequentially. Note that this method needs 3113 // Merge back locally cached info sequentially. Note that this method needs
3075 // to be called from the main thread. 3114 // to be called from the main thread.
3076 inline void Finalize(); 3115 inline void Finalize();
3077 3116
3078 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } 3117 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
3079 3118
3080 private: 3119 private:
3081 enum EvacuationMode {
3082 kObjectsNewToOld,
3083 kPageNewToOld,
3084 kObjectsOldToOld,
3085 };
3086
3087 static const int kInitialLocalPretenuringFeedbackCapacity = 256; 3120 static const int kInitialLocalPretenuringFeedbackCapacity = 256;
3088 3121
3089 inline Heap* heap() { return collector_->heap(); } 3122 inline Heap* heap() { return collector_->heap(); }
3090 3123
3091 inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
3092 // Note: The order of checks is important in this function.
3093 if (chunk->InNewSpace()) return kObjectsNewToOld;
3094 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
3095 return kPageNewToOld;
3096 DCHECK(chunk->IsEvacuationCandidate());
3097 return kObjectsOldToOld;
3098 }
3099
3100 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { 3124 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3101 duration_ += duration; 3125 duration_ += duration;
3102 bytes_compacted_ += bytes_compacted; 3126 bytes_compacted_ += bytes_compacted;
3103 } 3127 }
3104 3128
3105 template <IterationMode mode, class Visitor>
3106 inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
3107
3108 MarkCompactCollector* collector_; 3129 MarkCompactCollector* collector_;
3109 3130
3110 // Locally cached collector data. 3131 // Locally cached collector data.
3111 CompactionSpaceCollection compaction_spaces_; 3132 CompactionSpaceCollection compaction_spaces_;
3112 base::HashMap local_pretenuring_feedback_; 3133 base::HashMap local_pretenuring_feedback_;
3113 3134
3114 // Visitors for the corresponding spaces. 3135 // Visitors for the corresponding spaces.
3115 EvacuateNewSpaceVisitor new_space_visitor_; 3136 EvacuateNewSpaceVisitor new_space_visitor_;
3116 EvacuateNewSpacePageVisitor new_space_page_visitor; 3137 EvacuateNewSpacePageVisitor new_space_page_visitor;
3117 EvacuateOldSpaceVisitor old_space_visitor_; 3138 EvacuateOldSpaceVisitor old_space_visitor_;
3118 3139
3119 // Book keeping info. 3140 // Book keeping info.
3120 double duration_; 3141 double duration_;
3121 intptr_t bytes_compacted_; 3142 intptr_t bytes_compacted_;
3122 }; 3143 };
3123 3144
3124 template <MarkCompactCollector::IterationMode mode, class Visitor> 3145 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
3125 bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
3126 Visitor* visitor) {
3127 bool success = false; 3146 bool success = false;
3128 DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() || 3147 DCHECK(page->SweepingDone());
3129 p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)); 3148 int saved_live_bytes = page->LiveBytes();
3130 int saved_live_bytes = p->LiveBytes(); 3149 double evacuation_time = 0.0;
3131 double evacuation_time; 3150 Heap* heap = page->heap();
3132 { 3151 {
3133 AlwaysAllocateScope always_allocate(heap()->isolate()); 3152 AlwaysAllocateScope always_allocate(heap->isolate());
3134 TimedScope timed_scope(&evacuation_time); 3153 TimedScope timed_scope(&evacuation_time);
3135 success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode); 3154 switch (ComputeEvacuationMode(page)) {
3155 case kObjectsNewToOld:
3156 success = collector_->VisitLiveObjects(page, &new_space_visitor_,
3157 kClearMarkbits);
3158 ArrayBufferTracker::ProcessBuffers(
3159 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3160 DCHECK(success);
3161 break;
3162 case kPageNewToOld:
3163 success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
3164 kKeepMarking);
3165 // ArrayBufferTracker will be updated during sweeping.
3166 DCHECK(success);
3167 break;
3168 case kPageNewToNew:
3169 new_space_page_visitor.account_semispace_copied(page->LiveBytes());
3170 // ArrayBufferTracker will be updated during sweeping.
3171 success = true;
3172 break;
3173 case kObjectsOldToOld:
3174 success = collector_->VisitLiveObjects(page, &old_space_visitor_,
3175 kClearMarkbits);
3176 if (!success) {
3177 // Aborted compaction page. We have to record slots here, since we
3178 // might not have recorded them in first place.
3179 // Note: We mark the page as aborted here to be able to record slots
3180 // for code objects in |RecordMigratedSlotVisitor|.
3181 page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3182 EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
3183 success =
3184 collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
3185 ArrayBufferTracker::ProcessBuffers(
3186 page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3187 DCHECK(success);
3188 // We need to return failure here to indicate that we want this page
3189 // added to the sweeper.
3190 success = false;
3191 } else {
3192 ArrayBufferTracker::ProcessBuffers(
3193 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3194 }
3195 break;
3196 default:
3197 UNREACHABLE();
3198 }
3136 } 3199 }
3200 ReportCompactionProgress(evacuation_time, saved_live_bytes);
3137 if (FLAG_trace_evacuation) { 3201 if (FLAG_trace_evacuation) {
3138 const char age_mark_tag = 3202 PrintIsolate(heap->isolate(),
3139 !p->InNewSpace() 3203 "evacuation[%p]: page=%p new_space=%d "
3140 ? 'x' 3204 "page_evacuation=%d executable=%d contains_age_mark=%d "
3141 : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) 3205 "live_bytes=%d time=%f\n",
3142 ? '>' 3206 static_cast<void*>(this), static_cast<void*>(page),
3143 : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<' 3207 page->InNewSpace(),
3144 : '#'; 3208 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
3145 PrintIsolate(heap()->isolate(), 3209 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
3146 "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c " 3210 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
3147 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n", 3211 page->Contains(heap->new_space()->age_mark()),
3148 static_cast<void*>(this), static_cast<void*>(p), 3212 saved_live_bytes, evacuation_time);
3149 p->InNewSpace(), age_mark_tag,
3150 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
3151 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
3152 evacuation_time);
3153 }
3154 if (success) {
3155 ReportCompactionProgress(evacuation_time, saved_live_bytes);
3156 } 3213 }
3157 return success; 3214 return success;
3158 } 3215 }
3159 3216
3160 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
3161 bool result = false;
3162 DCHECK(page->SweepingDone());
3163 switch (ComputeEvacuationMode(page)) {
3164 case kObjectsNewToOld:
3165 result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
3166 ArrayBufferTracker::ProcessBuffers(
3167 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3168 DCHECK(result);
3169 USE(result);
3170 break;
3171 case kPageNewToOld:
3172 result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
3173 // ArrayBufferTracker will be updated during sweeping.
3174 DCHECK(result);
3175 USE(result);
3176 break;
3177 case kObjectsOldToOld:
3178 result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
3179 if (!result) {
3180 // Aborted compaction page. We have to record slots here, since we might
3181 // not have recorded them in first place.
3182 // Note: We mark the page as aborted here to be able to record slots
3183 // for code objects in |RecordMigratedSlotVisitor|.
3184 page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3185 EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
3186 result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
3187 ArrayBufferTracker::ProcessBuffers(
3188 page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3189 DCHECK(result);
3190 USE(result);
3191 // We need to return failure here to indicate that we want this page
3192 // added to the sweeper.
3193 return false;
3194 }
3195 ArrayBufferTracker::ProcessBuffers(
3196 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3197
3198 break;
3199 default:
3200 UNREACHABLE();
3201 }
3202 return result;
3203 }
3204
3205 void MarkCompactCollector::Evacuator::Finalize() { 3217 void MarkCompactCollector::Evacuator::Finalize() {
3206 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); 3218 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
3207 heap()->code_space()->MergeCompactionSpace( 3219 heap()->code_space()->MergeCompactionSpace(
3208 compaction_spaces_.Get(CODE_SPACE)); 3220 compaction_spaces_.Get(CODE_SPACE));
3209 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); 3221 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
3210 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() + 3222 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
3211 new_space_page_visitor.promoted_size()); 3223 new_space_page_visitor.promoted_size());
3212 heap()->IncrementSemiSpaceCopiedObjectSize( 3224 heap()->IncrementSemiSpaceCopiedObjectSize(
3213 new_space_visitor_.semispace_copied_size()); 3225 new_space_visitor_.semispace_copied_size() +
3226 new_space_page_visitor.semispace_copied_size());
3214 heap()->IncrementYoungSurvivorsCounter( 3227 heap()->IncrementYoungSurvivorsCounter(
3215 new_space_visitor_.promoted_size() + 3228 new_space_visitor_.promoted_size() +
3216 new_space_visitor_.semispace_copied_size() + 3229 new_space_visitor_.semispace_copied_size() +
3217 new_space_page_visitor.promoted_size()); 3230 new_space_page_visitor.promoted_size() +
3231 new_space_page_visitor.semispace_copied_size());
3218 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); 3232 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
3219 } 3233 }
3220 3234
3221 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, 3235 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
3222 intptr_t live_bytes) { 3236 intptr_t live_bytes) {
3223 if (!FLAG_parallel_compaction) return 1; 3237 if (!FLAG_parallel_compaction) return 1;
3224 // Compute the number of needed tasks based on a target compaction time, the 3238 // Compute the number of needed tasks based on a target compaction time, the
3225 // profiled compaction speed and marked live memory. 3239 // profiled compaction speed and marked live memory.
3226 // 3240 //
3227 // The number of parallel compaction tasks is limited by: 3241 // The number of parallel compaction tasks is limited by:
(...skipping 27 matching lines...)
3255 3269
3256 static const bool NeedSequentialFinalization = true; 3270 static const bool NeedSequentialFinalization = true;
3257 3271
3258 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, 3272 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
3259 MemoryChunk* chunk, PerPageData) { 3273 MemoryChunk* chunk, PerPageData) {
3260 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk)); 3274 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
3261 } 3275 }
3262 3276
3263 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk, 3277 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
3264 bool success, PerPageData data) { 3278 bool success, PerPageData data) {
3265 if (chunk->InNewSpace()) { 3279 using Evacuator = MarkCompactCollector::Evacuator;
3266 DCHECK(success); 3280 Page* p = static_cast<Page*>(chunk);
3267 } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { 3281 switch (Evacuator::ComputeEvacuationMode(p)) {
3268 DCHECK(success); 3282 case Evacuator::kPageNewToOld:
3269 Page* p = static_cast<Page*>(chunk); 3283 break;
3270 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); 3284 case Evacuator::kPageNewToNew:
3271 p->ForAllFreeListCategories( 3285 DCHECK(success);
3272 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); 3286 break;
3273 heap->mark_compact_collector()->sweeper().AddLatePage( 3287 case Evacuator::kObjectsNewToOld:
3274 p->owner()->identity(), p); 3288 DCHECK(success);
3275 } else { 3289 break;
3276 Page* p = static_cast<Page*>(chunk); 3290 case Evacuator::kObjectsOldToOld:
3277 if (success) { 3291 if (success) {
3278 DCHECK(p->IsEvacuationCandidate()); 3292 DCHECK(p->IsEvacuationCandidate());
3279 DCHECK(p->SweepingDone()); 3293 DCHECK(p->SweepingDone());
3280 p->Unlink(); 3294 p->Unlink();
3281 } else { 3295 } else {
3282 // We have partially compacted the page, i.e., some objects may have 3296 // We have partially compacted the page, i.e., some objects may have
3283 // moved, others are still in place. 3297 // moved, others are still in place.
3284 p->ClearEvacuationCandidate(); 3298 p->ClearEvacuationCandidate();
3285 // Slots have already been recorded so we just need to add it to the 3299 // Slots have already been recorded so we just need to add it to the
3286 // sweeper. 3300 // sweeper, which will happen after updating pointers.
3287 *data += 1; 3301 *data += 1;
3288 } 3302 }
3303 break;
3304 default:
3305 UNREACHABLE();
3289 } 3306 }
3290 } 3307 }
3291 }; 3308 };
3292 3309
3293 void MarkCompactCollector::EvacuatePagesInParallel() { 3310 void MarkCompactCollector::EvacuatePagesInParallel() {
3294 PageParallelJob<EvacuationJobTraits> job( 3311 PageParallelJob<EvacuationJobTraits> job(
3295 heap_, heap_->isolate()->cancelable_task_manager(), 3312 heap_, heap_->isolate()->cancelable_task_manager(),
3296 &page_parallel_job_semaphore_); 3313 &page_parallel_job_semaphore_);
3297 3314
3298 int abandoned_pages = 0; 3315 int abandoned_pages = 0;
3299 intptr_t live_bytes = 0; 3316 intptr_t live_bytes = 0;
3300 for (Page* page : evacuation_candidates_) { 3317 for (Page* page : evacuation_candidates_) {
3301 live_bytes += page->LiveBytes(); 3318 live_bytes += page->LiveBytes();
3302 job.AddPage(page, &abandoned_pages); 3319 job.AddPage(page, &abandoned_pages);
3303 } 3320 }
3304 3321
3305 const Address age_mark = heap()->new_space()->age_mark(); 3322 const Address age_mark = heap()->new_space()->age_mark();
3306 for (Page* page : newspace_evacuation_candidates_) { 3323 for (Page* page : newspace_evacuation_candidates_) {
3307 live_bytes += page->LiveBytes(); 3324 live_bytes += page->LiveBytes();
3308 if (!page->NeverEvacuate() && 3325 if (!page->NeverEvacuate() &&
3309 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) && 3326 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
3310 page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
3311 !page->Contains(age_mark)) { 3327 !page->Contains(age_mark)) {
3312 EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space()); 3328 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
3329 EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
3330 } else {
3331 EvacuateNewSpacePageVisitor::MoveToToSpace(page);
3332 }
3313 } 3333 }
3334
3314 job.AddPage(page, &abandoned_pages); 3335 job.AddPage(page, &abandoned_pages);
3315 } 3336 }
3316 DCHECK_GE(job.NumberOfPages(), 1); 3337 DCHECK_GE(job.NumberOfPages(), 1);
3317 3338
3318 // Used for trace summary. 3339 // Used for trace summary.
3319 double compaction_speed = 0; 3340 double compaction_speed = 0;
3320 if (FLAG_trace_evacuation) { 3341 if (FLAG_trace_evacuation) {
3321 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); 3342 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3322 } 3343 }
3323 3344
(...skipping 33 matching lines...)
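The loop over newspace_evacuation_candidates_ above makes the per-page choice between the three new-space paths: pages above the evacuation threshold that do not contain the age mark are moved wholesale, either to old space (when the whole page lies below the age mark) or within new space; everything else stays on the object-wise path. A simplified decision sketch, with plain inputs standing in for the page queries (illustrative names only):

enum class NewSpacePagePath { kMoveToOldSpace, kMoveWithinNewSpace, kEvacuateObjects };

// Sketch of the per-page decision: wholesale moves are only taken for pages
// above the threshold that do not contain the age mark.
NewSpacePagePath ChoosePathSketch(bool never_evacuate, int live_bytes, int threshold,
                                  bool below_age_mark, bool contains_age_mark) {
  if (!never_evacuate && live_bytes > threshold && !contains_age_mark) {
    return below_age_mark ? NewSpacePagePath::kMoveToOldSpace
                          : NewSpacePagePath::kMoveWithinNewSpace;
  }
  return NewSpacePagePath::kEvacuateObjects;
}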
3357 return map_word.ToForwardingAddress(); 3378 return map_word.ToForwardingAddress();
3358 } 3379 }
3359 } 3380 }
3360 return object; 3381 return object;
3361 } 3382 }
3362 }; 3383 };
3363 3384
3364 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, 3385 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode,
3365 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, 3386 MarkCompactCollector::Sweeper::SweepingParallelism parallelism,
3366 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, 3387 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode,
3388 MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode,
3367 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> 3389 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode>
3368 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, 3390 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
3369 ObjectVisitor* v) { 3391 ObjectVisitor* v) {
3370 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); 3392 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
3371 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); 3393 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
3372 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, 3394 DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) ||
3373 space->identity() == CODE_SPACE); 3395 (skip_list_mode == REBUILD_SKIP_LIST));
3374 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); 3396 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3375 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY); 3397 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY);
3376 3398
3377 // Before we sweep objects on the page, we free dead array buffers which 3399 // Before we sweep objects on the page, we free dead array buffers which
3378 // requires valid mark bits. 3400 // requires valid mark bits.
3379 ArrayBufferTracker::FreeDead(p); 3401 ArrayBufferTracker::FreeDead(p);
3380 3402
3381 Address free_start = p->area_start(); 3403 Address free_start = p->area_start();
3382 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); 3404 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3383 3405
(...skipping 12 matching lines...)
3396 LiveObjectIterator<kBlackObjects> it(p); 3418 LiveObjectIterator<kBlackObjects> it(p);
3397 HeapObject* object = NULL; 3419 HeapObject* object = NULL;
3398 while ((object = it.Next()) != NULL) { 3420 while ((object = it.Next()) != NULL) {
3399 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); 3421 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3400 Address free_end = object->address(); 3422 Address free_end = object->address();
3401 if (free_end != free_start) { 3423 if (free_end != free_start) {
3402 int size = static_cast<int>(free_end - free_start); 3424 int size = static_cast<int>(free_end - free_start);
3403 if (free_space_mode == ZAP_FREE_SPACE) { 3425 if (free_space_mode == ZAP_FREE_SPACE) {
3404 memset(free_start, 0xcc, size); 3426 memset(free_start, 0xcc, size);
3405 } 3427 }
3406 freed_bytes = space->UnaccountedFree(free_start, size); 3428 if (free_list_mode == REBUILD_FREE_LIST) {
3407 max_freed_bytes = Max(freed_bytes, max_freed_bytes); 3429 freed_bytes = space->UnaccountedFree(free_start, size);
3430 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3431 } else {
3432 p->heap()->CreateFillerObjectAt(free_start, size,
3433 ClearRecordedSlots::kNo);
3434 }
3408 } 3435 }
3409 Map* map = object->synchronized_map(); 3436 Map* map = object->synchronized_map();
3410 int size = object->SizeFromMap(map); 3437 int size = object->SizeFromMap(map);
3411 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { 3438 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3412 object->IterateBody(map->instance_type(), size, v); 3439 object->IterateBody(map->instance_type(), size, v);
3413 } 3440 }
3414 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { 3441 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3415 int new_region_start = SkipList::RegionNumber(free_end); 3442 int new_region_start = SkipList::RegionNumber(free_end);
3416 int new_region_end = 3443 int new_region_end =
3417 SkipList::RegionNumber(free_end + size - kPointerSize); 3444 SkipList::RegionNumber(free_end + size - kPointerSize);
3418 if (new_region_start != curr_region || new_region_end != curr_region) { 3445 if (new_region_start != curr_region || new_region_end != curr_region) {
3419 skip_list->AddObject(free_end, size); 3446 skip_list->AddObject(free_end, size);
3420 curr_region = new_region_end; 3447 curr_region = new_region_end;
3421 } 3448 }
3422 } 3449 }
3423 free_start = free_end + size; 3450 free_start = free_end + size;
3424 } 3451 }
3425 3452
3426 // Clear the mark bits of that page and reset live bytes count. 3453 // Clear the mark bits of that page and reset live bytes count.
3427 Bitmap::Clear(p); 3454 Bitmap::Clear(p);
3428 3455
3429 if (free_start != p->area_end()) { 3456 if (free_start != p->area_end()) {
3430 int size = static_cast<int>(p->area_end() - free_start); 3457 int size = static_cast<int>(p->area_end() - free_start);
3431 if (free_space_mode == ZAP_FREE_SPACE) { 3458 if (free_space_mode == ZAP_FREE_SPACE) {
3432 memset(free_start, 0xcc, size); 3459 memset(free_start, 0xcc, size);
3433 } 3460 }
3434 freed_bytes = space->UnaccountedFree(free_start, size); 3461 if (free_list_mode == REBUILD_FREE_LIST) {
3435 max_freed_bytes = Max(freed_bytes, max_freed_bytes); 3462 freed_bytes = space->UnaccountedFree(free_start, size);
3463 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3464 } else {
3465 p->heap()->CreateFillerObjectAt(free_start, size,
3466 ClearRecordedSlots::kNo);
3467 }
3436 } 3468 }
3437 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); 3469 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3470 if (free_list_mode == IGNORE_FREE_LIST) return 0;
3438 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); 3471 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3439 } 3472 }
3440 3473
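The new free_list_mode template parameter covers new-space pages, which have no free list to rebuild: with REBUILD_FREE_LIST dead ranges are returned to the owning space's free list, while with IGNORE_FREE_LIST they are only overwritten with filler objects so the page stays iterable, and RawSweep reports no freed bytes. A minimal sketch of that accounting difference, under assumed stand-in types (helper names here are hypothetical):

#include <algorithm>
#include <vector>

enum FreeListModeSketch { REBUILD_FREE_LIST_SKETCH, IGNORE_FREE_LIST_SKETCH };

// Sketch: only a rebuilt free list reports freed bytes back to the allocator;
// new-space sweeping just fills dead ranges and reports nothing.
int SweepDeadRangesSketch(FreeListModeSketch mode,
                          const std::vector<int>& dead_range_sizes) {
  int max_freed = 0;
  for (int size : dead_range_sizes) {
    if (mode == REBUILD_FREE_LIST_SKETCH) {
      max_freed = std::max(max_freed, size);  // range goes onto the free list
    }
    // else: the range is overwritten with a filler object instead.
  }
  return mode == REBUILD_FREE_LIST_SKETCH ? max_freed : 0;
}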
3441 void MarkCompactCollector::InvalidateCode(Code* code) { 3474 void MarkCompactCollector::InvalidateCode(Code* code) {
3442 if (heap_->incremental_marking()->IsCompacting() && 3475 if (heap_->incremental_marking()->IsCompacting() &&
3443 !ShouldSkipEvacuationSlotRecording(code)) { 3476 !ShouldSkipEvacuationSlotRecording(code)) {
3444 DCHECK(compacting_); 3477 DCHECK(compacting_);
3445 3478
3446 // If the object is white then no slots were recorded on it yet. 3479 // If the object is white then no slots were recorded on it yet.
3447 MarkBit mark_bit = Marking::MarkBitFrom(code); 3480 MarkBit mark_bit = Marking::MarkBitFrom(code);
(...skipping 95 matching lines...)
3543 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { 3576 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3544 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); 3577 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
3545 Heap::RelocationLock relocation_lock(heap()); 3578 Heap::RelocationLock relocation_lock(heap());
3546 3579
3547 { 3580 {
3548 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY); 3581 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
3549 EvacuationScope evacuation_scope(this); 3582 EvacuationScope evacuation_scope(this);
3550 3583
3551 EvacuateNewSpacePrologue(); 3584 EvacuateNewSpacePrologue();
3552 EvacuatePagesInParallel(); 3585 EvacuatePagesInParallel();
3553 EvacuateNewSpaceEpilogue();
3554 heap()->new_space()->set_age_mark(heap()->new_space()->top()); 3586 heap()->new_space()->set_age_mark(heap()->new_space()->top());
3555 } 3587 }
3556 3588
3557 UpdatePointersAfterEvacuation(); 3589 UpdatePointersAfterEvacuation();
3558 3590
3591 if (!heap()->new_space()->Rebalance()) {
3592 FatalProcessOutOfMemory("NewSpace::Rebalance");
3593 }
3594
3559 // Give pages that are queued to be freed back to the OS. Note that filtering 3595 // Give pages that are queued to be freed back to the OS. Note that filtering
3560 // slots only handles old space (for unboxed doubles), and thus map space can 3596 // slots only handles old space (for unboxed doubles), and thus map space can
3561 // still contain stale pointers. We only free the chunks after pointer updates 3597 // still contain stale pointers. We only free the chunks after pointer updates
3562 // to still have access to page headers. 3598 // to still have access to page headers.
3563 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); 3599 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3564 3600
3565 { 3601 {
3566 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); 3602 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3567 3603
3604 for (Page* p : newspace_evacuation_candidates_) {
3605 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3606 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
3607 sweeper().AddLatePage(p->owner()->identity(), p);
3608 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
3609 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
3610 p->ForAllFreeListCategories(
3611 [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
3612 sweeper().AddLatePage(p->owner()->identity(), p);
3613 }
3614 }
3615 newspace_evacuation_candidates_.Rewind(0);
3616
3568 for (Page* p : evacuation_candidates_) { 3617 for (Page* p : evacuation_candidates_) {
3569 // Important: skip list should be cleared only after roots were updated 3618 // Important: skip list should be cleared only after roots were updated
3570 // because root iteration traverses the stack and might have to find 3619 // because root iteration traverses the stack and might have to find
3571 // code objects from non-updated pc pointing into evacuation candidate. 3620 // code objects from non-updated pc pointing into evacuation candidate.
3572 SkipList* list = p->skip_list(); 3621 SkipList* list = p->skip_list();
3573 if (list != NULL) list->Clear(); 3622 if (list != NULL) list->Clear();
3574 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { 3623 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3575 sweeper().AddLatePage(p->owner()->identity(), p); 3624 sweeper().AddLatePage(p->owner()->identity(), p);
3576 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); 3625 p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
3577 } 3626 }
(...skipping 74 matching lines...)
3652 // Update the corresponding slot. 3701 // Update the corresponding slot.
3653 *slot = map_word.ToForwardingAddress(); 3702 *slot = map_word.ToForwardingAddress();
3654 } 3703 }
3655 // If the object was in from space before and is after executing the 3704 // If the object was in from space before and is after executing the
3656 // callback in to space, the object is still live. 3705 // callback in to space, the object is still live.
3657 // Unfortunately, we do not know about the slot. It could be in a 3706 // Unfortunately, we do not know about the slot. It could be in a
3658 // just freed free space object. 3707 // just freed free space object.
3659 if (heap->InToSpace(*slot)) { 3708 if (heap->InToSpace(*slot)) {
3660 return KEEP_SLOT; 3709 return KEEP_SLOT;
3661 } 3710 }
3711 } else if (heap->InToSpace(*slot)) {
3712 DCHECK(Page::FromAddress(reinterpret_cast<HeapObject*>(*slot)->address())
3713 ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
3714 // Slots can be in "to" space after a page has been moved. Since there is
3715 // no forwarding information present we need to check the markbits to
3716 // determine liveness.
3717 if (Marking::IsBlack(
3718 Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot))))
3719 return KEEP_SLOT;
3662 } else { 3720 } else {
3663 DCHECK(!heap->InNewSpace(*slot)); 3721 DCHECK(!heap->InNewSpace(*slot));
3664 } 3722 }
3665 return REMOVE_SLOT; 3723 return REMOVE_SLOT;
3666 } 3724 }
3667 }; 3725 };
3668 3726
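For pages moved within new space there is no forwarding pointer left behind, so a remembered old->new slot that still points into to-space has to be checked against the mark bits: the slot is kept only if the target object is marked black. A reduced sketch of that decision, with booleans standing in for the heap queries (names are illustrative):

enum SlotCallbackResultSketch { KEEP_SLOT_SKETCH, REMOVE_SLOT_SKETCH };

// Sketch: after evacuation, decide whether a remembered old->new slot survives.
SlotCallbackResultSketch CheckToSpaceSlotSketch(bool has_forwarding_address,
                                                bool target_in_to_space,
                                                bool target_is_marked_black) {
  if (has_forwarding_address) {
    // Copied object: keep the slot only if the forwarded target is in to-space.
    return target_in_to_space ? KEEP_SLOT_SKETCH : REMOVE_SLOT_SKETCH;
  }
  // Page moved new->new: no forwarding word, so fall back to the mark bits.
  return (target_in_to_space && target_is_marked_black) ? KEEP_SLOT_SKETCH
                                                        : REMOVE_SLOT_SKETCH;
}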
3669 int NumberOfPointerUpdateTasks(int pages) { 3727 int NumberOfPointerUpdateTasks(int pages) {
3670 if (!FLAG_parallel_pointer_update) return 1; 3728 if (!FLAG_parallel_pointer_update) return 1;
3671 const int kMaxTasks = 4; 3729 const int kMaxTasks = 4;
(...skipping 12 matching lines...)
3684 job.Run(num_tasks, [](int i) { return 0; }); 3742 job.Run(num_tasks, [](int i) { return 0; });
3685 } 3743 }
3686 3744
3687 class ToSpacePointerUpdateJobTraits { 3745 class ToSpacePointerUpdateJobTraits {
3688 public: 3746 public:
3689 typedef std::pair<Address, Address> PerPageData; 3747 typedef std::pair<Address, Address> PerPageData;
3690 typedef PointersUpdatingVisitor* PerTaskData; 3748 typedef PointersUpdatingVisitor* PerTaskData;
3691 3749
3692 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor, 3750 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
3693 MemoryChunk* chunk, PerPageData limits) { 3751 MemoryChunk* chunk, PerPageData limits) {
3752 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3753 // New->new promoted pages contain garbage so they require iteration
3754 // using markbits.
3755 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
3756 } else {
3757 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
3758 }
3759 return true;
3760 }
3761
3762 static const bool NeedSequentialFinalization = false;
3763 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
3764 }
3765
3766 private:
3767 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
3768 MemoryChunk* chunk,
3769 PerPageData limits) {
3694 for (Address cur = limits.first; cur < limits.second;) { 3770 for (Address cur = limits.first; cur < limits.second;) {
3695 HeapObject* object = HeapObject::FromAddress(cur); 3771 HeapObject* object = HeapObject::FromAddress(cur);
3696 Map* map = object->map(); 3772 Map* map = object->map();
3697 int size = object->SizeFromMap(map); 3773 int size = object->SizeFromMap(map);
3698 object->IterateBody(map->instance_type(), size, visitor); 3774 object->IterateBody(map->instance_type(), size, visitor);
3699 cur += size; 3775 cur += size;
3700 } 3776 }
3701 return true;
3702 } 3777 }
3703 static const bool NeedSequentialFinalization = false; 3778
3704 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { 3779 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
3780 MemoryChunk* chunk,
3781 PerPageData limits) {
3782 LiveObjectIterator<kBlackObjects> it(chunk);
3783 HeapObject* object = NULL;
3784 while ((object = it.Next()) != NULL) {
3785 Map* map = object->map();
3786 int size = object->SizeFromMap(map);
3787 object->IterateBody(map->instance_type(), size, visitor);
3788 }
3705 } 3789 }
3706 }; 3790 };
3707 3791
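Because a new->new promoted page keeps its dead objects (it is only swept later), pointer updating cannot walk it linearly; it has to visit just the black-marked objects, whereas a freshly evacuated to-space page can be scanned address by address. A small sketch of the two traversals over a toy page (the struct and its live flag are stand-ins for real objects and mark bits):

#include <vector>

struct ObjectSketch {
  int size;
  bool marked_black;  // stand-in for the real mark bit
};

// Freshly evacuated to-space pages can be walked linearly, object after object.
int VisitAllSketch(const std::vector<ObjectSketch>& page) {
  int visited_bytes = 0;
  for (const ObjectSketch& o : page) visited_bytes += o.size;
  return visited_bytes;
}

// Pages with PAGE_NEW_NEW_PROMOTION still contain dead objects, so only the
// black-marked ones may be visited.
int VisitLiveSketch(const std::vector<ObjectSketch>& page) {
  int visited_bytes = 0;
  for (const ObjectSketch& o : page) {
    if (o.marked_black) visited_bytes += o.size;
  }
  return visited_bytes;
}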
3708 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) { 3792 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
3709 PageParallelJob<ToSpacePointerUpdateJobTraits> job( 3793 PageParallelJob<ToSpacePointerUpdateJobTraits> job(
3710 heap, heap->isolate()->cancelable_task_manager(), semaphore); 3794 heap, heap->isolate()->cancelable_task_manager(), semaphore);
3711 Address space_start = heap->new_space()->bottom(); 3795 Address space_start = heap->new_space()->bottom();
3712 Address space_end = heap->new_space()->top(); 3796 Address space_end = heap->new_space()->top();
3713 NewSpacePageIterator it(space_start, space_end); 3797 NewSpacePageIterator it(space_start, space_end);
3714 while (it.has_next()) { 3798 while (it.has_next()) {
(...skipping 55 matching lines...)
3770 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); 3854 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3771 } 3855 }
3772 3856
3773 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, 3857 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
3774 int required_freed_bytes, 3858 int required_freed_bytes,
3775 int max_pages) { 3859 int max_pages) {
3776 int max_freed = 0; 3860 int max_freed = 0;
3777 int pages_freed = 0; 3861 int pages_freed = 0;
3778 Page* page = nullptr; 3862 Page* page = nullptr;
3779 while ((page = GetSweepingPageSafe(identity)) != nullptr) { 3863 while ((page = GetSweepingPageSafe(identity)) != nullptr) {
3780 int freed = ParallelSweepPage(page, heap_->paged_space(identity)); 3864 int freed = ParallelSweepPage(page, identity);
3781 pages_freed += 1; 3865 pages_freed += 1;
3782 DCHECK_GE(freed, 0); 3866 DCHECK_GE(freed, 0);
3783 max_freed = Max(max_freed, freed); 3867 max_freed = Max(max_freed, freed);
3784 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes)) 3868 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
3785 return max_freed; 3869 return max_freed;
3786 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed; 3870 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
3787 } 3871 }
3788 return max_freed; 3872 return max_freed;
3789 } 3873 }
3790 3874
3791 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, 3875 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
3792 PagedSpace* space) { 3876 AllocationSpace identity) {
3793 int max_freed = 0; 3877 int max_freed = 0;
3794 if (page->mutex()->TryLock()) { 3878 if (page->mutex()->TryLock()) {
3795 // If this page was already swept in the meantime, we can return here. 3879 // If this page was already swept in the meantime, we can return here.
3796 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { 3880 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
3797 page->mutex()->Unlock(); 3881 page->mutex()->Unlock();
3798 return 0; 3882 return 0;
3799 } 3883 }
3800 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); 3884 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3801 if (space->identity() == OLD_SPACE) { 3885 if (identity == NEW_SPACE) {
3886 RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3887 IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr);
3888 } else if (identity == OLD_SPACE) {
3802 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, 3889 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3803 IGNORE_FREE_SPACE>(space, page, NULL); 3890 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
3804 } else if (space->identity() == CODE_SPACE) { 3891 heap_->paged_space(identity), page, nullptr);
3892 } else if (identity == CODE_SPACE) {
3805 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, 3893 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
3806 IGNORE_FREE_SPACE>(space, page, NULL); 3894 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
3895 heap_->paged_space(identity), page, nullptr);
3807 } else { 3896 } else {
3808 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, 3897 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3809 IGNORE_FREE_SPACE>(space, page, NULL); 3898 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
3899 heap_->paged_space(identity), page, nullptr);
3810 } 3900 }
3811 { 3901 {
3812 base::LockGuard<base::Mutex> guard(&mutex_); 3902 base::LockGuard<base::Mutex> guard(&mutex_);
3813 swept_list_[space->identity()].Add(page); 3903 swept_list_[identity].Add(page);
3814 } 3904 }
3815 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); 3905 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3816 page->mutex()->Unlock(); 3906 page->mutex()->Unlock();
3817 } 3907 }
3818 return max_freed; 3908 return max_freed;
3819 } 3909 }
3820 3910
3821 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { 3911 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
3822 DCHECK(!sweeping_in_progress_); 3912 DCHECK(!sweeping_in_progress_);
3823 PrepareToBeSweptPage(space, page); 3913 PrepareToBeSweptPage(space, page);
3824 sweeping_list_[space].push_back(page); 3914 sweeping_list_[space].push_back(page);
3825 } 3915 }
3826 3916
3827 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, 3917 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
3828 Page* page) { 3918 Page* page) {
3829 DCHECK(sweeping_in_progress_); 3919 DCHECK(sweeping_in_progress_);
3830 PrepareToBeSweptPage(space, page); 3920 PrepareToBeSweptPage(space, page);
3831 late_pages_ = true; 3921 late_pages_ = true;
3832 AddSweepingPageSafe(space, page); 3922 AddSweepingPageSafe(space, page);
3833 } 3923 }
3834 3924
3835 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, 3925 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
3836 Page* page) { 3926 Page* page) {
3837 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); 3927 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
3838 int to_sweep = page->area_size() - page->LiveBytes(); 3928 int to_sweep = page->area_size() - page->LiveBytes();
3839 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); 3929 if (space != NEW_SPACE)
3930 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
3840 } 3931 }
3841 3932
3842 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( 3933 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
3843 AllocationSpace space) { 3934 AllocationSpace space) {
3844 base::LockGuard<base::Mutex> guard(&mutex_); 3935 base::LockGuard<base::Mutex> guard(&mutex_);
3845 Page* page = nullptr; 3936 Page* page = nullptr;
3846 if (!sweeping_list_[space].empty()) { 3937 if (!sweeping_list_[space].empty()) {
3847 page = sweeping_list_[space].front(); 3938 page = sweeping_list_[space].front();
3848 sweeping_list_[space].pop_front(); 3939 sweeping_list_[space].pop_front();
3849 } 3940 }
(...skipping 43 matching lines...)
3893 continue; 3984 continue;
3894 } 3985 }
3895 3986
3896 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { 3987 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3897 // We need to sweep the page to get it into an iterable state again. Note 3988 // We need to sweep the page to get it into an iterable state again. Note
3898 // that this adds unusable memory into the free list that is later on 3989 // that this adds unusable memory into the free list that is later on
3899 // (in the free list) dropped again. Since we only use the flag for 3990 // (in the free list) dropped again. Since we only use the flag for
3900 // testing this is fine. 3991 // testing this is fine.
3901 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); 3992 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3902 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, 3993 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
3903 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>( 3994 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST,
3904 space, p, nullptr); 3995 Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr);
3905 continue; 3996 continue;
3906 } 3997 }
3907 3998
3908 // One unused page is kept, all further are released before sweeping them. 3999 // One unused page is kept, all further are released before sweeping them.
3909 if (p->LiveBytes() == 0) { 4000 if (p->LiveBytes() == 0) {
3910 if (unused_page_present) { 4001 if (unused_page_present) {
3911 if (FLAG_gc_verbose) { 4002 if (FLAG_gc_verbose) {
3912 PrintIsolate(isolate(), "sweeping: released page: %p", 4003 PrintIsolate(isolate(), "sweeping: released page: %p",
3913 static_cast<void*>(p)); 4004 static_cast<void*>(p));
3914 } 4005 }
(...skipping 85 matching lines...)
4000 MarkBit mark_bit = Marking::MarkBitFrom(host); 4091 MarkBit mark_bit = Marking::MarkBitFrom(host);
4001 if (Marking::IsBlack(mark_bit)) { 4092 if (Marking::IsBlack(mark_bit)) {
4002 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); 4093 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
4003 RecordRelocSlot(host, &rinfo, target); 4094 RecordRelocSlot(host, &rinfo, target);
4004 } 4095 }
4005 } 4096 }
4006 } 4097 }
4007 4098
4008 } // namespace internal 4099 } // namespace internal
4009 } // namespace v8 4100 } // namespace v8