Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 1863983002: 🏄 [heap] Add page evacuation mode for new->old (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Disable optimize_for_size for the feature test, as the test requires more than one page of new space. Created 4 years, 8 months ago.
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/mark-compact.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(...skipping 1648 matching lines...)
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    CompactionSpaceCollection* compaction_spaces,
                                    HashMap* local_pretenuring_feedback)
       : EvacuateVisitorBase(heap, compaction_spaces),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         space_to_allocate_(NEW_SPACE),
         promoted_size_(0),
         semispace_copied_size_(0),
         local_pretenuring_feedback_(local_pretenuring_feedback) {}

-  bool Visit(HeapObject* object) override {
+  inline bool Visit(HeapObject* object) override {
     heap_->UpdateAllocationSite<Heap::kCached>(object,
                                                local_pretenuring_feedback_);
     int size = object->Size();
     HeapObject* target_object = nullptr;
     if (heap_->ShouldBePromoted(object->address(), size) &&
         TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
                           &target_object)) {
       // If we end up needing more special cases, we should factor this out.
       if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
         heap_->array_buffer_tracker()->Promote(
(...skipping 111 matching lines...)
     return allocation;
   }

   LocalAllocationBuffer buffer_;
   AllocationSpace space_to_allocate_;
   intptr_t promoted_size_;
   intptr_t semispace_copied_size_;
   HashMap* local_pretenuring_feedback_;
 };

+class MarkCompactCollector::EvacuateNewSpacePageVisitor final
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
+
+  static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
+    page->heap()->new_space()->ReplaceWithEmptyPage(page);
+    Page* new_page = Page::Convert(page, owner);
+    new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+  }
+
+  inline bool Visit(HeapObject* object) {
+    if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
+      object->GetHeap()->array_buffer_tracker()->Promote(
+          JSArrayBuffer::cast(object));
+    }
+    RecordMigratedSlotVisitor visitor;
+    object->IterateBodyFast(&visitor);
+    promoted_size_ += object->Size();
+    return true;
+  }
+
+  intptr_t promoted_size() { return promoted_size_; }
+
+ private:
+  intptr_t promoted_size_;
+};
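Note: unlike EvacuateNewSpaceVisitor above, this visitor never copies an object. MoveToOldSpace() retags the page itself (new space receives a fresh empty page in exchange), and Visit() only re-records slots and tallies promoted bytes. A simplified sketch of the intended call sequence; the driver function is hypothetical, and only MoveToOldSpace() and Visit() come from this patch:

  // Sketch only: a hypothetical driver showing how the pieces fit together.
  void PromoteWholePage(NewSpacePage* page, Heap* heap) {
    // Swap an empty page into new space and convert this page to old space;
    // the page is tagged PAGE_NEW_OLD_PROMOTION for later finalization.
    MarkCompactCollector::EvacuateNewSpacePageVisitor::MoveToOldSpace(
        page, heap->old_space());
    // During evacuation, live objects are then visited in place: slots are
    // re-recorded via RecordMigratedSlotVisitor and sizes are tallied, but
    // no object is moved.
    // for each live object o on the page: visitor.Visit(o);
  }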

 class MarkCompactCollector::EvacuateOldSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
                           CompactionSpaceCollection* compaction_spaces)
       : EvacuateVisitorBase(heap, compaction_spaces) {}

-  bool Visit(HeapObject* object) override {
+  inline bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
         Page::FromAddress(object->address())->owner()->identity());
     HeapObject* target_object = nullptr;
     if (TryEvacuateObject(target_space, object, &target_object)) {
       DCHECK(object->map_word().IsForwardingAddress());
       return true;
     }
     return false;
   }
 };
(...skipping 1194 matching lines...)
     newspace_evacuation_candidates_.Add(it.next());
   }
   new_space->Flip();
   new_space->ResetAllocationInfo();
 }

 void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
   newspace_evacuation_candidates_.Rewind(0);
 }

-
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
+  // NewSpacePages with more live bytes than this threshold qualify for fast
+  // evacuation.
+  static int PageEvacuationThreshold() {
+    if (FLAG_page_promotion)
+      return FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
+             100;
+    return NewSpacePage::kAllocatableMemory + kPointerSize;
+  }
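For intuition, the threshold is a percentage of a page's allocatable payload. The constants below are assumptions for illustration; neither the default of FLAG_page_promotion_threshold nor NewSpacePage::kAllocatableMemory is visible in this diff:

  // Illustrative arithmetic only; both constants are assumed values.
  const int kAssumedThresholdPercent = 70;          // FLAG_page_promotion_threshold
  const int kAssumedAllocatableBytes = 500 * 1024;  // NewSpacePage payload
  const int kLimit = kAssumedThresholdPercent * kAssumedAllocatableBytes / 100;
  // kLimit == 358400, i.e. a page qualifies once LiveBytes() > ~350 KB.
  // With --no-page-promotion the threshold becomes kAllocatableMemory +
  // kPointerSize, which LiveBytes() can never exceed, disabling the mode.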
+
   explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
         compaction_spaces_(collector->heap()),
         local_pretenuring_feedback_(HashMap::PointersMatch,
                                     kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(collector->heap(), &compaction_spaces_,
                            &local_pretenuring_feedback_),
+        new_space_page_visitor(),
         old_space_visitor_(collector->heap(), &compaction_spaces_),
         duration_(0.0),
         bytes_compacted_(0) {}

   inline bool EvacuatePage(MemoryChunk* chunk);

   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
   inline void Finalize();

   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }

  private:
+  enum EvacuationMode {
+    kObjectsNewToOld,
+    kPageNewToOld,
+    kObjectsOldToOld,
+  };
+
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;

-  Heap* heap() { return collector_->heap(); }
+  inline Heap* heap() { return collector_->heap(); }
+
+  inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
+    // Note: The order of checks is important in this function.
+    if (chunk->InNewSpace()) return kObjectsNewToOld;
+    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
+      return kPageNewToOld;
+    DCHECK(chunk->IsEvacuationCandidate());
+    return kObjectsOldToOld;
+  }
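The ordering matters because MoveToOldSpace() has already re-owned promoted pages by the time this runs: such a page answers false to InNewSpace() but still carries PAGE_NEW_OLD_PROMOTION. The resulting classification:

  InNewSpace()                                -> kObjectsNewToOld
  !InNewSpace(), PAGE_NEW_OLD_PROMOTION set   -> kPageNewToOld
  otherwise (asserted evacuation candidate)   -> kObjectsOldToOld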

   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
     duration_ += duration;
     bytes_compacted_ += bytes_compacted;
   }

-  template <IterationMode mode>
-  inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
+  template <IterationMode mode, class Visitor>
+  inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);

   MarkCompactCollector* collector_;

   // Locally cached collector data.
   CompactionSpaceCollection compaction_spaces_;
   HashMap local_pretenuring_feedback_;

   // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
+  EvacuateNewSpacePageVisitor new_space_page_visitor;
   EvacuateOldSpaceVisitor old_space_visitor_;

   // Book keeping info.
   double duration_;
   intptr_t bytes_compacted_;
 };

-template <MarkCompactCollector::IterationMode mode>
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
-    MemoryChunk* p, HeapObjectVisitor* visitor) {
+template <MarkCompactCollector::IterationMode mode, class Visitor>
+bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
+                                                         Visitor* visitor) {
   bool success = false;
-  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
+  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
+         p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
   int saved_live_bytes = p->LiveBytes();
   double evacuation_time;
   {
     AlwaysAllocateScope always_allocate(heap()->isolate());
     TimedScope timed_scope(&evacuation_time);
-    success = collector_->VisitLiveObjects(p, visitor, mode);
+    success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
   }
   if (FLAG_trace_evacuation) {
     const char age_mark_tag =
         !p->InNewSpace()
             ? 'x'
             : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)
                   ? '>'
                   : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<'
                                                                        : '#';
     PrintIsolate(heap()->isolate(),
                  "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
-                 "executable=%d live_bytes=%d time=%f\n",
+                 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
                  this, p, p->InNewSpace(), age_mark_tag,
+                 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
                  p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
                  evacuation_time);
   }
   if (success) {
     ReportCompactionProgress(evacuation_time, saved_live_bytes);
   }
   return success;
 }

 bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
-  bool success = false;
-  if (chunk->InNewSpace()) {
-    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
-              NewSpacePage::kSweepingDone);
-    success = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
-    DCHECK(success);
-    USE(success);
-  } else {
-    DCHECK(chunk->IsEvacuationCandidate());
-    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
-    success = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
-    if (!success) {
-      // Aborted compaction page. We can record slots here to have them
-      // processed in parallel later on.
-      EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
-      success = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
-      DCHECK(success);
-      USE(success);
-      // We need to return failure here to indicate that we want this page
-      // added to the sweeper.
-      return false;
-    }
-  }
-  return success;
+  bool result = false;
+  DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
+            NewSpacePage::kSweepingDone);
+  switch (ComputeEvacuationMode(chunk)) {
+    case kObjectsNewToOld:
+      result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
+      DCHECK(result);
+      USE(result);
+      break;
+    case kPageNewToOld:
+      result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
+      DCHECK(result);
+      USE(result);
+      break;
+    case kObjectsOldToOld:
+      result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
+      if (!result) {
+        // Aborted compaction page. We can record slots here to have them
+        // processed in parallel later on.
+        EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
+        result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
+        DCHECK(result);
+        USE(result);
+        // We need to return failure here to indicate that we want this page
+        // added to the sweeper.
+        return false;
+      }
+      break;
+    default:
+      UNREACHABLE();
+  }
+  return result;
 }
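Note the iteration modes across the three cases: both copying paths use kClearMarkbits, while the page-promotion path and the aborted-compaction fallback use kKeepMarking, since their objects stay in place and the sweeper still needs the mark bits to find live objects. A self-contained toy summary of that contract; the helper function is hypothetical, only the enum values mirror this patch:

  enum IterationMode { kClearMarkbits, kKeepMarking };
  enum EvacuationMode { kObjectsNewToOld, kPageNewToOld, kObjectsOldToOld };

  IterationMode IterationModeFor(EvacuationMode mode, bool aborted) {
    // Moved objects leave forwarding pointers; their old mark bits can go.
    // Objects left in place (whole-page promotion, aborted compaction) keep
    // their mark bits so the sweeper can rebuild free lists around them.
    if (mode == kPageNewToOld || aborted) return kKeepMarking;
    return kClearMarkbits;
  }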

 void MarkCompactCollector::Evacuator::Finalize() {
   heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
   heap()->code_space()->MergeCompactionSpace(
       compaction_spaces_.Get(CODE_SPACE));
   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
-  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
+  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
+                                       new_space_page_visitor.promoted_size());
   heap()->IncrementSemiSpaceCopiedObjectSize(
       new_space_visitor_.semispace_copied_size());
   heap()->IncrementYoungSurvivorsCounter(
       new_space_visitor_.promoted_size() +
-      new_space_visitor_.semispace_copied_size());
+      new_space_visitor_.semispace_copied_size() +
+      new_space_page_visitor.promoted_size());
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
 }

 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
                                                           intptr_t live_bytes) {
   if (!FLAG_parallel_compaction) return 1;
   // Compute the number of needed tasks based on a target compaction time, the
   // profiled compaction speed and marked live memory.
   //
   // The number of parallel compaction tasks is limited by:
(...skipping 29 matching lines...)

   static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
                                     MemoryChunk* chunk, PerPageData) {
     return evacuator->EvacuatePage(chunk);
   }

   static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
                                        bool success, PerPageData data) {
     if (chunk->InNewSpace()) {
       DCHECK(success);
+    } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+      DCHECK(success);
+      Page* p = static_cast<Page*>(chunk);
+      p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+      p->ForAllFreeListCategories(
+          [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
+      heap->mark_compact_collector()->sweeper().AddLatePage(
+          p->owner()->identity(), p);
     } else {
       Page* p = static_cast<Page*>(chunk);
       if (success) {
         DCHECK(p->IsEvacuationCandidate());
         DCHECK(p->SweepingDone());
         p->Unlink();
       } else {
         // We have partially compacted the page, i.e., some objects may have
         // moved, others are still in place.
         p->SetFlag(Page::COMPACTION_WAS_ABORTED);
         p->ClearEvacuationCandidate();
         // Slots have already been recorded so we just need to add it to the
         // sweeper.
         *data += 1;
       }
     }
   }
 };

 void MarkCompactCollector::EvacuatePagesInParallel() {
   PageParallelJob<EvacuationJobTraits> job(
       heap_, heap_->isolate()->cancelable_task_manager());

   int abandoned_pages = 0;
   intptr_t live_bytes = 0;
   for (Page* page : evacuation_candidates_) {
     live_bytes += page->LiveBytes();
     job.AddPage(page, &abandoned_pages);
   }
+  const Address age_mark = heap()->new_space()->age_mark();
   for (NewSpacePage* page : newspace_evacuation_candidates_) {
     live_bytes += page->LiveBytes();
+    if (!page->NeverEvacuate() &&
+        (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
+        page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
+        !page->Contains(age_mark)) {
+      EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
+    }
     job.AddPage(page, &abandoned_pages);
   }
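Restating the new promotion criteria as a standalone predicate for readability (the helper is hypothetical; the patch keeps the condition inline): a page must be evacuable at all, mostly live, and lie entirely below the age mark, so that every object on it has already survived one scavenge. A page that merely contains the age mark is only partially aged and is skipped.

  // Sketch; the condition is copied from the loop above.
  bool ShouldMovePageToOldSpace(NewSpacePage* page, Address age_mark) {
    return !page->NeverEvacuate() &&
           (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
           page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
           !page->Contains(age_mark);
  }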
   DCHECK_GE(job.NumberOfPages(), 1);

   // Used for trace summary.
   double compaction_speed = 0;
   if (FLAG_trace_evacuation) {
     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
   }

(...skipping 139 matching lines...)
 #ifdef VERIFY_HEAP
 static void VerifyAllBlackObjects(MemoryChunk* page) {
   LiveObjectIterator<kAllLiveObjects> it(page);
   HeapObject* object = NULL;
   while ((object = it.Next()) != NULL) {
     CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
   }
 }
 #endif  // VERIFY_HEAP

-
-bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
-                                            HeapObjectVisitor* visitor,
+template <class Visitor>
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
                                             IterationMode mode) {
 #ifdef VERIFY_HEAP
   VerifyAllBlackObjects(page);
 #endif  // VERIFY_HEAP

   LiveObjectIterator<kBlackObjects> it(page);
   HeapObject* object = nullptr;
   while ((object = it.Next()) != nullptr) {
     DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
     if (!visitor->Visit(object)) {
(...skipping 140 matching lines...)
     RememberedSet<OLD_TO_OLD>::IterateTyped(
         chunk, [isolate, visitor](SlotType type, Address slot) {
           UpdateTypedSlot(isolate, visitor, type, slot);
           return REMOVE_SLOT;
         });
   }
 }

 static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
   MapWord map_word = object->map_word();
-  // Since we only filter invalid slots in old space, the store buffer can
-  // still contain stale pointers in large object and in map spaces. Ignore
-  // these pointers here.
-  DCHECK(map_word.IsForwardingAddress() ||
-         !object->GetHeap()->old_space()->Contains(
-             reinterpret_cast<Address>(address)));
+  // There could still be stale pointers in large object space, map space,
+  // and old space for pages that have been promoted.
   if (map_word.IsForwardingAddress()) {
     // Update the corresponding slot.
     *address = map_word.ToForwardingAddress();
   }
 }
 };

 int NumberOfPointerUpdateTasks(int pages) {
   if (!FLAG_parallel_pointer_update) return 1;
   const int kMaxTasks = 4;
(...skipping 315 matching lines...)
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(host, &rinfo, target);
     }
   }
 }

 }  // namespace internal
 }  // namespace v8