
Unified Diff: src/heap/mark-compact.cc

Issue 1901093002: Reland of "[heap] Add page evacuation mode for new->old" (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fix ExternalStringTable handling (created 4 years, 8 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/mark-compact.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(...skipping 1648 matching lines...)
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    CompactionSpaceCollection* compaction_spaces,
                                    HashMap* local_pretenuring_feedback)
       : EvacuateVisitorBase(heap, compaction_spaces),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         space_to_allocate_(NEW_SPACE),
         promoted_size_(0),
         semispace_copied_size_(0),
         local_pretenuring_feedback_(local_pretenuring_feedback) {}

-  bool Visit(HeapObject* object) override {
+  inline bool Visit(HeapObject* object) override {
     heap_->UpdateAllocationSite<Heap::kCached>(object,
                                                local_pretenuring_feedback_);
     int size = object->Size();
     HeapObject* target_object = nullptr;
     if (heap_->ShouldBePromoted(object->address(), size) &&
         TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
                           &target_object)) {
       // If we end up needing more special cases, we should factor this out.
       if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
         heap_->array_buffer_tracker()->Promote(
(...skipping 111 matching lines...)
     return allocation;
   }

   LocalAllocationBuffer buffer_;
   AllocationSpace space_to_allocate_;
   intptr_t promoted_size_;
   intptr_t semispace_copied_size_;
   HashMap* local_pretenuring_feedback_;
 };

+class MarkCompactCollector::EvacuateNewSpacePageVisitor final
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
+
+  static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
+    page->heap()->new_space()->ReplaceWithEmptyPage(page);
+    Page* new_page = Page::Convert(page, owner);
+    new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+  }
+
+  inline bool Visit(HeapObject* object) {
+    if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
+      object->GetHeap()->array_buffer_tracker()->Promote(
+          JSArrayBuffer::cast(object));
+    }
+    RecordMigratedSlotVisitor visitor;
+    object->IterateBodyFast(&visitor);
+    promoted_size_ += object->Size();
+    return true;
+  }
+
+  intptr_t promoted_size() { return promoted_size_; }
+
+ private:
+  intptr_t promoted_size_;
+};
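
Note: For intuition, here is a minimal standalone sketch of what this visitor enables, using toy types rather than V8's API: instead of copying every live object out of a dense new-space page, the page itself is relinked into old space, and its objects are only visited so that their outgoing slots get recorded. All names below are illustrative.

    #include <cstdio>
    #include <list>

    // Toy model of whole-page promotion (illustrative types, not V8's).
    struct Page {
      int live_bytes = 0;
      bool new_old_promotion = false;  // stands in for PAGE_NEW_OLD_PROMOTION
    };

    struct Space {
      std::list<Page*> pages;
    };

    // Move the page wholesale: unlink it from the young generation, link it
    // into the old generation, and tag it so the evacuator only visits its
    // objects (to record migrated slots) instead of copying them.
    void MoveToOldSpace(Page* page, Space* new_space, Space* old_space) {
      new_space->pages.remove(page);
      old_space->pages.push_back(page);
      page->new_old_promotion = true;
    }

    int main() {
      Space young, old;
      Page p{400 * 1024};
      young.pages.push_back(&p);
      MoveToOldSpace(&p, &young, &old);
      std::printf("promoted=%d old_pages=%zu\n", p.new_old_promotion,
                  old.pages.size());
    }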
 
 class MarkCompactCollector::EvacuateOldSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
                           CompactionSpaceCollection* compaction_spaces)
       : EvacuateVisitorBase(heap, compaction_spaces) {}

-  bool Visit(HeapObject* object) override {
+  inline bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
         Page::FromAddress(object->address())->owner()->identity());
     HeapObject* target_object = nullptr;
     if (TryEvacuateObject(target_space, object, &target_object)) {
       DCHECK(object->map_word().IsForwardingAddress());
       return true;
     }
     return false;
   }
 };
(...skipping 1191 matching lines...)
     newspace_evacuation_candidates_.Add(it.next());
   }
   new_space->Flip();
   new_space->ResetAllocationInfo();
 }

 void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
   newspace_evacuation_candidates_.Rewind(0);
 }

-
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
+  // NewSpacePages with more live bytes than this threshold qualify for fast
+  // evacuation.
+  static int PageEvacuationThreshold() {
+    if (FLAG_page_promotion)
+      return FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
+             100;
+    return NewSpacePage::kAllocatableMemory + kPointerSize;
+  }
+
   explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
         compaction_spaces_(collector->heap()),
         local_pretenuring_feedback_(HashMap::PointersMatch,
                                     kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(collector->heap(), &compaction_spaces_,
                            &local_pretenuring_feedback_),
+        new_space_page_visitor(),
         old_space_visitor_(collector->heap(), &compaction_spaces_),
         duration_(0.0),
         bytes_compacted_(0) {}

   inline bool EvacuatePage(MemoryChunk* chunk);

   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
   inline void Finalize();

   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }

  private:
+  enum EvacuationMode {
+    kObjectsNewToOld,
+    kPageNewToOld,
+    kObjectsOldToOld,
+  };
+
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;

-  Heap* heap() { return collector_->heap(); }
+  inline Heap* heap() { return collector_->heap(); }
+
+  inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
+    // Note: The order of checks is important in this function.
+    if (chunk->InNewSpace()) return kObjectsNewToOld;
+    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
+      return kPageNewToOld;
+    DCHECK(chunk->IsEvacuationCandidate());
+    return kObjectsOldToOld;
+  }

   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
     duration_ += duration;
     bytes_compacted_ += bytes_compacted;
   }

-  template <IterationMode mode>
-  inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
+  template <IterationMode mode, class Visitor>
+  inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);

   MarkCompactCollector* collector_;

   // Locally cached collector data.
   CompactionSpaceCollection compaction_spaces_;
   HashMap local_pretenuring_feedback_;

   // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
+  EvacuateNewSpacePageVisitor new_space_page_visitor;
   EvacuateOldSpaceVisitor old_space_visitor_;

   // Book keeping info.
   double duration_;
   intptr_t bytes_compacted_;
 };
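
Note: To make the added threshold concrete, a worked example under stated assumptions: the diff does not show the flag defaults or the value of NewSpacePage::kAllocatableMemory, so we treat FLAG_page_promotion_threshold as a percentage (say 70) and assume a 500 KB page payload. The disabled case returns one pointer size beyond what LiveBytes() can ever reach, which cleanly turns the feature off.

    #include <cstdio>

    // Assumed stand-ins for values the patch reads from V8; the real constant
    // lives in spaces.h and the flag defaults are not visible in this diff.
    constexpr int kAllocatableMemory = 500 * 1024;  // hypothetical page payload
    constexpr int kPointerSize = 8;

    int PageEvacuationThreshold(bool page_promotion, int threshold_percent) {
      if (page_promotion)
        return threshold_percent * kAllocatableMemory / 100;
      // Flag off: one word past the page payload, so no page can qualify.
      return kAllocatableMemory + kPointerSize;
    }

    int main() {
      std::printf("enabled:  %d bytes\n", PageEvacuationThreshold(true, 70));
      std::printf("disabled: %d bytes\n", PageEvacuationThreshold(false, 70));
      // enabled:  358400  (70% of 512000)
      // disabled: 512008  (larger than any possible LiveBytes())
    }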
 
-template <MarkCompactCollector::IterationMode mode>
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
-    MemoryChunk* p, HeapObjectVisitor* visitor) {
+template <MarkCompactCollector::IterationMode mode, class Visitor>
+bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
+                                                         Visitor* visitor) {
   bool success = false;
-  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
+  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
+         p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
   int saved_live_bytes = p->LiveBytes();
   double evacuation_time;
   {
     AlwaysAllocateScope always_allocate(heap()->isolate());
     TimedScope timed_scope(&evacuation_time);
-    success = collector_->VisitLiveObjects(p, visitor, mode);
+    success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
   }
   if (FLAG_trace_evacuation) {
     const char age_mark_tag =
         !p->InNewSpace()
             ? 'x'
             : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)
                   ? '>'
                   : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<'
                                                                        : '#';
     PrintIsolate(heap()->isolate(),
                  "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
-                 "executable=%d live_bytes=%d time=%f\n",
+                 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
                  this, p, p->InNewSpace(), age_mark_tag,
+                 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
                  p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
                  evacuation_time);
   }
   if (success) {
     ReportCompactionProgress(evacuation_time, saved_live_bytes);
   }
   return success;
 }
 
 bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
-  bool success = false;
-  if (chunk->InNewSpace()) {
-    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
-              NewSpacePage::kSweepingDone);
-    success = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
-    DCHECK(success);
-    USE(success);
-  } else {
-    DCHECK(chunk->IsEvacuationCandidate());
-    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
-    success = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
-    if (!success) {
-      // Aborted compaction page. We can record slots here to have them
-      // processed in parallel later on.
-      EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
-      success = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
-      DCHECK(success);
-      USE(success);
-      // We need to return failure here to indicate that we want this page added
-      // to the sweeper.
-      return false;
-    }
-  }
-  return success;
+  bool result = false;
+  DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
+            NewSpacePage::kSweepingDone);
+  switch (ComputeEvacuationMode(chunk)) {
+    case kObjectsNewToOld:
+      result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
+      DCHECK(result);
+      USE(result);
+      break;
+    case kPageNewToOld:
+      result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
+      DCHECK(result);
+      USE(result);
+      break;
+    case kObjectsOldToOld:
+      result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
+      if (!result) {
+        // Aborted compaction page. We can record slots here to have them
+        // processed in parallel later on.
+        EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
+        result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
+        DCHECK(result);
+        USE(result);
+        // We need to return failure here to indicate that we want this page
+        // added to the sweeper.
+        return false;
+      }
+      break;
+    default:
+      UNREACHABLE();
+  }
+  return result;
 }
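
Note: The visitor/mode pairing in the switch above is the crux of the feature. A small illustrative sketch of that dispatch (names copied from the patch, structure hypothetical); the key point is that promoted pages keep their mark bits, which the sweeper appears to rely on when the page is later handed over via AddLatePage in FinalizePageSequentially below.

    #include <cstdio>

    // Which visitor and iteration mode each evacuation mode pairs with,
    // per the switch above (illustrative summary, not V8 code).
    enum EvacuationMode { kObjectsNewToOld, kPageNewToOld, kObjectsOldToOld };

    struct Dispatch {
      const char* visitor;
      const char* iteration_mode;
    };

    Dispatch DispatchFor(EvacuationMode mode) {
      switch (mode) {
        case kObjectsNewToOld:  // copy objects out of the page
          return {"new_space_visitor_", "kClearMarkbits"};
        case kPageNewToOld:
          // Page moved wholesale; mark bits survive so the sweeper can
          // later rebuild the free list from them.
          return {"new_space_page_visitor", "kKeepMarking"};
        case kObjectsOldToOld:  // regular old-space compaction
          return {"old_space_visitor_", "kClearMarkbits"};
      }
      return {nullptr, nullptr};
    }

    int main() {
      const Dispatch d = DispatchFor(kPageNewToOld);
      std::printf("%s with %s\n", d.visitor, d.iteration_mode);
    }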
 
 void MarkCompactCollector::Evacuator::Finalize() {
   heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
   heap()->code_space()->MergeCompactionSpace(
       compaction_spaces_.Get(CODE_SPACE));
   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
-  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
+  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
+                                       new_space_page_visitor.promoted_size());
   heap()->IncrementSemiSpaceCopiedObjectSize(
       new_space_visitor_.semispace_copied_size());
   heap()->IncrementYoungSurvivorsCounter(
       new_space_visitor_.promoted_size() +
-      new_space_visitor_.semispace_copied_size());
+      new_space_visitor_.semispace_copied_size() +
+      new_space_page_visitor.promoted_size());
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
 }
 
 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
                                                           intptr_t live_bytes) {
   if (!FLAG_parallel_compaction) return 1;
   // Compute the number of needed tasks based on a target compaction time, the
   // profiled compaction speed and marked live memory.
   //
   // The number of parallel compaction tasks is limited by:
(...skipping 29 matching lines...)
 
   static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
                                     MemoryChunk* chunk, PerPageData) {
     return evacuator->EvacuatePage(chunk);
   }

   static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
                                        bool success, PerPageData data) {
     if (chunk->InNewSpace()) {
       DCHECK(success);
+    } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+      DCHECK(success);
+      Page* p = static_cast<Page*>(chunk);
+      p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+      p->ForAllFreeListCategories(
+          [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
+      heap->mark_compact_collector()->sweeper().AddLatePage(
+          p->owner()->identity(), p);
     } else {
       Page* p = static_cast<Page*>(chunk);
       if (success) {
         DCHECK(p->IsEvacuationCandidate());
         DCHECK(p->SweepingDone());
         p->Unlink();
       } else {
         // We have partially compacted the page, i.e., some objects may have
         // moved, others are still in place.
         p->SetFlag(Page::COMPACTION_WAS_ABORTED);
         p->ClearEvacuationCandidate();
         // Slots have already been recorded so we just need to add it to the
         // sweeper.
         *data += 1;
       }
     }
   }
 };
 
 void MarkCompactCollector::EvacuatePagesInParallel() {
   PageParallelJob<EvacuationJobTraits> job(
       heap_, heap_->isolate()->cancelable_task_manager());

   int abandoned_pages = 0;
   intptr_t live_bytes = 0;
   for (Page* page : evacuation_candidates_) {
     live_bytes += page->LiveBytes();
     job.AddPage(page, &abandoned_pages);
   }
+  const Address age_mark = heap()->new_space()->age_mark();
   for (NewSpacePage* page : newspace_evacuation_candidates_) {
     live_bytes += page->LiveBytes();
+    if (!page->NeverEvacuate() &&
+        (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
+        page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
+        !page->Contains(age_mark)) {
+      EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
+    }
     job.AddPage(page, &abandoned_pages);
   }
   DCHECK_GE(job.NumberOfPages(), 1);

   // Used for trace summary.
   double compaction_speed = 0;
   if (FLAG_trace_evacuation) {
     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
   }
 
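Note: Restating the added four-part criterion as a predicate over a toy page descriptor (hypothetical helper, not part of the patch), so the conditions can be read in one place:

    #include <cstdio>

    // Toy descriptor with just the fields the criterion reads.
    struct PageInfo {
      int live_bytes;
      bool never_evacuate;     // page pinned, may not be moved at all
      bool below_age_mark;     // NEW_SPACE_BELOW_AGE_MARK: page lies below the
                               // age mark, i.e. its objects are old enough
      bool contains_age_mark;  // age mark falls inside this page
    };

    // All four conditions from the loop above must hold for whole-page
    // promotion.
    bool ShouldPromotePage(const PageInfo& p, int threshold_bytes) {
      return !p.never_evacuate && p.live_bytes > threshold_bytes &&
             p.below_age_mark && !p.contains_age_mark;
    }

    int main() {
      const int threshold = 358400;  // e.g. 70% of an assumed 500 KB payload
      PageInfo dense{400 * 1024, false, true, false};
      PageInfo sparse{100 * 1024, false, true, false};
      std::printf("dense=%d sparse=%d\n",
                  ShouldPromotePage(dense, threshold),
                  ShouldPromotePage(sparse, threshold));  // dense=1 sparse=0
    }
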
(...skipping 139 matching lines...)
 #ifdef VERIFY_HEAP
 static void VerifyAllBlackObjects(MemoryChunk* page) {
   LiveObjectIterator<kAllLiveObjects> it(page);
   HeapObject* object = NULL;
   while ((object = it.Next()) != NULL) {
     CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
   }
 }
 #endif  // VERIFY_HEAP

-
-bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
-                                            HeapObjectVisitor* visitor,
-                                            IterationMode mode) {
+template <class Visitor>
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
+                                            IterationMode mode) {
 #ifdef VERIFY_HEAP
   VerifyAllBlackObjects(page);
 #endif  // VERIFY_HEAP

   LiveObjectIterator<kBlackObjects> it(page);
   HeapObject* object = nullptr;
   while ((object = it.Next()) != nullptr) {
     DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
     if (!visitor->Visit(object)) {
(...skipping 140 matching lines...)
     RememberedSet<OLD_TO_OLD>::IterateTyped(
         chunk, [isolate, visitor](SlotType type, Address slot) {
           UpdateTypedSlot(isolate, visitor, type, slot);
           return REMOVE_SLOT;
         });
   }
 }

 static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
   MapWord map_word = object->map_word();
-  // Since we only filter invalid slots in old space, the store buffer can
-  // still contain stale pointers in large object and in map spaces. Ignore
-  // these pointers here.
-  DCHECK(map_word.IsForwardingAddress() ||
-         !object->GetHeap()->old_space()->Contains(
-             reinterpret_cast<Address>(address)));
+  // There could still be stale pointers in large object space, map space,
+  // and old space for pages that have been promoted.
   if (map_word.IsForwardingAddress()) {
     // Update the corresponding slot.
     *address = map_word.ToForwardingAddress();
   }
 }
 };
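
Note: The slot update above follows the standard forwarding-pointer protocol: when an object moves, its old copy's map word is overwritten with the new address; a slot is rewritten only if such a forwarding address is found, and otherwise left alone (now possible even in old space, since whole pages are promoted without moving their objects). A minimal sketch with toy types; V8 actually packs the forwarding tag into the map word itself.

    #include <cassert>
    #include <cstdint>

    // Toy header word: either a type descriptor or a forwarding address.
    struct Object {
      uintptr_t map_word;
      bool is_forwarding;  // V8 encodes this as a tag inside the word
    };

    // Rewrite *slot only if the object it points to has been moved; stale
    // slots to unmoved objects (e.g. on an in-place promoted page) stay put.
    void UpdateSlot(Object** slot) {
      Object* obj = *slot;
      if (obj->is_forwarding) {
        *slot = reinterpret_cast<Object*>(obj->map_word);
      }
    }

    int main() {
      Object new_copy{0xABCD, false};
      Object old_copy{reinterpret_cast<uintptr_t>(&new_copy), true};
      Object* slot = &old_copy;
      UpdateSlot(&slot);
      assert(slot == &new_copy);
    }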
 
 int NumberOfPointerUpdateTasks(int pages) {
   if (!FLAG_parallel_pointer_update) return 1;
   const int kMaxTasks = 4;
(...skipping 315 matching lines...)
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(host, &rinfo, target);
     }
   }
 }

 }  // namespace internal
 }  // namespace v8