Chromium Code Reviews

Diff: src/heap/mark-compact.cc

Issue 1863983002: [heap] Add page evacuation mode for new->old (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase on master Created 4 years, 8 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/heap/mark-compact.h"
 
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(...skipping 1648 matching lines...)
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    CompactionSpaceCollection* compaction_spaces,
                                    HashMap* local_pretenuring_feedback)
       : EvacuateVisitorBase(heap, compaction_spaces),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         space_to_allocate_(NEW_SPACE),
         promoted_size_(0),
         semispace_copied_size_(0),
         local_pretenuring_feedback_(local_pretenuring_feedback) {}
 
-  bool Visit(HeapObject* object) override {
+  inline bool Visit(HeapObject* object) override {
     heap_->UpdateAllocationSite<Heap::kCached>(object,
                                                local_pretenuring_feedback_);
     int size = object->Size();
     HeapObject* target_object = nullptr;
     if (heap_->ShouldBePromoted(object->address(), size) &&
         TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
                           &target_object)) {
       // If we end up needing more special cases, we should factor this out.
       if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
         heap_->array_buffer_tracker()->Promote(
(...skipping 111 matching lines...)
     return allocation;
   }
 
   LocalAllocationBuffer buffer_;
   AllocationSpace space_to_allocate_;
   intptr_t promoted_size_;
   intptr_t semispace_copied_size_;
   HashMap* local_pretenuring_feedback_;
 };
 
+class MarkCompactCollector::EvacuateNewSpacePageVisitor final
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
+
+  static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
+    page->heap()->new_space()->ReplaceWithEmptyPage(page);
+    Page* new_page = Page::Convert(page, owner);
+    new_page->SetFlag(Page::FAST_NEW_OLD_EVACUATION);
+  }
+
+  inline bool Visit(HeapObject* object) {
+    if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
+      object->GetHeap()->array_buffer_tracker()->Promote(
+          JSArrayBuffer::cast(object));
+    }
+    RecordMigratedSlotVisitor visitor;
+    object->IterateBodyFast(&visitor);
+    promoted_size_ += object->Size();
+    return true;
+  }
+
+  intptr_t promoted_size() { return promoted_size_; }
+
+ private:
+  intptr_t promoted_size_;
+};
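Note: EvacuateNewSpacePageVisitor implements the new fast path: MoveToOldSpace re-registers a whole, mostly-live page with old space instead of copying each object, and Visit only re-records migrated slots and tallies promoted bytes. A minimal standalone sketch of the idea follows; Object, Page, Space, and PromotePage are invented stand-ins, not V8 types:

#include <algorithm>
#include <cstdint>
#include <vector>

// Invented stand-ins; V8's real Page/Space types are far richer.
struct Object { uint32_t size; };

struct Page {
  std::vector<Object> live_objects;
  bool fast_new_old_evacuation = false;
};

struct Space { std::vector<Page*> pages; };

// Copying promotion relocates every live object (O(live bytes) of copying).
// Page promotion instead changes the page's owner; objects stay in place
// and are only revisited for bookkeeping (slot recording, counters).
// Assumes |page| is currently linked into |new_space|.
int64_t PromotePage(Page* page, Space* new_space, Space* old_space) {
  // Unlink from new space, link into old space: no object moves.
  new_space->pages.erase(
      std::find(new_space->pages.begin(), new_space->pages.end(), page));
  old_space->pages.push_back(page);
  page->fast_new_old_evacuation = true;  // mirrors FAST_NEW_OLD_EVACUATION

  int64_t promoted = 0;
  for (const Object& o : page->live_objects) {
    promoted += o.size;  // V8 additionally re-records slots here
  }
  return promoted;
}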
 
 class MarkCompactCollector::EvacuateOldSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
                           CompactionSpaceCollection* compaction_spaces)
       : EvacuateVisitorBase(heap, compaction_spaces) {}
 
-  bool Visit(HeapObject* object) override {
+  inline bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
         Page::FromAddress(object->address())->owner()->identity());
     HeapObject* target_object = nullptr;
     if (TryEvacuateObject(target_space, object, &target_object)) {
       DCHECK(object->map_word().IsForwardingAddress());
       return true;
     }
     return false;
   }
 };
(...skipping 1192 matching lines...)
     newspace_evacuation_candidates_.Add(it.next());
   }
   new_space->Flip();
   new_space->ResetAllocationInfo();
 }
 
 void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
   newspace_evacuation_candidates_.Rewind(0);
 }
 
-
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
+  // NewSpacePages with more live bytes than this threshold qualify for fast
+  // evacuation.
+  static int PageEvacuationThreshold() {
+    if (!FLAG_page_evacuation)
Hannes Payer (out of office) 2016/04/18 12:12:23  Invert this case: if (FLAG_page_evacuation)
Michael Lippautz 2016/04/18 12:57:38  Done.
+      return NewSpacePage::kAllocatableMemory + kPointerSize;
+    return FLAG_page_evacuation_threshold * NewSpacePage::kAllocatableMemory /
+           100;
+  }
+
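As the thread above records, the condition was inverted in the next patch set; note also that the disabled case returns a threshold one word larger than a page can ever hold, so no page qualifies. A sketch of the same arithmetic in the reviewer's suggested shape; the constants and the 70% figure are illustrative assumptions, not V8's values:

#include <cstdio>

// Hypothetical values for illustration only.
constexpr int kAllocatableMemory = 512 * 1024;  // usable bytes per page
constexpr int kPointerSize = 8;

// Mirrors PageEvacuationThreshold() with the suggested inversion: handle the
// enabled case first, and make the disabled threshold unreachable.
int PageEvacuationThreshold(bool page_evacuation_enabled,
                            int threshold_percent) {
  if (page_evacuation_enabled)
    return threshold_percent * kAllocatableMemory / 100;
  // One word more than a full page: LiveBytes() can never exceed this.
  return kAllocatableMemory + kPointerSize;
}

int main() {
  // With, say, a 70% threshold, a page needs > 367001 live bytes to qualify.
  std::printf("%d\n", PageEvacuationThreshold(true, 70));   // 367001
  std::printf("%d\n", PageEvacuationThreshold(false, 70));  // 524296
  return 0;
}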
   explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
         compaction_spaces_(collector->heap()),
         local_pretenuring_feedback_(HashMap::PointersMatch,
                                     kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(collector->heap(), &compaction_spaces_,
                            &local_pretenuring_feedback_),
+        new_space_page_visitor(),
         old_space_visitor_(collector->heap(), &compaction_spaces_),
         duration_(0.0),
         bytes_compacted_(0) {}
 
   inline bool EvacuatePage(MemoryChunk* chunk);
 
   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
   inline void Finalize();
 
   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
 
  private:
+  enum EvacuationMode {
+    kObjectsNewToOld,
+    kPageNewToOld,
+    kObjectsOldToOld,
+  };
+
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
 
-  Heap* heap() { return collector_->heap(); }
+  inline Heap* heap() { return collector_->heap(); }
+
+  inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
+    // Note: The order of checks is important in this function.
+    if (chunk->InNewSpace()) return kObjectsNewToOld;
+    if (chunk->IsFlagSet(MemoryChunk::FAST_NEW_OLD_EVACUATION))
+      return kPageNewToOld;
+    DCHECK(chunk->IsEvacuationCandidate());
+    return kObjectsOldToOld;
+  }
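The in-code note is load-bearing: a page flipped by MoveToOldSpace is no longer InNewSpace(), yet it is not an old-space evacuation candidate either, so the checks must run in exactly this order. A standalone sketch of the dispatch over invented page state:

enum EvacuationMode { kObjectsNewToOld, kPageNewToOld, kObjectsOldToOld };

// Invented page state; in V8 these are MemoryChunk flag queries.
struct PageState {
  bool in_new_space;
  bool fast_new_old_evacuation;  // set when the page was flipped to old space
  bool evacuation_candidate;
};

EvacuationMode ComputeEvacuationMode(const PageState& p) {
  // Order matters: a flipped page is *not* in new space any more, but it is
  // also not an old-space evacuation candidate.
  if (p.in_new_space) return kObjectsNewToOld;
  if (p.fast_new_old_evacuation) return kPageNewToOld;
  // Only evacuation candidates should reach this point.
  return kObjectsOldToOld;
}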
 
   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
     duration_ += duration;
     bytes_compacted_ += bytes_compacted;
   }
 
-  template <IterationMode mode>
-  inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
+  template <IterationMode mode, class Visitor>
+  inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);
 
   MarkCompactCollector* collector_;
 
   // Locally cached collector data.
   CompactionSpaceCollection compaction_spaces_;
   HashMap local_pretenuring_feedback_;
 
   // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
+  EvacuateNewSpacePageVisitor new_space_page_visitor;
   EvacuateOldSpaceVisitor old_space_visitor_;
 
   // Book keeping info.
   double duration_;
   intptr_t bytes_compacted_;
 };
 
-template <MarkCompactCollector::IterationMode mode>
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
-    MemoryChunk* p, HeapObjectVisitor* visitor) {
+template <MarkCompactCollector::IterationMode mode, class Visitor>
+bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
+                                                         Visitor* visitor) {
   bool success = false;
-  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
+  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
+         p->IsFlagSet(Page::FAST_NEW_OLD_EVACUATION));
   int saved_live_bytes = p->LiveBytes();
   double evacuation_time;
   {
     AlwaysAllocateScope always_allocate(heap()->isolate());
     TimedScope timed_scope(&evacuation_time);
-    success = collector_->VisitLiveObjects(p, visitor, mode);
+    success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
   }
   if (FLAG_trace_evacuation) {
     const char age_mark_tag =
         !p->InNewSpace()
             ? 'x'
             : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)
                   ? '>'
                   : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<'
                                                                        : '#';
     PrintIsolate(heap()->isolate(),
                  "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
-                 "executable=%d live_bytes=%d time=%f\n",
+                 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
                  this, p, p->InNewSpace(), age_mark_tag,
+                 p->IsFlagSet(MemoryChunk::FAST_NEW_OLD_EVACUATION),
                  p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
                  evacuation_time);
   }
   if (success) {
     ReportCompactionProgress(evacuation_time, saved_live_bytes);
   }
   return success;
 }
 
 bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
-  bool success = false;
-  if (chunk->InNewSpace()) {
-    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
-              NewSpacePage::kSweepingDone);
-    success = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
-    DCHECK(success);
-    USE(success);
-  } else {
-    DCHECK(chunk->IsEvacuationCandidate());
-    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
-    success = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
-    if (!success) {
-      // Aborted compaction page. We can record slots here to have them
-      // processed in parallel later on.
-      EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
-      success = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
-      DCHECK(success);
-      USE(success);
-      // We need to return failure here to indicate that we want this page added
-      // to the sweeper.
-      return false;
-    }
-  }
-  return success;
+  bool result = false;
+  DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
+            NewSpacePage::kSweepingDone);
+  switch (ComputeEvacuationMode(chunk)) {
+    case kObjectsNewToOld:
+      result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
+      DCHECK(result);
+      USE(result);
+      break;
+    case kPageNewToOld:
+      result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
+      DCHECK(result);
+      USE(result);
+      break;
+    case kObjectsOldToOld:
+      result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
+      if (!result) {
+        // Aborted compaction page. We can record slots here to have them
+        // processed in parallel later on.
+        EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
+        result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
+        DCHECK(result);
+        USE(result);
+        // We need to return failure here to indicate that we want this page
+        // added to the sweeper.
+        return false;
+      }
+      break;
+    default:
+      UNREACHABLE();
+  }
+  return result;
 }
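Note the return-value contract made explicit by the rewrite above: both new-space modes must succeed (hence DCHECK(result) plus USE(result)), while old->old compaction may abort, fall back to a record-only pass, and then deliberately return false so the caller queues the page for sweeping. A compact model of that contract, with invented helper names:

#include <cassert>

enum EvacuationMode { kObjectsNewToOld, kPageNewToOld, kObjectsOldToOld };

// Invented stand-ins for the EvacuateSinglePage<> instantiations.
static bool CopyObjectsToOldSpace() { return true; }  // may fail in reality
static bool PromoteWholePage() { return true; }       // cannot fail: no copies
static bool RecordSlotsOnly() { return true; }        // bookkeeping only

// Returns false only for an aborted old->old compaction, signalling the
// caller to queue the page for sweeping instead of releasing it.
static bool EvacuatePage(EvacuationMode mode) {
  switch (mode) {
    case kObjectsNewToOld:
      return CopyObjectsToOldSpace();  // DCHECKed to succeed in V8
    case kPageNewToOld:
      return PromoteWholePage();  // DCHECKed to succeed in V8
    case kObjectsOldToOld:
      if (!CopyObjectsToOldSpace()) {
        bool recorded = RecordSlotsOnly();
        assert(recorded);
        (void)recorded;
        return false;  // aborted page: hand over to the sweeper
      }
      return true;
  }
  return false;
}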
 
 void MarkCompactCollector::Evacuator::Finalize() {
   heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
   heap()->code_space()->MergeCompactionSpace(
       compaction_spaces_.Get(CODE_SPACE));
   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
-  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
+  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
+                                       new_space_page_visitor.promoted_size());
   heap()->IncrementSemiSpaceCopiedObjectSize(
       new_space_visitor_.semispace_copied_size());
   heap()->IncrementYoungSurvivorsCounter(
       new_space_visitor_.promoted_size() +
-      new_space_visitor_.semispace_copied_size());
+      new_space_visitor_.semispace_copied_size() +
+      new_space_page_visitor.promoted_size());
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
 }
 
 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
                                                           intptr_t live_bytes) {
   if (!FLAG_parallel_compaction) return 1;
   // Compute the number of needed tasks based on a target compaction time, the
   // profiled compaction speed and marked live memory.
   //
   // The number of parallel compaction tasks is limited by:
(...skipping 29 matching lines...)
 
   static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
                                     MemoryChunk* chunk, PerPageData) {
     return evacuator->EvacuatePage(chunk);
   }
 
   static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
                                        bool success, PerPageData data) {
     if (chunk->InNewSpace()) {
       DCHECK(success);
+    } else if (chunk->IsFlagSet(Page::FAST_NEW_OLD_EVACUATION)) {
+      DCHECK(success);
+      Page* p = static_cast<Page*>(chunk);
+      p->ClearFlag(Page::FAST_NEW_OLD_EVACUATION);
+      p->ForAllFreeListCategories(
+          [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
+      heap->mark_compact_collector()->sweeper().AddLatePage(
+          p->owner()->identity(), p);
     } else {
       Page* p = static_cast<Page*>(chunk);
       if (success) {
         DCHECK(p->IsEvacuationCandidate());
         DCHECK(p->SweepingDone());
         p->Unlink();
       } else {
         // We have partially compacted the page, i.e., some objects may have
         // moved, others are still in place.
         p->SetFlag(Page::COMPACTION_WAS_ABORTED);
         p->ClearEvacuationCandidate();
         // Slots have already been recorded so we just need to add it to the
         // sweeper.
         *data += 1;
       }
     }
   }
 };
 
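Note: the new middle branch above exists because a wholesale-promoted page was evacuated with kKeepMarking and was never swept, so it has no free lists yet; clearing the flag and handing the page to the sweeper as a late page rebuilds them. A sketch of the three-way finalization, with invented names:

// Invented post-evacuation bookkeeping for one page, mirroring the three
// cases in FinalizePageSequentially.
struct PageInfo {
  bool in_new_space;
  bool fast_new_old_evacuation;
  bool evacuation_succeeded;
};

enum class NextStep { kNothing, kSweepLater, kRelease, kSweepAborted };

NextStep FinalizePage(PageInfo& p) {
  if (p.in_new_space) return NextStep::kNothing;  // handled with new space
  if (p.fast_new_old_evacuation) {
    // The page was promoted wholesale: mark bits were kept and its free
    // space was never swept, so queue it as a late page for the sweeper
    // to build up free lists.
    p.fast_new_old_evacuation = false;
    return NextStep::kSweepLater;
  }
  // Old-space evacuation candidate: release it if fully evacuated,
  // otherwise sweep the partially compacted page.
  return p.evacuation_succeeded ? NextStep::kRelease : NextStep::kSweepAborted;
}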
 void MarkCompactCollector::EvacuatePagesInParallel() {
   PageParallelJob<EvacuationJobTraits> job(
       heap_, heap_->isolate()->cancelable_task_manager());
 
+  bool evacuated_pages = false;
   int abandoned_pages = 0;
   intptr_t live_bytes = 0;
   for (Page* page : evacuation_candidates_) {
     live_bytes += page->LiveBytes();
     job.AddPage(page, &abandoned_pages);
   }
+  const Address age_mark = heap()->new_space()->age_mark();
   for (NewSpacePage* page : newspace_evacuation_candidates_) {
     live_bytes += page->LiveBytes();
+    if (!page->NeverEvacuate() &&
+        (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
+        page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
+        !page->Contains(age_mark)) {
+      evacuated_pages = true;
+      EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
+    }
     job.AddPage(page, &abandoned_pages);
   }
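The candidate check added above combines four conditions: the page is allowed to move, it is mostly live (above PageEvacuationThreshold()), it lies entirely below the age mark so its objects already survived a scavenge, and it does not itself contain the age mark. A sketch of the predicate over an invented page summary:

#include <cstdint>

// Invented page summary; V8 queries these off NewSpacePage directly.
struct NewSpacePageInfo {
  int64_t live_bytes;
  bool never_evacuate;     // e.g. pinned pages
  bool below_age_mark;     // NEW_SPACE_BELOW_AGE_MARK flag
  bool contains_age_mark;  // page straddles the age mark
};

bool ShouldMovePageToOldSpace(const NewSpacePageInfo& p, int64_t threshold) {
  return !p.never_evacuate &&
         p.live_bytes > threshold &&  // mostly-live page: copying is wasteful
         p.below_age_mark &&          // objects survived a previous GC
         !p.contains_age_mark;        // age mark must stay in new space
}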
   DCHECK_GE(job.NumberOfPages(), 1);
 
   // Used for trace summary.
   double compaction_speed = 0;
   if (FLAG_trace_evacuation) {
     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
   }
 
   const int wanted_num_tasks =
       NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
   Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
   for (int i = 0; i < wanted_num_tasks; i++) {
     evacuators[i] = new Evacuator(this);
   }
   job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
   for (int i = 0; i < wanted_num_tasks; i++) {
     evacuators[i]->Finalize();
     delete evacuators[i];
   }
   delete[] evacuators;
 
+  if (evacuated_pages) {
Hannes Payer (out of office) 2016/04/18 12:12:23  This should be part of UpdatePointersAfterEvacuation.
Michael Lippautz 2016/04/18 12:57:38  Removed. As you said, this should fall out from re…
+    heap()
+        ->external_string_table_
+        .CleanUp<Heap::ExternalStringTable::CleanupMode::kPromoteOnly>();
+  }
+
   if (FLAG_trace_evacuation) {
     PrintIsolate(isolate(),
                  "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
                  "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
                  " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
                  isolate()->time_millis_since_init(),
                  FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
                  abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
                  V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
                  live_bytes, compaction_speed);
(...skipping 116 matching lines...)
 #ifdef VERIFY_HEAP
 static void VerifyAllBlackObjects(MemoryChunk* page) {
   LiveObjectIterator<kAllLiveObjects> it(page);
   HeapObject* object = NULL;
   while ((object = it.Next()) != NULL) {
     CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
   }
 }
 #endif  // VERIFY_HEAP
 
-
-bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
-                                            HeapObjectVisitor* visitor,
+template <class Visitor>
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
                                             IterationMode mode) {
 #ifdef VERIFY_HEAP
   VerifyAllBlackObjects(page);
 #endif  // VERIFY_HEAP
 
   LiveObjectIterator<kBlackObjects> it(page);
   HeapObject* object = nullptr;
   while ((object = it.Next()) != nullptr) {
     DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
     if (!visitor->Visit(object)) {
(...skipping 140 matching lines...)
       RememberedSet<OLD_TO_OLD>::IterateTyped(
           chunk, [isolate, visitor](SlotType type, Address slot) {
             UpdateTypedSlot(isolate, visitor, type, slot);
             return REMOVE_SLOT;
           });
     }
   }
 
   static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
     MapWord map_word = object->map_word();
-    // Since we only filter invalid slots in old space, the store buffer can
-    // still contain stale pointers in large object and in map spaces. Ignore
-    // these pointers here.
-    DCHECK(map_word.IsForwardingAddress() ||
-           !object->GetHeap()->old_space()->Contains(
-               reinterpret_cast<Address>(address)));
+    // There could still be stale pointers in large object space, map space,
+    // and old space for pages that have been promoted.
     if (map_word.IsForwardingAddress()) {
       // Update the corresponding slot.
       *address = map_word.ToForwardingAddress();
     }
   }
 };
 
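With promoted pages in the picture, the deleted DCHECK above would fire: an old-space page may now legitimately hold old-to-new slots whose targets were never copied and thus carry no forwarding address, so the update simply stays conditional. A minimal sketch, with an invented object model:

// Invented slot update mirroring UpdateOldToNewSlot: only rewrite the slot
// if the target actually moved (has a forwarding pointer); promoted pages
// legitimately produce targets that stayed in place.
struct HeapObjectModel {
  HeapObjectModel* forwarding = nullptr;  // non-null once the object moved
};

void UpdateOldToNewSlot(HeapObjectModel** slot) {
  HeapObjectModel* target = *slot;
  if (target->forwarding != nullptr) {
    *slot = target->forwarding;  // object was evacuated: chase the pointer
  }
  // Otherwise leave the slot alone: stale entries are tolerated now.
}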
 int NumberOfPointerUpdateTasks(int pages) {
   if (!FLAG_parallel_pointer_update) return 1;
   const int kMaxTasks = 4;
(...skipping 315 matching lines...)
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(host, &rinfo, target);
     }
   }
 }
 
 }  // namespace internal
 }  // namespace v8