Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 1896883003: Revert of 🏄 [heap] Add page evacuation mode for new->old (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 8 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/mark-compact.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(...skipping 1648 matching lines...)
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    CompactionSpaceCollection* compaction_spaces,
                                    HashMap* local_pretenuring_feedback)
       : EvacuateVisitorBase(heap, compaction_spaces),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         space_to_allocate_(NEW_SPACE),
         promoted_size_(0),
         semispace_copied_size_(0),
         local_pretenuring_feedback_(local_pretenuring_feedback) {}

-  inline bool Visit(HeapObject* object) override {
+  bool Visit(HeapObject* object) override {
     heap_->UpdateAllocationSite<Heap::kCached>(object,
                                                local_pretenuring_feedback_);
     int size = object->Size();
     HeapObject* target_object = nullptr;
     if (heap_->ShouldBePromoted(object->address(), size) &&
         TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
                           &target_object)) {
       // If we end up needing more special cases, we should factor this out.
       if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
         heap_->array_buffer_tracker()->Promote(
(...skipping 111 matching lines...)
     return allocation;
   }

   LocalAllocationBuffer buffer_;
   AllocationSpace space_to_allocate_;
   intptr_t promoted_size_;
   intptr_t semispace_copied_size_;
   HashMap* local_pretenuring_feedback_;
 };

-class MarkCompactCollector::EvacuateNewSpacePageVisitor final
-    : public MarkCompactCollector::HeapObjectVisitor {
- public:
-  EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
-
-  static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
-    page->heap()->new_space()->ReplaceWithEmptyPage(page);
-    Page* new_page = Page::Convert(page, owner);
-    new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
-  }
-
-  inline bool Visit(HeapObject* object) {
-    if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
-      object->GetHeap()->array_buffer_tracker()->Promote(
-          JSArrayBuffer::cast(object));
-    }
-    RecordMigratedSlotVisitor visitor;
-    object->IterateBodyFast(&visitor);
-    promoted_size_ += object->Size();
-    return true;
-  }
-
-  intptr_t promoted_size() { return promoted_size_; }
-
- private:
-  intptr_t promoted_size_;
-};

 class MarkCompactCollector::EvacuateOldSpaceVisitor final
     : public MarkCompactCollector::EvacuateVisitorBase {
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
                           CompactionSpaceCollection* compaction_spaces)
       : EvacuateVisitorBase(heap, compaction_spaces) {}

-  inline bool Visit(HeapObject* object) override {
+  bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
         Page::FromAddress(object->address())->owner()->identity());
     HeapObject* target_object = nullptr;
     if (TryEvacuateObject(target_space, object, &target_object)) {
       DCHECK(object->map_word().IsForwardingAddress());
       return true;
     }
     return false;
   }
 };
(...skipping 1194 matching lines...)
     newspace_evacuation_candidates_.Add(it.next());
   }
   new_space->Flip();
   new_space->ResetAllocationInfo();
 }

 void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
   newspace_evacuation_candidates_.Rewind(0);
 }

+
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
-  // NewSpacePages with more live bytes than this threshold qualify for fast
-  // evacuation.
-  static int PageEvacuationThreshold() {
-    if (FLAG_page_promotion)
-      return FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
-             100;
-    return NewSpacePage::kAllocatableMemory + kPointerSize;
-  }
-
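For scale, the reverted threshold works out as follows; a self-contained sketch with assumed stand-in constants (the real flag defaults and NewSpacePage::kAllocatableMemory may differ):

    #include <cstdio>

    int main() {
      // Stand-ins for V8's flags and constants; concrete values are assumptions.
      const bool page_promotion = true;     // FLAG_page_promotion
      const long threshold_percent = 70;    // FLAG_page_promotion_threshold (percent)
      const long allocatable = 500 * 1024;  // NewSpacePage::kAllocatableMemory (assumed ~500 KB)
      const long pointer_size = 8;          // kPointerSize on a 64-bit target

      // With the flag on, a page qualifies once live bytes exceed the percentage;
      // with it off, the returned bound exceeds any possible live-byte count,
      // which disables page promotion entirely.
      const long threshold = page_promotion
                                 ? threshold_percent * allocatable / 100
                                 : allocatable + pointer_size;
      printf("pages with more than %ld live bytes qualify\n", threshold);  // 358400 here
      return 0;
    }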
   explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
         compaction_spaces_(collector->heap()),
         local_pretenuring_feedback_(HashMap::PointersMatch,
                                     kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(collector->heap(), &compaction_spaces_,
                            &local_pretenuring_feedback_),
-        new_space_page_visitor(),
         old_space_visitor_(collector->heap(), &compaction_spaces_),
         duration_(0.0),
         bytes_compacted_(0) {}

   inline bool EvacuatePage(MemoryChunk* chunk);

   // Merge back locally cached info sequentially. Note that this method needs
   // to be called from the main thread.
   inline void Finalize();

   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }

  private:
-  enum EvacuationMode {
-    kObjectsNewToOld,
-    kPageNewToOld,
-    kObjectsOldToOld,
-  };
-
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;

-  inline Heap* heap() { return collector_->heap(); }
-
-  inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
-    // Note: The order of checks is important in this function.
-    if (chunk->InNewSpace()) return kObjectsNewToOld;
-    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
-      return kPageNewToOld;
-    DCHECK(chunk->IsEvacuationCandidate());
-    return kObjectsOldToOld;
-  }
+  Heap* heap() { return collector_->heap(); }
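The reverted dispatch distinguished three evacuation modes, and its comment stresses that check order matters. A minimal standalone rendering with simplified stand-in types (not the V8 API):

    enum EvacuationMode { kObjectsNewToOld, kPageNewToOld, kObjectsOldToOld };

    struct ChunkState {
      bool in_new_space;            // stand-in for MemoryChunk::InNewSpace()
      bool page_new_old_promotion;  // stand-in for the PAGE_NEW_OLD_PROMOTION flag
    };

    // Mirrors the reverted ComputeEvacuationMode(): live new-space pages are
    // classified before the promotion flag is consulted, and anything left
    // must be an old-space evacuation candidate.
    EvacuationMode ComputeMode(const ChunkState& chunk) {
      if (chunk.in_new_space) return kObjectsNewToOld;
      if (chunk.page_new_old_promotion) return kPageNewToOld;
      return kObjectsOldToOld;
    }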

   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
     duration_ += duration;
     bytes_compacted_ += bytes_compacted;
   }

-  template <IterationMode mode, class Visitor>
-  inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);
+  template <IterationMode mode>
+  inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);

   MarkCompactCollector* collector_;

   // Locally cached collector data.
   CompactionSpaceCollection compaction_spaces_;
   HashMap local_pretenuring_feedback_;

   // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
-  EvacuateNewSpacePageVisitor new_space_page_visitor;
   EvacuateOldSpaceVisitor old_space_visitor_;

   // Book keeping info.
   double duration_;
   intptr_t bytes_compacted_;
 };

-template <MarkCompactCollector::IterationMode mode, class Visitor>
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
-                                                         Visitor* visitor) {
+template <MarkCompactCollector::IterationMode mode>
+bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
+    MemoryChunk* p, HeapObjectVisitor* visitor) {
   bool success = false;
-  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
-         p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
   int saved_live_bytes = p->LiveBytes();
   double evacuation_time;
   {
     AlwaysAllocateScope always_allocate(heap()->isolate());
     TimedScope timed_scope(&evacuation_time);
-    success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
+    success = collector_->VisitLiveObjects(p, visitor, mode);
   }
   if (FLAG_trace_evacuation) {
     const char age_mark_tag =
         !p->InNewSpace()
             ? 'x'
             : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)
                   ? '>'
                   : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<'
                                                                        : '#';
     PrintIsolate(heap()->isolate(),
                  "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
-                 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
+                 "executable=%d live_bytes=%d time=%f\n",
                  this, p, p->InNewSpace(), age_mark_tag,
-                 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
                  p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
                  evacuation_time);
   }
   if (success) {
     ReportCompactionProgress(evacuation_time, saved_live_bytes);
   }
   return success;
 }

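The nested ternary computing age_mark_tag above is dense; as a reading aid, a hypothetical standalone version of the same four-way classification (simplified boolean parameters, not the V8 API):

    // 'x': page is not in new space at all.
    // '>': new-space page allocated entirely after the age mark.
    // '<': page lies below the age mark, but the mark itself is elsewhere.
    // '#': page contains the age mark.
    char AgeMarkTag(bool in_new_space, bool below_age_mark, bool contains_mark) {
      if (!in_new_space) return 'x';
      if (!below_age_mark) return '>';
      if (!contains_mark) return '<';
      return '#';
    }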
 bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
-  bool result = false;
-  DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
-            NewSpacePage::kSweepingDone);
-  switch (ComputeEvacuationMode(chunk)) {
-    case kObjectsNewToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
-      DCHECK(result);
-      USE(result);
-      break;
-    case kPageNewToOld:
-      result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
-      DCHECK(result);
-      USE(result);
-      break;
-    case kObjectsOldToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
-      if (!result) {
-        // Aborted compaction page. We can record slots here to have them
-        // processed in parallel later on.
-        EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
-        result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
-        DCHECK(result);
-        USE(result);
-        // We need to return failure here to indicate that we want this page
-        // added to the sweeper.
-        return false;
-      }
-      break;
-    default:
-      UNREACHABLE();
-  }
-  return result;
+  bool success = false;
+  if (chunk->InNewSpace()) {
+    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
+              NewSpacePage::kSweepingDone);
+    success = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
+    DCHECK(success);
+    USE(success);
+  } else {
+    DCHECK(chunk->IsEvacuationCandidate());
+    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
+    success = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
+    if (!success) {
+      // Aborted compaction page. We can record slots here to have them
+      // processed in parallel later on.
+      EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
+      success = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
+      DCHECK(success);
+      USE(success);
+      // We need to return failure here to indicate that we want this page added
+      // to the sweeper.
+      return false;
+    }
+  }
+  return success;
 }

 void MarkCompactCollector::Evacuator::Finalize() {
   heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
   heap()->code_space()->MergeCompactionSpace(
       compaction_spaces_.Get(CODE_SPACE));
   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
-  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
-                                       new_space_page_visitor.promoted_size());
+  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
   heap()->IncrementSemiSpaceCopiedObjectSize(
       new_space_visitor_.semispace_copied_size());
   heap()->IncrementYoungSurvivorsCounter(
       new_space_visitor_.promoted_size() +
-      new_space_visitor_.semispace_copied_size() +
-      new_space_page_visitor.promoted_size());
+      new_space_visitor_.semispace_copied_size());
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
 }

 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
                                                           intptr_t live_bytes) {
   if (!FLAG_parallel_compaction) return 1;
   // Compute the number of needed tasks based on a target compaction time, the
   // profiled compaction speed and marked live memory.
   //
   // The number of parallel compaction tasks is limited by:
(...skipping 29 matching lines...)

 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
                                   MemoryChunk* chunk, PerPageData) {
   return evacuator->EvacuatePage(chunk);
 }

 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
                                      bool success, PerPageData data) {
   if (chunk->InNewSpace()) {
     DCHECK(success);
-  } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
-    DCHECK(success);
-    Page* p = static_cast<Page*>(chunk);
-    p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
-    p->ForAllFreeListCategories(
-        [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
-    heap->mark_compact_collector()->sweeper().AddLatePage(
-        p->owner()->identity(), p);
   } else {
     Page* p = static_cast<Page*>(chunk);
     if (success) {
       DCHECK(p->IsEvacuationCandidate());
       DCHECK(p->SweepingDone());
       p->Unlink();
     } else {
       // We have partially compacted the page, i.e., some objects may have
       // moved, others are still in place.
       p->SetFlag(Page::COMPACTION_WAS_ABORTED);
       p->ClearEvacuationCandidate();
       // Slots have already been recorded so we just need to add it to the
       // sweeper.
       *data += 1;
     }
   }
 }
 };

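For orientation, the EvacuationJobTraits hooks above are consumed by PageParallelJob (see EvacuatePagesInParallel below). A sequential, hypothetical sketch of how such a driver might use the two hooks; the real job distributes pages across tasks via the cancelable task manager:

    #include <utility>
    #include <vector>

    // Stand-in types; the real ones live in src/heap/.
    struct Heap;
    struct MemoryChunk;

    // Simplified, single-threaded stand-in for PageParallelJob<Traits>.
    template <typename Traits>
    class PageJobSketch {
     public:
      void AddPage(MemoryChunk* chunk, typename Traits::PerPageData data) {
        pages_.emplace_back(chunk, data);
      }

      void Run(Heap* heap, typename Traits::PerTaskData task_data) {
        for (auto& page : pages_) {
          // Process each page, then finalize it with the per-page result.
          const bool ok = Traits::ProcessPageInParallel(heap, task_data,
                                                        page.first, page.second);
          Traits::FinalizePageSequentially(heap, page.first, ok, page.second);
        }
      }

     private:
      std::vector<std::pair<MemoryChunk*, typename Traits::PerPageData>> pages_;
    };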
 void MarkCompactCollector::EvacuatePagesInParallel() {
   PageParallelJob<EvacuationJobTraits> job(
       heap_, heap_->isolate()->cancelable_task_manager());

   int abandoned_pages = 0;
   intptr_t live_bytes = 0;
   for (Page* page : evacuation_candidates_) {
     live_bytes += page->LiveBytes();
     job.AddPage(page, &abandoned_pages);
   }
-  const Address age_mark = heap()->new_space()->age_mark();
   for (NewSpacePage* page : newspace_evacuation_candidates_) {
     live_bytes += page->LiveBytes();
-    if (!page->NeverEvacuate() &&
-        (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
-        page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
-        !page->Contains(age_mark)) {
-      EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
-    }
     job.AddPage(page, &abandoned_pages);
   }
   DCHECK_GE(job.NumberOfPages(), 1);

   // Used for trace summary.
   double compaction_speed = 0;
   if (FLAG_trace_evacuation) {
     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
   }

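The selection removed above gated wholesale page promotion on four conditions: roughly, only evacuable pages that are nearly full of objects old enough to sit entirely below the age mark qualified. A hedged standalone restatement (stand-in parameters, not the V8 API):

    // A new-space page was promoted wholesale only if all four held:
    //   1. the page may be evacuated at all (!NeverEvacuate()),
    //   2. its live bytes exceed PageEvacuationThreshold(),
    //   3. the page lies below the age mark (NEW_SPACE_BELOW_AGE_MARK), i.e.
    //      its objects have already survived at least one scavenge, and
    //   4. the age mark itself is not inside the page.
    bool QualifiesForPagePromotion(bool never_evacuate, long live_bytes,
                                   long threshold, bool below_age_mark,
                                   bool contains_age_mark) {
      return !never_evacuate && live_bytes > threshold && below_age_mark &&
             !contains_age_mark;
    }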
(...skipping 139 matching lines...)
 #ifdef VERIFY_HEAP
 static void VerifyAllBlackObjects(MemoryChunk* page) {
   LiveObjectIterator<kAllLiveObjects> it(page);
   HeapObject* object = NULL;
   while ((object = it.Next()) != NULL) {
     CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
   }
 }
 #endif  // VERIFY_HEAP

-template <class Visitor>
-bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
+
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
+                                            HeapObjectVisitor* visitor,
                                             IterationMode mode) {
 #ifdef VERIFY_HEAP
   VerifyAllBlackObjects(page);
 #endif  // VERIFY_HEAP

   LiveObjectIterator<kBlackObjects> it(page);
   HeapObject* object = nullptr;
   while ((object = it.Next()) != nullptr) {
     DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
     if (!visitor->Visit(object)) {
(...skipping 140 matching lines...)
     RememberedSet<OLD_TO_OLD>::IterateTyped(
         chunk, [isolate, visitor](SlotType type, Address slot) {
           UpdateTypedSlot(isolate, visitor, type, slot);
           return REMOVE_SLOT;
         });
   }
 }

 static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
   MapWord map_word = object->map_word();
-  // There could still be stale pointers in large object space, map space,
-  // and old space for pages that have been promoted.
+  // Since we only filter invalid slots in old space, the store buffer can
+  // still contain stale pointers in large object and in map spaces. Ignore
+  // these pointers here.
+  DCHECK(map_word.IsForwardingAddress() ||
+         !object->GetHeap()->old_space()->Contains(
+             reinterpret_cast<Address>(address)));
   if (map_word.IsForwardingAddress()) {
     // Update the corresponding slot.
     *address = map_word.ToForwardingAddress();
   }
 }
 };

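For readers outside V8: evacuation overwrites a moved object's map word with a forwarding address, and slot updating chases it, as UpdateOldToNewSlot does above. A minimal self-contained sketch under simplified assumptions (the real MapWord packs the pointer into the object header):

    struct HeapObject;

    // Assumed simplification: an explicit field instead of a tagged map word.
    struct MapWord {
      HeapObject* forwarded = nullptr;  // non-null once the object has moved
      bool IsForwardingAddress() const { return forwarded != nullptr; }
      HeapObject* ToForwardingAddress() const { return forwarded; }
    };

    struct HeapObject {
      MapWord map_word;
    };

    // Mirrors the slot-update idiom: rewrite the slot only if its target
    // moved; other entries (including stale large-object or map-space
    // pointers) are left alone.
    void UpdateSlot(HeapObject** slot) {
      MapWord w = (*slot)->map_word;
      if (w.IsForwardingAddress()) *slot = w.ToForwardingAddress();
    }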
 int NumberOfPointerUpdateTasks(int pages) {
   if (!FLAG_parallel_pointer_update) return 1;
   const int kMaxTasks = 4;
(...skipping 315 matching lines...)
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(host, &rinfo, target);
     }
   }
 }

 }  // namespace internal
 }  // namespace v8