OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 42 matching lines...) | |
53 state_(IDLE), | 53 state_(IDLE), |
54 #endif | 54 #endif |
55 marking_parity_(ODD_MARKING_PARITY), | 55 marking_parity_(ODD_MARKING_PARITY), |
56 was_marked_incrementally_(false), | 56 was_marked_incrementally_(false), |
57 evacuation_(false), | 57 evacuation_(false), |
58 heap_(heap), | 58 heap_(heap), |
59 marking_deque_memory_(NULL), | 59 marking_deque_memory_(NULL), |
60 marking_deque_memory_committed_(0), | 60 marking_deque_memory_committed_(0), |
61 code_flusher_(nullptr), | 61 code_flusher_(nullptr), |
62 have_code_to_deoptimize_(false), | 62 have_code_to_deoptimize_(false), |
63 sweeping_list_shared_(nullptr), | |
63 compacting_(false), | 64 compacting_(false), |
64 sweeping_in_progress_(false), | 65 sweeping_in_progress_(false), |
65 pending_sweeper_tasks_semaphore_(0), | 66 pending_sweeper_tasks_semaphore_(0), |
66 pending_compaction_tasks_semaphore_(0) { | 67 pending_compaction_tasks_semaphore_(0) { |
67 } | 68 } |
68 | 69 |
69 #ifdef VERIFY_HEAP | 70 #ifdef VERIFY_HEAP |
70 class VerifyMarkingVisitor : public ObjectVisitor { | 71 class VerifyMarkingVisitor : public ObjectVisitor { |
71 public: | 72 public: |
72 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} | 73 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} |
(...skipping 401 matching lines...) | |
474 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); | 475 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); |
475 const int offset = space_to_start_ - FIRST_PAGED_SPACE; | 476 const int offset = space_to_start_ - FIRST_PAGED_SPACE; |
476 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; | 477 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; |
477 for (int i = 0; i < num_spaces; i++) { | 478 for (int i = 0; i < num_spaces; i++) { |
478 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); | 479 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); |
479 DCHECK_GE(space_id, FIRST_PAGED_SPACE); | 480 DCHECK_GE(space_id, FIRST_PAGED_SPACE); |
480 DCHECK_LE(space_id, LAST_PAGED_SPACE); | 481 DCHECK_LE(space_id, LAST_PAGED_SPACE); |
481 heap_->mark_compact_collector()->SweepInParallel( | 482 heap_->mark_compact_collector()->SweepInParallel( |
482 heap_->paged_space(space_id), 0); | 483 heap_->paged_space(space_id), 0); |
483 } | 484 } |
485 std::vector<Page*>* shared_sweeping_list = nullptr; | |
486 { | |
487 base::LockGuard<base::Mutex> guard( | |
488 heap_->mark_compact_collector()->swept_pages_mutex()); | |
489 shared_sweeping_list = | |
490 heap_->mark_compact_collector()->sweeping_list_shared_; | |
491 } | |
492 if (shared_sweeping_list != nullptr) { | |
493 heap_->mark_compact_collector()->SweepInParallel(*shared_sweeping_list, | |
494 heap_->old_space(), 0); | |
495 } | |
484 heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal(); | 496 heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal(); |
485 } | 497 } |
486 | 498 |
487 Heap* heap_; | 499 Heap* heap_; |
488 AllocationSpace space_to_start_; | 500 AllocationSpace space_to_start_; |
489 | 501 |
490 DISALLOW_COPY_AND_ASSIGN(SweeperTask); | 502 DISALLOW_COPY_AND_ASSIGN(SweeperTask); |
491 }; | 503 }; |
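Aside on the space rotation in RunInternal above: each SweeperTask starts at its own space_to_start_ and walks the paged spaces with modulo arithmetic, so concurrently running tasks begin on different spaces instead of contending for the same sweeping list. A standalone C++ model of that iteration order (not V8 code; the space ids are stand-ins):

    #include <cstdio>

    int main() {
      const int kFirstPagedSpace = 0;  // stand-in for FIRST_PAGED_SPACE
      const int kLastPagedSpace = 2;   // stand-in for LAST_PAGED_SPACE
      const int num_spaces = kLastPagedSpace - kFirstPagedSpace + 1;
      for (int start = kFirstPagedSpace; start <= kLastPagedSpace; start++) {
        const int offset = start - kFirstPagedSpace;
        std::printf("task starting at space %d sweeps:", start);
        for (int i = 0; i < num_spaces; i++) {
          std::printf(" %d", kFirstPagedSpace + ((i + offset) % num_spaces));
        }
        std::printf("\n");  // e.g. "task starting at space 1 sweeps: 1 2 0"
      }
      return 0;
    }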
492 | 504 |
493 | 505 |
494 void MarkCompactCollector::StartSweeperThreads() { | 506 void MarkCompactCollector::StartSweeperThreads() { |
495 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 507 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
496 new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask); | 508 new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask); |
497 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 509 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
498 new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask); | 510 new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask); |
499 V8::GetCurrentPlatform()->CallOnBackgroundThread( | |
500 new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask); | |
501 } | 511 } |
502 | 512 |
503 | 513 |
504 void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) { | 514 void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) { |
505 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); | 515 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); |
506 if (!page->SweepingDone()) { | 516 if (!page->SweepingDone()) { |
507 SweepInParallel(page, owner); | 517 SweepInParallel(page, owner); |
508 if (!page->SweepingDone()) { | 518 if (!page->SweepingDone()) { |
509 // We were not able to sweep that page, i.e., a concurrent | 519 // We were not able to sweep that page, i.e., a concurrent |
510 // sweeper thread currently owns this page. Wait for the sweeper | 520 // sweeper thread currently owns this page. Wait for the sweeper |
(...skipping 1247 matching lines...) | |
1758 return allocation; | 1768 return allocation; |
1759 } | 1769 } |
1760 | 1770 |
1761 LocalAllocationBuffer buffer_; | 1771 LocalAllocationBuffer buffer_; |
1762 AllocationSpace space_to_allocate_; | 1772 AllocationSpace space_to_allocate_; |
1763 intptr_t promoted_size_; | 1773 intptr_t promoted_size_; |
1764 intptr_t semispace_copied_size_; | 1774 intptr_t semispace_copied_size_; |
1765 HashMap* local_pretenuring_feedback_; | 1775 HashMap* local_pretenuring_feedback_; |
1766 }; | 1776 }; |
1767 | 1777 |
1778 class MarkCompactCollector::EvacuateNewSpacePageVisitor final | |
1779 : public MarkCompactCollector::HeapObjectVisitor { | |
1780 public: | |
1781 EvacuateNewSpacePageVisitor() : promoted_size_(0) {} | |
1782 | |
1783 static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) { | |
1784 page->heap()->new_space()->ReplaceWithEmptyPage(page); | |
Hannes Payer (out of office), 2016/04/08 10:42:40: Why don't we take care of the new space size at th
Michael Lippautz, 2016/04/08 11:30:00: As discussed offline: Let's keep it for now. We ca
| |
1785 Page* new_page = Page::Convert(page, owner); | |
1786 new_page->SetFlag(Page::FAST_EVACUATION); | |
1787 } | |
1788 | |
1789 bool Visit(HeapObject* object) { | |
1790 promoted_size_ += object->Size(); | |
1791 if (V8_UNLIKELY(object->IsJSArrayBuffer())) { | |
1792 object->GetHeap()->array_buffer_tracker()->Promote( | |
1793 JSArrayBuffer::cast(object)); | |
1794 } | |
1795 RecordMigratedSlotVisitor visitor; | |
1796 object->IterateBodyFast(&visitor); | |
1797 return true; | |
1798 } | |
1799 | |
1800 intptr_t promoted_size() { return promoted_size_; } | |
1801 | |
1802 private: | |
1803 intptr_t promoted_size_; | |
1804 }; | |
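The visitor above implements the core of this CL: instead of copying every live object out of a dense new-space page, ReplaceWithEmptyPage swaps a fresh page into the semispace, Page::Convert re-registers the old page with its new paged-space owner, and Visit only updates per-object bookkeeping (promoted size, array-buffer tracking, migrated slots) while the objects stay put. A standalone sketch of the cost difference (not V8 code; the types are illustrative):

    #include <cstdio>
    #include <vector>

    struct Object { int size; };
    struct Page {
      std::vector<Object> objects;
      bool in_new_space = true;
    };

    // Copying promotion: work proportional to the live bytes moved.
    int PromoteByCopy(Page* from, Page* to) {
      int copied = 0;
      for (const Object& o : from->objects) {
        to->objects.push_back(o);
        copied += o.size;
      }
      from->objects.clear();
      return copied;
    }

    // Page promotion: the page is relabeled in place; the visit only accounts.
    int PromoteWholePage(Page* page) {
      page->in_new_space = false;  // analogous to Page::Convert + flag flip
      int promoted = 0;
      for (const Object& o : page->objects) promoted += o.size;  // like Visit()
      return promoted;
    }

    int main() {
      Page page{{{100}, {200}, {300}}};
      std::printf("promoted %d bytes without copying\n", PromoteWholePage(&page));
      return 0;
    }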
1768 | 1805 |
1769 class MarkCompactCollector::EvacuateOldSpaceVisitor final | 1806 class MarkCompactCollector::EvacuateOldSpaceVisitor final |
1770 : public MarkCompactCollector::EvacuateVisitorBase { | 1807 : public MarkCompactCollector::EvacuateVisitorBase { |
1771 public: | 1808 public: |
1772 EvacuateOldSpaceVisitor(Heap* heap, | 1809 EvacuateOldSpaceVisitor(Heap* heap, |
1773 CompactionSpaceCollection* compaction_spaces) | 1810 CompactionSpaceCollection* compaction_spaces) |
1774 : EvacuateVisitorBase(heap, compaction_spaces) {} | 1811 : EvacuateVisitorBase(heap, compaction_spaces) {} |
1775 | 1812 |
1776 bool Visit(HeapObject* object) override { | 1813 bool Visit(HeapObject* object) override { |
1777 CompactionSpace* target_space = compaction_spaces_->Get( | 1814 CompactionSpace* target_space = compaction_spaces_->Get( |
(...skipping 1145 matching lines...) | |
2923 newspace_evacuation_candidates_.Add(it.next()); | 2960 newspace_evacuation_candidates_.Add(it.next()); |
2924 } | 2961 } |
2925 new_space->Flip(); | 2962 new_space->Flip(); |
2926 new_space->ResetAllocationInfo(); | 2963 new_space->ResetAllocationInfo(); |
2927 } | 2964 } |
2928 | 2965 |
2929 void MarkCompactCollector::EvacuateNewSpaceEpilogue() { | 2966 void MarkCompactCollector::EvacuateNewSpaceEpilogue() { |
2930 newspace_evacuation_candidates_.Rewind(0); | 2967 newspace_evacuation_candidates_.Rewind(0); |
2931 } | 2968 } |
2932 | 2969 |
2933 | |
2934 class MarkCompactCollector::Evacuator : public Malloced { | 2970 class MarkCompactCollector::Evacuator : public Malloced { |
2935 public: | 2971 public: |
2972 // NewSpacePages with more live bytes than this threshold qualify for fast | |
2973 // evacuation. | |
2974 static int FastEvacuationThreshold() { | |
2975 return FLAG_page_evacuation_threshold * NewSpacePage::kAllocatableMemory / | |
2976 100; | |
2977 } | |
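To make the threshold concrete, a standalone check of the arithmetic (the flag value and page size below are assumptions for illustration, not values taken from this CL):

    #include <cstdio>

    int main() {
      const int kAllocatableMemory = 500 * 1024;  // assumed ~500 KB per page
      const int page_evacuation_threshold = 70;   // assumed flag value, percent
      // Mirrors FastEvacuationThreshold(): a percentage of the allocatable area.
      const int threshold = page_evacuation_threshold * kAllocatableMemory / 100;
      std::printf("pages with more than %d live bytes are promoted wholesale\n",
                  threshold);  // prints 358400 with the numbers above
      return 0;
    }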
2978 | |
2936 explicit Evacuator(MarkCompactCollector* collector) | 2979 explicit Evacuator(MarkCompactCollector* collector) |
2937 : collector_(collector), | 2980 : collector_(collector), |
2938 compaction_spaces_(collector->heap()), | 2981 compaction_spaces_(collector->heap()), |
2939 local_pretenuring_feedback_(HashMap::PointersMatch, | 2982 local_pretenuring_feedback_(HashMap::PointersMatch, |
2940 kInitialLocalPretenuringFeedbackCapacity), | 2983 kInitialLocalPretenuringFeedbackCapacity), |
2941 new_space_visitor_(collector->heap(), &compaction_spaces_, | 2984 evac_new_space_visitor_(collector->heap(), &compaction_spaces_, |
2942 &local_pretenuring_feedback_), | 2985 &local_pretenuring_feedback_), |
2943 old_space_visitor_(collector->heap(), &compaction_spaces_), | 2986 evac_new_space_page_visitor_(), |
2987 evac_old_space_visitor_(collector->heap(), &compaction_spaces_), | |
2944 duration_(0.0), | 2988 duration_(0.0), |
2945 bytes_compacted_(0) {} | 2989 bytes_compacted_(0) {} |
2946 | 2990 |
2947 inline bool EvacuatePage(MemoryChunk* chunk); | 2991 inline bool EvacuatePage(MemoryChunk* chunk); |
2948 | 2992 |
2949 // Merge back locally cached info sequentially. Note that this method needs | 2993 // Merge back locally cached info sequentially. Note that this method needs |
2950 // to be called from the main thread. | 2994 // to be called from the main thread. |
2951 inline void Finalize(); | 2995 inline void Finalize(); |
2952 | 2996 |
2953 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } | 2997 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } |
2954 | 2998 |
2955 private: | 2999 private: |
2956 static const int kInitialLocalPretenuringFeedbackCapacity = 256; | 3000 static const int kInitialLocalPretenuringFeedbackCapacity = 256; |
2957 | 3001 |
2958 Heap* heap() { return collector_->heap(); } | 3002 Heap* heap() { return collector_->heap(); } |
2959 | 3003 |
2960 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { | 3004 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { |
2961 duration_ += duration; | 3005 duration_ += duration; |
2962 bytes_compacted_ += bytes_compacted; | 3006 bytes_compacted_ += bytes_compacted; |
2963 } | 3007 } |
2964 | 3008 |
3009 template <IterationMode mode> | |
2965 inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor); | 3010 inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor); |
2966 | 3011 |
2967 MarkCompactCollector* collector_; | 3012 MarkCompactCollector* collector_; |
2968 | 3013 |
2969 // Locally cached collector data. | 3014 // Locally cached collector data. |
2970 CompactionSpaceCollection compaction_spaces_; | 3015 CompactionSpaceCollection compaction_spaces_; |
2971 HashMap local_pretenuring_feedback_; | 3016 HashMap local_pretenuring_feedback_; |
2972 | 3017 |
2973 // Visitors for the corresponding spaces. | 3018 // Visitors for the corresponding spaces. |
2974 EvacuateNewSpaceVisitor new_space_visitor_; | 3019 EvacuateNewSpaceVisitor evac_new_space_visitor_; |
2975 EvacuateOldSpaceVisitor old_space_visitor_; | 3020 EvacuateNewSpacePageVisitor evac_new_space_page_visitor_; |
3021 EvacuateOldSpaceVisitor evac_old_space_visitor_; | |
2976 | 3022 |
2977 // Bookkeeping info. | 3023 // Bookkeeping info. |
2978 double duration_; | 3024 double duration_; |
2979 intptr_t bytes_compacted_; | 3025 intptr_t bytes_compacted_; |
2980 }; | 3026 }; |
2981 | 3027 |
3028 template <MarkCompactCollector::IterationMode mode> | |
2982 bool MarkCompactCollector::Evacuator::EvacuateSinglePage( | 3029 bool MarkCompactCollector::Evacuator::EvacuateSinglePage( |
2983 MemoryChunk* p, HeapObjectVisitor* visitor) { | 3030 MemoryChunk* p, HeapObjectVisitor* visitor) { |
2984 bool success = false; | 3031 bool success = false; |
2985 DCHECK(p->IsEvacuationCandidate() || p->InNewSpace()); | 3032 DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() || |
3033 p->IsFlagSet(Page::FAST_EVACUATION)); | |
2986 int saved_live_bytes = p->LiveBytes(); | 3034 int saved_live_bytes = p->LiveBytes(); |
2987 double evacuation_time; | 3035 double evacuation_time; |
2988 { | 3036 { |
2989 AlwaysAllocateScope always_allocate(heap()->isolate()); | 3037 AlwaysAllocateScope always_allocate(heap()->isolate()); |
2990 TimedScope timed_scope(&evacuation_time); | 3038 TimedScope timed_scope(&evacuation_time); |
2991 success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits); | 3039 success = collector_->VisitLiveObjects(p, visitor, mode); |
2992 } | 3040 } |
2993 if (FLAG_trace_evacuation) { | 3041 if (FLAG_trace_evacuation) { |
2994 PrintIsolate(heap()->isolate(), | 3042 PrintIsolate( |
2995 "evacuation[%p]: page=%p new_space=%d executable=%d " | 3043 heap()->isolate(), |
2996 "live_bytes=%d time=%f\n", | 3044 "evacuation[%p]: page=%p page_evacuation=%d new_space=%d executable=%d " |
2997 this, p, p->InNewSpace(), | 3045 "live_bytes=%d time=%f\n", |
2998 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes, | 3046 this, p, mode == kKeepMarking, p->InNewSpace(), |
2999 evacuation_time); | 3047 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes, |
3048 evacuation_time); | |
3000 } | 3049 } |
3001 if (success) { | 3050 if (success) { |
3002 ReportCompactionProgress(evacuation_time, saved_live_bytes); | 3051 ReportCompactionProgress(evacuation_time, saved_live_bytes); |
3003 } | 3052 } |
3004 return success; | 3053 return success; |
3005 } | 3054 } |
3006 | 3055 |
3007 bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) { | 3056 bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) { |
3008 bool success = false; | 3057 bool success = false; |
3009 if (chunk->InNewSpace()) { | 3058 if (chunk->InNewSpace()) { |
3010 DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), | 3059 DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), |
3011 NewSpacePage::kSweepingDone); | 3060 NewSpacePage::kSweepingDone); |
3012 success = EvacuateSinglePage(chunk, &new_space_visitor_); | 3061 success = |
3062 EvacuateSinglePage<kClearMarkbits>(chunk, &evac_new_space_visitor_); | |
3013 DCHECK(success); | 3063 DCHECK(success); |
3014 USE(success); | 3064 USE(success); |
3015 } else { | 3065 } else { |
3016 DCHECK(chunk->IsEvacuationCandidate()); | |
3017 DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone); | 3066 DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone); |
3018 success = EvacuateSinglePage(chunk, &old_space_visitor_); | 3067 if (chunk->IsFlagSet(MemoryChunk::FAST_EVACUATION)) { |
3068 success = EvacuateSinglePage<kKeepMarking>(chunk, | |
3069 &evac_new_space_page_visitor_); | |
3070 } else { | |
3071 DCHECK(chunk->IsEvacuationCandidate()); | |
3072 success = | |
3073 EvacuateSinglePage<kClearMarkbits>(chunk, &evac_old_space_visitor_); | |
3074 } | |
3019 } | 3075 } |
3020 return success; | 3076 return success; |
3021 } | 3077 } |
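Note the asymmetry in iteration modes above: an ordinarily evacuated page is left empty, so its mark bits can be cleared right away, while a fast-evacuated page keeps its objects in place and is swept later using exactly those mark bits, hence kKeepMarking. A minimal model of that dispatch (not V8 code):

    #include <cstdio>

    enum IterationMode { kClearMarkbits, kKeepMarking };

    struct Page {
      bool fast_evacuation;
      bool marks_valid = true;
    };

    void VisitLiveObjects(Page* p, IterationMode mode) {
      // After a copying evacuation nothing is left to sweep on the page.
      if (mode == kClearMarkbits) p->marks_valid = false;
    }

    int main() {
      Page promoted{true};    // moved wholesale; the sweeper still needs marks
      Page evacuated{false};  // objects copied away; marks can go
      VisitLiveObjects(&promoted,
                       promoted.fast_evacuation ? kKeepMarking : kClearMarkbits);
      VisitLiveObjects(&evacuated,
                       evacuated.fast_evacuation ? kKeepMarking : kClearMarkbits);
      std::printf("promoted marks valid: %d, evacuated marks valid: %d\n",
                  promoted.marks_valid, evacuated.marks_valid);
      return 0;
    }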
3022 | 3078 |
3023 void MarkCompactCollector::Evacuator::Finalize() { | 3079 void MarkCompactCollector::Evacuator::Finalize() { |
3024 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); | 3080 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); |
3025 heap()->code_space()->MergeCompactionSpace( | 3081 heap()->code_space()->MergeCompactionSpace( |
3026 compaction_spaces_.Get(CODE_SPACE)); | 3082 compaction_spaces_.Get(CODE_SPACE)); |
3027 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); | 3083 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); |
3028 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size()); | 3084 heap()->IncrementPromotedObjectsSize( |
3085 evac_new_space_visitor_.promoted_size() + | |
3086 evac_new_space_page_visitor_.promoted_size()); | |
3029 heap()->IncrementSemiSpaceCopiedObjectSize( | 3087 heap()->IncrementSemiSpaceCopiedObjectSize( |
3030 new_space_visitor_.semispace_copied_size()); | 3088 evac_new_space_visitor_.semispace_copied_size()); |
3031 heap()->IncrementYoungSurvivorsCounter( | 3089 heap()->IncrementYoungSurvivorsCounter( |
3032 new_space_visitor_.promoted_size() + | 3090 evac_new_space_visitor_.promoted_size() + |
3033 new_space_visitor_.semispace_copied_size()); | 3091 evac_new_space_visitor_.semispace_copied_size() + |
3092 evac_new_space_page_visitor_.promoted_size()); | |
3034 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); | 3093 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); |
3035 } | 3094 } |
3036 | 3095 |
3037 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, | 3096 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, |
3038 intptr_t live_bytes) { | 3097 intptr_t live_bytes) { |
3039 if (!FLAG_parallel_compaction) return 1; | 3098 if (!FLAG_parallel_compaction) return 1; |
3040 // Compute the number of needed tasks based on a target compaction time, the | 3099 // Compute the number of needed tasks based on a target compaction time, the |
3041 // profiled compaction speed and marked live memory. | 3100 // profiled compaction speed and marked live memory. |
3042 // | 3101 // |
3043 // The number of parallel compaction tasks is limited by: | 3102 // The number of parallel compaction tasks is limited by: |
(...skipping 29 matching lines...) | |
3073 | 3132 |
3074 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, | 3133 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, |
3075 MemoryChunk* chunk, PerPageData) { | 3134 MemoryChunk* chunk, PerPageData) { |
3076 return evacuator->EvacuatePage(chunk); | 3135 return evacuator->EvacuatePage(chunk); |
3077 } | 3136 } |
3078 | 3137 |
3079 static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success, | 3138 static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success, |
3080 PerPageData data) { | 3139 PerPageData data) { |
3081 if (chunk->InNewSpace()) { | 3140 if (chunk->InNewSpace()) { |
3082 DCHECK(success); | 3141 DCHECK(success); |
3142 } else if (chunk->IsFlagSet(Page::FAST_EVACUATION)) { | |
3143 // Nothing to do here, as the page is still owned by the compaction space. | |
3083 } else { | 3144 } else { |
3084 Page* p = static_cast<Page*>(chunk); | 3145 Page* p = static_cast<Page*>(chunk); |
3085 if (success) { | 3146 if (success) { |
3086 DCHECK(p->IsEvacuationCandidate()); | 3147 DCHECK(p->IsEvacuationCandidate()); |
3087 DCHECK(p->SweepingDone()); | 3148 DCHECK(p->SweepingDone()); |
3088 p->Unlink(); | 3149 p->Unlink(); |
3089 } else { | 3150 } else { |
3090 // We have partially compacted the page, i.e., some objects may have | 3151 // We have partially compacted the page, i.e., some objects may have |
3091 // moved, others are still in place. | 3152 // moved, others are still in place. |
3092 // We need to: | 3153 // We need to: |
(...skipping 18 matching lines...) | |
3111 heap_, heap_->isolate()->cancelable_task_manager()); | 3172 heap_, heap_->isolate()->cancelable_task_manager()); |
3112 | 3173 |
3113 int abandoned_pages = 0; | 3174 int abandoned_pages = 0; |
3114 intptr_t live_bytes = 0; | 3175 intptr_t live_bytes = 0; |
3115 for (Page* page : evacuation_candidates_) { | 3176 for (Page* page : evacuation_candidates_) { |
3116 live_bytes += page->LiveBytes(); | 3177 live_bytes += page->LiveBytes(); |
3117 job.AddPage(page, &abandoned_pages); | 3178 job.AddPage(page, &abandoned_pages); |
3118 } | 3179 } |
3119 for (NewSpacePage* page : newspace_evacuation_candidates_) { | 3180 for (NewSpacePage* page : newspace_evacuation_candidates_) { |
3120 live_bytes += page->LiveBytes(); | 3181 live_bytes += page->LiveBytes(); |
3182 if (!page->NeverEvacuate() && | |
3183 (page->LiveBytes() > Evacuator::FastEvacuationThreshold()) && | |
3184 page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) { | |
3185 EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space()); | |
3186 } | |
3121 job.AddPage(page, &abandoned_pages); | 3187 job.AddPage(page, &abandoned_pages); |
3122 } | 3188 } |
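The new condition in the loop above encodes the eligibility rule: a new-space page is moved wholesale only if it may be evacuated at all, carries more live bytes than FastEvacuationThreshold(), and lies entirely below the age mark, i.e. all of its objects have already survived one scavenge. Restated as a standalone predicate (not V8 code):

    #include <cstdio>

    struct PageInfo {
      bool never_evacuate;
      int live_bytes;
      bool below_age_mark;
    };

    bool QualifiesForFastEvacuation(const PageInfo& page, int threshold) {
      return !page.never_evacuate && page.live_bytes > threshold &&
             page.below_age_mark;
    }

    int main() {
      PageInfo dense_and_old{false, 400 * 1024, true};
      PageInfo freshly_allocated{false, 400 * 1024, false};
      std::printf("dense_and_old: %d, freshly_allocated: %d\n",
                  QualifiesForFastEvacuation(dense_and_old, 350 * 1024),
                  QualifiesForFastEvacuation(freshly_allocated, 350 * 1024));
      return 0;
    }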
3123 DCHECK_GE(job.NumberOfPages(), 1); | 3189 DCHECK_GE(job.NumberOfPages(), 1); |
3124 | 3190 |
3125 // Used for trace summary. | 3191 // Used for trace summary. |
3126 double compaction_speed = 0; | 3192 double compaction_speed = 0; |
3127 if (FLAG_trace_evacuation) { | 3193 if (FLAG_trace_evacuation) { |
3128 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3194 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
3129 } | 3195 } |
3130 | 3196 |
3131 const int wanted_num_tasks = | 3197 const int wanted_num_tasks = |
3132 NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes); | 3198 NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes); |
3133 Evacuator** evacuators = new Evacuator*[wanted_num_tasks]; | 3199 Evacuator** evacuators = new Evacuator*[wanted_num_tasks]; |
3134 for (int i = 0; i < wanted_num_tasks; i++) { | 3200 for (int i = 0; i < wanted_num_tasks; i++) { |
3135 evacuators[i] = new Evacuator(this); | 3201 evacuators[i] = new Evacuator(this); |
3136 } | 3202 } |
3137 job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); | 3203 job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); |
3138 for (int i = 0; i < wanted_num_tasks; i++) { | 3204 for (int i = 0; i < wanted_num_tasks; i++) { |
3139 evacuators[i]->Finalize(); | 3205 evacuators[i]->Finalize(); |
3140 delete evacuators[i]; | 3206 delete evacuators[i]; |
3141 } | 3207 } |
3142 delete[] evacuators; | 3208 delete[] evacuators; |
3143 | 3209 |
3210 bool fast_evac_pages = false; | |
Hannes Payer (out of office), 2016/04/08 10:42:40: Can you factor this block of code out into a metho
Michael Lippautz, 2016/04/08 11:30:00: Done.
| |
3211 std::vector<Page*>* shared_sweep_list = nullptr; | |
3212 for (MemoryChunk* chunk : newspace_evacuation_candidates_) { | |
3213 if (chunk->IsFlagSet(Page::FAST_EVACUATION)) { | |
3214 Page* page = reinterpret_cast<Page*>(chunk); | |
3215 page->ClearFlag(Page::FAST_EVACUATION); | |
3216 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); | |
3217 PagedSpace* space = static_cast<PagedSpace*>(page->owner()); | |
3218 DCHECK_EQ(space, heap()->old_space()); | |
3219 int to_sweep = page->area_size() - page->LiveBytes(); | |
3220 space->accounting_stats_.ShrinkSpace(to_sweep); | |
3221 space->UnlinkFreeListCategories(page); | |
Hannes Payer (out of office), 2016/04/08 10:42:40: New space pages never have free lists. Can you DCH
Michael Lippautz, 2016/04/08 11:30:00: Done.
| |
3222 page->ForAllFreeListCategories( | |
3223 [](FreeListCategory* category) { category->Reset(); }); | |
3224 if (shared_sweep_list == nullptr) { | |
3225 shared_sweep_list = new std::vector<Page*>(); | |
3226 } | |
3227 shared_sweep_list->push_back(page); | |
3228 fast_evac_pages = true; | |
3229 } | |
3230 } | |
3231 if (fast_evac_pages) { | |
3232 { | |
3233 base::LockGuard<base::Mutex> guard(swept_pages_mutex()); | |
3234 sweeping_list_shared_ = shared_sweep_list; | |
3235 } | |
3236 heap() | |
3237 ->external_string_table_ | |
3238 .CleanUp<Heap::ExternalStringTable::CleanupMode::kPromoteOnly>(); | |
3239 } | |
3240 if (FLAG_concurrent_sweeping) { | |
3241 V8::GetCurrentPlatform()->CallOnBackgroundThread( | |
3242 new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask); | |
3243 } else { | |
3244 if (fast_evac_pages) { | |
3245 SweepInParallel(*sweeping_list_shared_, heap()->old_space(), 0); | |
3246 } | |
3247 } | |
3248 | |
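The block above (which, per the review comments, was factored into its own method in a later patch set) hands the promoted pages over to the sweeper: each page becomes kSweepingPending, its unswept area is deducted from the owning space's accounting, and the list is published under swept_pages_mutex so that SweeperTask can pick it up, matching the read in RunInternal earlier. A standalone model of that mutex-guarded hand-off (not V8 code):

    #include <cstdio>
    #include <mutex>
    #include <vector>

    struct Page { int bytes_to_sweep; };

    std::mutex swept_pages_mutex;
    std::vector<Page*>* sweeping_list_shared = nullptr;

    // Main thread: publish the promoted pages for concurrent sweeping.
    void PublishSharedList(std::vector<Page*>* list) {
      std::lock_guard<std::mutex> guard(swept_pages_mutex);
      sweeping_list_shared = list;
    }

    // Sweeper task: re-read the list under the same mutex before sweeping.
    std::vector<Page*>* AcquireSharedList() {
      std::lock_guard<std::mutex> guard(swept_pages_mutex);
      return sweeping_list_shared;
    }

    int main() {
      Page page{4096};
      std::vector<Page*> pages = {&page};
      PublishSharedList(&pages);
      std::vector<Page*>* list = AcquireSharedList();
      std::printf("pages to sweep: %zu\n", list ? list->size() : size_t{0});
      return 0;
    }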
3144 if (FLAG_trace_evacuation) { | 3249 if (FLAG_trace_evacuation) { |
3145 PrintIsolate( | 3250 PrintIsolate( |
3146 isolate(), | 3251 isolate(), |
3147 "%8.0f ms: evacuation-summary: parallel=%s pages=%d aborted=%d " | 3252 "%8.0f ms: evacuation-summary: parallel=%s pages=%d aborted=%d " |
3148 "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX | 3253 "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX |
3149 "d compaction_speed=%.f\n", | 3254 "d compaction_speed=%.f\n", |
3150 isolate()->time_millis_since_init(), | 3255 isolate()->time_millis_since_init(), |
3151 FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(), | 3256 FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(), |
3152 abandoned_pages, wanted_num_tasks, job.NumberOfTasks(), | 3257 abandoned_pages, wanted_num_tasks, job.NumberOfTasks(), |
3153 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), | 3258 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), |
(...skipping 91 matching lines...) | |
3245 if (free_space_mode == ZAP_FREE_SPACE) { | 3350 if (free_space_mode == ZAP_FREE_SPACE) { |
3246 memset(free_start, 0xcc, size); | 3351 memset(free_start, 0xcc, size); |
3247 } | 3352 } |
3248 freed_bytes = space->UnaccountedFree(free_start, size); | 3353 freed_bytes = space->UnaccountedFree(free_start, size); |
3249 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 3354 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
3250 } | 3355 } |
3251 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3356 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
3252 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | 3357 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
3253 } | 3358 } |
3254 | 3359 |
3255 | |
3256 void MarkCompactCollector::InvalidateCode(Code* code) { | 3360 void MarkCompactCollector::InvalidateCode(Code* code) { |
3257 if (heap_->incremental_marking()->IsCompacting() && | 3361 if (heap_->incremental_marking()->IsCompacting() && |
3258 !ShouldSkipEvacuationSlotRecording(code)) { | 3362 !ShouldSkipEvacuationSlotRecording(code)) { |
3259 DCHECK(compacting_); | 3363 DCHECK(compacting_); |
3260 | 3364 |
3261 // If the object is white then no slots were recorded on it yet. | 3365 // If the object is white then no slots were recorded on it yet. |
3262 MarkBit mark_bit = Marking::MarkBitFrom(code); | 3366 MarkBit mark_bit = Marking::MarkBitFrom(code); |
3263 if (Marking::IsWhite(mark_bit)) return; | 3367 if (Marking::IsWhite(mark_bit)) return; |
3264 | 3368 |
3265 // Ignore all slots that might have been recorded in the body of the | 3369 // Ignore all slots that might have been recorded in the body of the |
(...skipping 199 matching lines...) | |
3465 RememberedSet<OLD_TO_OLD>::IterateTyped( | 3569 RememberedSet<OLD_TO_OLD>::IterateTyped( |
3466 chunk, [isolate, visitor](SlotType type, Address slot) { | 3570 chunk, [isolate, visitor](SlotType type, Address slot) { |
3467 UpdateTypedSlot(isolate, visitor, type, slot); | 3571 UpdateTypedSlot(isolate, visitor, type, slot); |
3468 return REMOVE_SLOT; | 3572 return REMOVE_SLOT; |
3469 }); | 3573 }); |
3470 } | 3574 } |
3471 } | 3575 } |
3472 | 3576 |
3473 static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) { | 3577 static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) { |
3474 MapWord map_word = object->map_word(); | 3578 MapWord map_word = object->map_word(); |
3475 // Since we only filter invalid slots in old space, the store buffer can | 3579 // There could still be stale pointers in large object space, map space, |
3476 // still contain stale pointers in large object and in map spaces. Ignore | 3580 // and old space for pages that have been promoted. |
3477 // these pointers here. | |
3478 DCHECK(map_word.IsForwardingAddress() || | |
3479 !object->GetHeap()->old_space()->Contains( | |
3480 reinterpret_cast<Address>(address))); | |
3481 if (map_word.IsForwardingAddress()) { | 3581 if (map_word.IsForwardingAddress()) { |
3482 // Update the corresponding slot. | 3582 // Update the corresponding slot. |
3483 *address = map_word.ToForwardingAddress(); | 3583 *address = map_word.ToForwardingAddress(); |
3484 } | 3584 } |
3485 } | 3585 } |
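For context on the relaxed DCHECK above: slot updating relies on the map word of an evacuated object's old copy holding a forwarding pointer, and with whole-page promotion stale slots can now also appear in old space, so slots that do not point at a forwarded object are simply skipped rather than asserted against. A standalone model of the fix-up (not V8 code; the map word is modeled as a plain pointer):

    #include <cstdio>

    struct HeapObject {
      HeapObject* forwarding = nullptr;  // stands in for the map word
      bool IsForwardingAddress() const { return forwarding != nullptr; }
    };

    void UpdateSlot(HeapObject** slot) {
      HeapObject* object = *slot;
      // Rewrite the slot only if the object was actually moved.
      if (object->IsForwardingAddress()) *slot = object->forwarding;
    }

    int main() {
      HeapObject old_copy, new_copy, promoted_in_place;
      old_copy.forwarding = &new_copy;
      HeapObject* moved_slot = &old_copy;
      HeapObject* stale_slot = &promoted_in_place;
      UpdateSlot(&moved_slot);
      UpdateSlot(&stale_slot);
      std::printf("moved slot updated: %d, stale slot untouched: %d\n",
                  moved_slot == &new_copy, stale_slot == &promoted_in_place);
      return 0;
    }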
3486 }; | 3586 }; |
3487 | 3587 |
3488 int NumberOfPointerUpdateTasks(int pages) { | 3588 int NumberOfPointerUpdateTasks(int pages) { |
3489 if (!FLAG_parallel_pointer_update) return 1; | 3589 if (!FLAG_parallel_pointer_update) return 1; |
3490 const int kMaxTasks = 4; | 3590 const int kMaxTasks = 4; |
(...skipping 115 matching lines...) | |
3606 } | 3706 } |
3607 evacuation_candidates_.Rewind(0); | 3707 evacuation_candidates_.Rewind(0); |
3608 compacting_ = false; | 3708 compacting_ = false; |
3609 heap()->FreeQueuedChunks(); | 3709 heap()->FreeQueuedChunks(); |
3610 } | 3710 } |
3611 | 3711 |
3612 | 3712 |
3613 int MarkCompactCollector::SweepInParallel(PagedSpace* space, | 3713 int MarkCompactCollector::SweepInParallel(PagedSpace* space, |
3614 int required_freed_bytes, | 3714 int required_freed_bytes, |
3615 int max_pages) { | 3715 int max_pages) { |
3716 return SweepInParallel(sweeping_list(space), space, required_freed_bytes, | |
3717 max_pages); | |
3718 } | |
3719 | |
3720 int MarkCompactCollector::SweepInParallel(std::vector<Page*>& pages, | |
3721 PagedSpace* space, | |
3722 int required_freed_bytes, | |
3723 int max_pages) { | |
3616 int max_freed = 0; | 3724 int max_freed = 0; |
3617 int max_freed_overall = 0; | 3725 int max_freed_overall = 0; |
3618 int page_count = 0; | 3726 int page_count = 0; |
3619 for (Page* p : sweeping_list(space)) { | 3727 for (Page* p : pages) { |
3620 max_freed = SweepInParallel(p, space); | 3728 max_freed = SweepInParallel(p, space); |
3621 DCHECK(max_freed >= 0); | 3729 DCHECK(max_freed >= 0); |
3622 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { | 3730 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { |
3623 return max_freed; | 3731 return max_freed; |
3624 } | 3732 } |
3625 max_freed_overall = Max(max_freed, max_freed_overall); | 3733 max_freed_overall = Max(max_freed, max_freed_overall); |
3626 page_count++; | 3734 page_count++; |
3627 if (max_pages > 0 && page_count >= max_pages) { | 3735 if (max_pages > 0 && page_count >= max_pages) { |
3628 break; | 3736 break; |
3629 } | 3737 } |
3630 } | 3738 } |
3631 return max_freed_overall; | 3739 return max_freed_overall; |
3632 } | 3740 } |
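The refactored overload keeps the original early-exit contract: with required_freed_bytes > 0, sweeping stops at the first page that frees enough for the caller's pending allocation; otherwise it sweeps up to max_pages and reports the largest per-page result. A standalone model of that contract (not V8 code):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int SweepUntil(const std::vector<int>& per_page_freed,
                   int required_freed_bytes) {
      int max_freed_overall = 0;
      for (int freed : per_page_freed) {
        // Early exit once a single page satisfies the caller's request.
        if (required_freed_bytes > 0 && freed >= required_freed_bytes)
          return freed;
        max_freed_overall = std::max(freed, max_freed_overall);
      }
      return max_freed_overall;
    }

    int main() {
      std::printf("%d\n", SweepUntil({100, 400, 50}, 300));  // 400, early exit
      std::printf("%d\n", SweepUntil({100, 400, 50}, 0));    // 400, swept all
      return 0;
    }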
3633 | 3741 |
3634 | |
3635 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { | 3742 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { |
3636 int max_freed = 0; | 3743 int max_freed = 0; |
3637 if (page->mutex()->TryLock()) { | 3744 if (page->mutex()->TryLock()) { |
3638 // If this page was already swept in the meantime, we can return here. | 3745 // If this page was already swept in the meantime, we can return here. |
3639 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { | 3746 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { |
3640 page->mutex()->Unlock(); | 3747 page->mutex()->Unlock(); |
3641 return 0; | 3748 return 0; |
3642 } | 3749 } |
3643 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3750 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
3644 if (space->identity() == OLD_SPACE) { | 3751 if (space->identity() == OLD_SPACE) { |
(...skipping 124 matching lines...) | |
3769 heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() - | 3876 heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() - |
3770 start_time); | 3877 start_time); |
3771 } | 3878 } |
3772 } | 3879 } |
3773 | 3880 |
3774 | 3881 |
3775 void MarkCompactCollector::ParallelSweepSpacesComplete() { | 3882 void MarkCompactCollector::ParallelSweepSpacesComplete() { |
3776 sweeping_list(heap()->old_space()).clear(); | 3883 sweeping_list(heap()->old_space()).clear(); |
3777 sweeping_list(heap()->code_space()).clear(); | 3884 sweeping_list(heap()->code_space()).clear(); |
3778 sweeping_list(heap()->map_space()).clear(); | 3885 sweeping_list(heap()->map_space()).clear(); |
3886 if (sweeping_list_shared_ != nullptr) { | |
3887 base::LockGuard<base::Mutex> guard(swept_pages_mutex()); | |
3888 delete sweeping_list_shared_; | |
3889 sweeping_list_shared_ = nullptr; | |
3890 } | |
3779 } | 3891 } |
3780 | 3892 |
3781 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); } | 3893 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); } |
3782 | 3894 |
3783 | 3895 |
3784 void MarkCompactCollector::Initialize() { | 3896 void MarkCompactCollector::Initialize() { |
3785 MarkCompactMarkingVisitor::Initialize(); | 3897 MarkCompactMarkingVisitor::Initialize(); |
3786 IncrementalMarking::Initialize(); | 3898 IncrementalMarking::Initialize(); |
3787 } | 3899 } |
3788 | 3900 |
(...skipping 17 matching lines...) | |
3806 MarkBit mark_bit = Marking::MarkBitFrom(host); | 3918 MarkBit mark_bit = Marking::MarkBitFrom(host); |
3807 if (Marking::IsBlack(mark_bit)) { | 3919 if (Marking::IsBlack(mark_bit)) { |
3808 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 3920 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
3809 RecordRelocSlot(host, &rinfo, target); | 3921 RecordRelocSlot(host, &rinfo, target); |
3810 } | 3922 } |
3811 } | 3923 } |
3812 } | 3924 } |
3813 | 3925 |
3814 } // namespace internal | 3926 } // namespace internal |
3815 } // namespace v8 | 3927 } // namespace v8 |