OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 647 matching lines...)
658 | 658 |
659 // Pairs of (live_bytes_in_page, page). | 659 // Pairs of (live_bytes_in_page, page). |
660 typedef std::pair<int, Page*> LiveBytesPagePair; | 660 typedef std::pair<int, Page*> LiveBytesPagePair; |
661 std::vector<LiveBytesPagePair> pages; | 661 std::vector<LiveBytesPagePair> pages; |
662 pages.reserve(number_of_pages); | 662 pages.reserve(number_of_pages); |
663 | 663 |
664 PageIterator it(space); | 664 PageIterator it(space); |
665 while (it.has_next()) { | 665 while (it.has_next()) { |
666 Page* p = it.next(); | 666 Page* p = it.next(); |
667 if (p->NeverEvacuate()) continue; | 667 if (p->NeverEvacuate()) continue; |
668 if (p->IsFlagSet(Page::POPULAR_PAGE)) { | |
669 // This page had a slots buffer overflow in a previous GC; skip it. |
670 p->ClearFlag(Page::POPULAR_PAGE); | |
671 continue; | |
672 } | |
673 // Invariant: Evacuation candidates are only created when marking is | 668 // Invariant: Evacuation candidates are only created when marking is |
674 // started. This means that sweeping has finished. Furthermore, at the end | 669 // started. This means that sweeping has finished. Furthermore, at the end |
675 // of a GC all evacuation candidates are cleared and their slot buffers are | 670 // of a GC all evacuation candidates are cleared and their slot buffers are |
676 // released. | 671 // released. |
677 CHECK(!p->IsEvacuationCandidate()); | 672 CHECK(!p->IsEvacuationCandidate()); |
678 CHECK_NULL(p->old_to_old_slots()); | 673 CHECK_NULL(p->old_to_old_slots()); |
679 CHECK_NULL(p->typed_old_to_old_slots()); | 674 CHECK_NULL(p->typed_old_to_old_slots()); |
680 CHECK(p->SweepingDone()); | 675 CHECK(p->SweepingDone()); |
681 DCHECK(p->area_size() == area_size); | 676 DCHECK(p->area_size() == area_size); |
682 pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p)); | 677 pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p)); |
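The (live_bytes, page) pairs collected here feed candidate selection: pages with little live data are the cheapest to evacuate, so a typical next step is to sort ascending by live bytes and take a prefix. A minimal standalone sketch of that idea, with plain ints standing in for Page* (hypothetical stand-ins, not the V8 types):

    #include <algorithm>
    #include <utility>
    #include <vector>

    // (live_bytes, page id) pairs; mirrors LiveBytesPagePair above.
    using LiveBytesPagePair = std::pair<int, int>;

    std::vector<LiveBytesPagePair> PickCheapestPages(
        std::vector<LiveBytesPagePair> pages, size_t max_candidates) {
      // Ascending by live bytes: less live data means less copying work.
      std::sort(pages.begin(), pages.end(),
                [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                  return a.first < b.first;
                });
      if (pages.size() > max_candidates) pages.resize(max_candidates);
      return pages;
    }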
(...skipping 2268 matching lines...)
2951 new_space->ResetAllocationInfo(); | 2946 new_space->ResetAllocationInfo(); |
2952 } | 2947 } |
2953 | 2948 |
2954 void MarkCompactCollector::EvacuateNewSpaceEpilogue() { | 2949 void MarkCompactCollector::EvacuateNewSpaceEpilogue() { |
2955 newspace_evacuation_candidates_.Rewind(0); | 2950 newspace_evacuation_candidates_.Rewind(0); |
2956 } | 2951 } |
2957 | 2952 |
2958 | 2953 |
2959 class MarkCompactCollector::Evacuator : public Malloced { | 2954 class MarkCompactCollector::Evacuator : public Malloced { |
2960 public: | 2955 public: |
2961 Evacuator(MarkCompactCollector* collector, | 2956 explicit Evacuator(MarkCompactCollector* collector) |
2962 const List<Page*>& evacuation_candidates, | |
2963 const List<NewSpacePage*>& newspace_evacuation_candidates) | |
2964 : collector_(collector), | 2957 : collector_(collector), |
2965 evacuation_candidates_(evacuation_candidates), | |
2966 newspace_evacuation_candidates_(newspace_evacuation_candidates), | |
2967 compaction_spaces_(collector->heap()), | 2958 compaction_spaces_(collector->heap()), |
2968 local_pretenuring_feedback_(HashMap::PointersMatch, | 2959 local_pretenuring_feedback_(HashMap::PointersMatch, |
2969 kInitialLocalPretenuringFeedbackCapacity), | 2960 kInitialLocalPretenuringFeedbackCapacity), |
2970 new_space_visitor_(collector->heap(), &compaction_spaces_, | 2961 new_space_visitor_(collector->heap(), &compaction_spaces_, |
2971 &old_to_old_slots_, &old_to_new_slots_, | 2962 &old_to_old_slots_, &old_to_new_slots_, |
2972 &local_pretenuring_feedback_), | 2963 &local_pretenuring_feedback_), |
2973 old_space_visitor_(collector->heap(), &compaction_spaces_, | 2964 old_space_visitor_(collector->heap(), &compaction_spaces_, |
2974 &old_to_old_slots_, &old_to_new_slots_), | 2965 &old_to_old_slots_, &old_to_new_slots_), |
2975 duration_(0.0), | 2966 duration_(0.0), |
2976 bytes_compacted_(0), | 2967 bytes_compacted_(0) {} |
2977 task_id_(0) {} | |
2978 | 2968 |
2979 // Evacuate the configured set of pages in parallel. | 2969 inline bool EvacuatePage(MemoryChunk* chunk); |
2980 inline void EvacuatePages(); | |
2981 | 2970 |
2982 // Merge back locally cached info sequentially. Note that this method needs | 2971 // Merge back locally cached info sequentially. Note that this method needs |
2983 // to be called from the main thread. | 2972 // to be called from the main thread. |
2984 inline void Finalize(); | 2973 inline void Finalize(); |
2985 | 2974 |
2986 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } | 2975 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } |
2987 | 2976 |
2988 uint32_t task_id() { return task_id_; } | |
2989 void set_task_id(uint32_t id) { task_id_ = id; } | |
2990 | |
2991 private: | 2977 private: |
2992 static const int kInitialLocalPretenuringFeedbackCapacity = 256; | 2978 static const int kInitialLocalPretenuringFeedbackCapacity = 256; |
2993 | 2979 |
2994 Heap* heap() { return collector_->heap(); } | 2980 Heap* heap() { return collector_->heap(); } |
2995 | 2981 |
2996 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { | 2982 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { |
2997 duration_ += duration; | 2983 duration_ += duration; |
2998 bytes_compacted_ += bytes_compacted; | 2984 bytes_compacted_ += bytes_compacted; |
2999 } | 2985 } |
3000 | 2986 |
3001 inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor); | 2987 inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor); |
3002 | 2988 |
3003 MarkCompactCollector* collector_; | 2989 MarkCompactCollector* collector_; |
3004 | 2990 |
3005 // Pages to process. | |
3006 const List<Page*>& evacuation_candidates_; | |
3007 const List<NewSpacePage*>& newspace_evacuation_candidates_; | |
3008 | |
3009 // Locally cached collector data. | 2991 // Locally cached collector data. |
3010 CompactionSpaceCollection compaction_spaces_; | 2992 CompactionSpaceCollection compaction_spaces_; |
3011 LocalSlotsBuffer old_to_old_slots_; | 2993 LocalSlotsBuffer old_to_old_slots_; |
3012 LocalSlotsBuffer old_to_new_slots_; | 2994 LocalSlotsBuffer old_to_new_slots_; |
3013 HashMap local_pretenuring_feedback_; | 2995 HashMap local_pretenuring_feedback_; |
3014 | 2996 |
3015 // Visitors for the corresponding spaces. | 2997 // Visitors for the corresponding spaces. |
3016 EvacuateNewSpaceVisitor new_space_visitor_; | 2998 EvacuateNewSpaceVisitor new_space_visitor_; |
3017 EvacuateOldSpaceVisitor old_space_visitor_; | 2999 EvacuateOldSpaceVisitor old_space_visitor_; |
3018 | 3000 |
3019 // Bookkeeping info. | 3001 // Bookkeeping info. |
3020 double duration_; | 3002 double duration_; |
3021 intptr_t bytes_compacted_; | 3003 intptr_t bytes_compacted_; |
3022 | |
3023 // Task id, if this evacuator is executed on a background task instead of | |
3024 // the main thread. Can be used to try to abort the task currently scheduled | |
3025 // to be executed to evacuate pages. |
3026 uint32_t task_id_; | |
3027 }; | 3004 }; |
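As wired up in EvacuatePagesInParallel below, the intended lifecycle is: construct one Evacuator per task, have each task call EvacuatePage on the chunks it is handed, then merge all locally cached state back sequentially on the main thread. A condensed sketch (collector and the chunk list are hypothetical stand-ins):

    // One evacuator per worker. EvacuatePage may run on a background thread;
    // Finalize must run on the main thread since it touches shared heap state.
    Evacuator* evacuator = new Evacuator(collector);
    for (MemoryChunk* chunk : chunks_for_this_task) {  // hypothetical list
      evacuator->EvacuatePage(chunk);
    }
    // Later, sequentially on the main thread:
    evacuator->Finalize();
    delete evacuator;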
3028 | 3005 |
3029 bool MarkCompactCollector::Evacuator::EvacuateSinglePage( | 3006 bool MarkCompactCollector::Evacuator::EvacuateSinglePage( |
3030 MemoryChunk* p, HeapObjectVisitor* visitor) { | 3007 MemoryChunk* p, HeapObjectVisitor* visitor) { |
3031 bool success = true; | 3008 bool success = false; |
3032 if (p->parallel_compaction_state().TrySetValue( | 3009 DCHECK(p->IsEvacuationCandidate() || p->InNewSpace()); |
3033 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { | 3010 int saved_live_bytes = p->LiveBytes(); |
3034 if (p->IsEvacuationCandidate() || p->InNewSpace()) { | 3011 double evacuation_time; |
3035 DCHECK_EQ(p->parallel_compaction_state().Value(), | 3012 { |
3036 MemoryChunk::kCompactingInProgress); | 3013 AlwaysAllocateScope always_allocate(heap()->isolate()); |
3037 int saved_live_bytes = p->LiveBytes(); | 3014 TimedScope timed_scope(&evacuation_time); |
3038 double evacuation_time; | 3015 success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits); |
3039 { | 3016 } |
3040 AlwaysAllocateScope always_allocate(heap()->isolate()); | 3017 if (success) { |
3041 TimedScope timed_scope(&evacuation_time); | 3018 ReportCompactionProgress(evacuation_time, saved_live_bytes); |
3042 success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits); | |
3043 } | |
3044 if (success) { | |
3045 ReportCompactionProgress(evacuation_time, saved_live_bytes); | |
3046 p->parallel_compaction_state().SetValue( | |
3047 MemoryChunk::kCompactingFinalize); | |
3048 } else { | |
3049 p->parallel_compaction_state().SetValue( | |
3050 MemoryChunk::kCompactingAborted); | |
3051 } | |
3052 } else { | |
3053 // There could be popular pages in the list of evacuation candidates | |
3054 // which we do not compact. | |
3055 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | |
3056 } | |
3057 } | 3019 } |
3058 return success; | 3020 return success; |
3059 } | 3021 } |
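The TimedScope used above is an RAII timer: it records a start time on construction and, when the scope closes, writes the elapsed milliseconds into the double* it was given, which is why evacuation_time is valid right after the block. A self-contained approximation of that assumed behavior (a sketch, not V8's actual implementation):

    #include <chrono>

    class TimedScopeSketch {
     public:
      explicit TimedScopeSketch(double* result_ms)
          : result_ms_(result_ms), start_(std::chrono::steady_clock::now()) {}
      ~TimedScopeSketch() {
        std::chrono::duration<double, std::milli> elapsed =
            std::chrono::steady_clock::now() - start_;
        *result_ms_ = elapsed.count();  // written as the scope exits
      }

     private:
      double* result_ms_;
      std::chrono::steady_clock::time_point start_;
    };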
3060 | 3022 |
3061 void MarkCompactCollector::Evacuator::EvacuatePages() { | 3023 bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) { |
3062 for (NewSpacePage* p : newspace_evacuation_candidates_) { | 3024 bool success = false; |
3063 DCHECK(p->InNewSpace()); | 3025 if (chunk->InNewSpace()) { |
3064 DCHECK_EQ(p->concurrent_sweeping_state().Value(), | 3026 DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), |
3065 NewSpacePage::kSweepingDone); | 3027 NewSpacePage::kSweepingDone); |
3066 bool success = EvacuateSinglePage(p, &new_space_visitor_); | 3028 success = EvacuateSinglePage(chunk, &new_space_visitor_); |
3067 DCHECK(success); | 3029 DCHECK(success); |
3068 USE(success); | 3030 USE(success); |
| 3031 } else { |
| 3032 DCHECK(chunk->IsEvacuationCandidate() || |
| 3033 chunk->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION)); |
| 3034 DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone); |
| 3035 success = EvacuateSinglePage(chunk, &old_space_visitor_); |
3069 } | 3036 } |
3070 for (Page* p : evacuation_candidates_) { | 3037 return success; |
3071 DCHECK(p->IsEvacuationCandidate() || | |
3072 p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION)); | |
3073 DCHECK_EQ(p->concurrent_sweeping_state().Value(), Page::kSweepingDone); | |
3074 EvacuateSinglePage(p, &old_space_visitor_); | |
3075 } | |
3076 } | 3038 } |
3077 | 3039 |
3078 void MarkCompactCollector::Evacuator::Finalize() { | 3040 void MarkCompactCollector::Evacuator::Finalize() { |
3079 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); | 3041 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); |
3080 heap()->code_space()->MergeCompactionSpace( | 3042 heap()->code_space()->MergeCompactionSpace( |
3081 compaction_spaces_.Get(CODE_SPACE)); | 3043 compaction_spaces_.Get(CODE_SPACE)); |
3082 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); | 3044 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); |
3083 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size()); | 3045 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size()); |
3084 heap()->IncrementSemiSpaceCopiedObjectSize( | 3046 heap()->IncrementSemiSpaceCopiedObjectSize( |
3085 new_space_visitor_.semispace_copied_size()); | 3047 new_space_visitor_.semispace_copied_size()); |
(...skipping 12 matching lines...)
3098 [](Address slot) { | 3060 [](Address slot) { |
3099 Page* page = Page::FromAddress(slot); | 3061 Page* page = Page::FromAddress(slot); |
3100 RememberedSet<OLD_TO_OLD>::Insert(page, slot); | 3062 RememberedSet<OLD_TO_OLD>::Insert(page, slot); |
3101 }, | 3063 }, |
3102 [](SlotType type, Address slot) { | 3064 [](SlotType type, Address slot) { |
3103 Page* page = Page::FromAddress(slot); | 3065 Page* page = Page::FromAddress(slot); |
3104 RememberedSet<OLD_TO_OLD>::InsertTyped(page, type, slot); | 3066 RememberedSet<OLD_TO_OLD>::InsertTyped(page, type, slot); |
3105 }); | 3067 }); |
3106 } | 3068 } |
3107 | 3069 |
3108 class MarkCompactCollector::CompactionTask : public CancelableTask { | |
3109 public: | |
3110 explicit CompactionTask(Heap* heap, Evacuator* evacuator) | |
3111 : CancelableTask(heap->isolate()), heap_(heap), evacuator_(evacuator) { | |
3112 evacuator->set_task_id(id()); | |
3113 } | |
3114 | |
3115 virtual ~CompactionTask() {} | |
3116 | |
3117 private: | |
3118 // v8::internal::CancelableTask overrides. | |
3119 void RunInternal() override { | |
3120 evacuator_->EvacuatePages(); | |
3121 heap_->mark_compact_collector() | |
3122 ->pending_compaction_tasks_semaphore_.Signal(); | |
3123 } | |
3124 | |
3125 Heap* heap_; | |
3126 Evacuator* evacuator_; | |
3127 | |
3128 DISALLOW_COPY_AND_ASSIGN(CompactionTask); | |
3129 }; | |
3130 | |
3131 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, | 3070 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, |
3132 intptr_t live_bytes) { | 3071 intptr_t live_bytes) { |
3133 if (!FLAG_parallel_compaction) return 1; | 3072 if (!FLAG_parallel_compaction) return 1; |
3134 // Compute the number of needed tasks based on a target compaction time, the | 3073 // Compute the number of needed tasks based on a target compaction time, the |
3135 // profiled compaction speed and marked live memory. | 3074 // profiled compaction speed and marked live memory. |
3136 // | 3075 // |
3137 // The number of parallel compaction tasks is limited by: | 3076 // The number of parallel compaction tasks is limited by: |
3138 // - #evacuation pages | 3077 // - #evacuation pages |
3139 // - (#cores - 1) | 3078 // - (#cores - 1) |
3140 const double kTargetCompactionTimeInMs = 1; | 3079 const double kTargetCompactionTimeInMs = 1; |
(...skipping 10 matching lines...)
3151 if (compaction_speed > 0) { | 3090 if (compaction_speed > 0) { |
3152 tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) / | 3091 tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) / |
3153 compaction_speed / kTargetCompactionTimeInMs); | 3092 compaction_speed / kTargetCompactionTimeInMs); |
3154 } else { | 3093 } else { |
3155 tasks = pages; | 3094 tasks = pages; |
3156 } | 3095 } |
3157 const int tasks_capped_pages = Min(pages, tasks); | 3096 const int tasks_capped_pages = Min(pages, tasks); |
3158 return Min(available_cores, tasks_capped_pages); | 3097 return Min(available_cores, tasks_capped_pages); |
3159 } | 3098 } |
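Concrete numbers make the sizing formula above easier to check: with 4 MB of live bytes, a profiled speed of 1 MB/ms, and the 1 ms target, tasks = 1 + 4/1/1 = 5; that is then capped to min(#pages, tasks) and finally to the available cores. A hypothetical standalone check of the arithmetic:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const double kTargetCompactionTimeInMs = 1;
      const int pages = 8;                          // evacuation candidates
      const long live_bytes = 4 * 1024 * 1024;      // 4 MB marked live
      const double compaction_speed = 1024 * 1024;  // 1 MB/ms (profiled)
      const int available_cores = 3;                // e.g. 4 cores - 1

      int tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
                                       compaction_speed /
                                       kTargetCompactionTimeInMs);
      const int tasks_capped_pages = std::min(pages, tasks);  // min(8, 5) = 5
      std::printf("tasks=%d\n", std::min(available_cores, tasks_capped_pages));
      // Prints tasks=3: the core count is the binding limit here.
      return 0;
    }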
3160 | 3099 |
| 3100 class EvacuationJobTraits { |
| 3101 public: |
| 3102 typedef int* PerPageData; // Pointer to number of aborted pages. |
| 3103 typedef MarkCompactCollector::Evacuator* PerTaskData; |
| 3104 |
| 3105 static const bool NeedSequentialFinalization = true; |
| 3106 |
| 3107 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator, |
| 3108 MemoryChunk* chunk, PerPageData) { |
| 3109 return evacuator->EvacuatePage(chunk); |
| 3110 } |
| 3111 |
| 3112 static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success, |
| 3113 PerPageData data) { |
| 3114 if (chunk->InNewSpace()) { |
| 3115 DCHECK(success); |
| 3116 } else { |
| 3117 Page* p = static_cast<Page*>(chunk); |
| 3118 if (success) { |
| 3119 DCHECK(p->IsEvacuationCandidate()); |
| 3120 DCHECK(p->SweepingDone()); |
| 3121 p->Unlink(); |
| 3122 } else { |
| 3123 // We have partially compacted the page, i.e., some objects may have |
| 3124 // moved, others are still in place. |
| 3125 // We need to: |
| 3126 // - Leave the evacuation candidate flag for later processing of slots |
| 3127 // buffer entries. |
| 3128 // - Leave the slots buffer there for processing of entries added by |
| 3129 // the write barrier. |
| 3130 // - Rescan the page as slot recording in the migration buffer only |
| 3131 // happens upon moving (which we potentially didn't do). |
| 3132 // - Leave the page in the list of pages of a space since we could not |
| 3133 // fully evacuate it. |
| 3134 DCHECK(p->IsEvacuationCandidate()); |
| 3135 p->SetFlag(Page::COMPACTION_WAS_ABORTED); |
| 3136 *data += 1; |
| 3137 } |
| 3138 } |
| 3139 } |
| 3140 }; |
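These traits are consumed by PageParallelJob (defined outside this hunk). In rough terms, the job distributes the registered pages across tasks, invokes ProcessPageInParallel for each page with that task's PerTaskData, and, because NeedSequentialFinalization is true, replays FinalizePageSequentially for every page on the main thread with the recorded result. A simplified single-threaded stand-in illustrating that contract (a sketch under those assumptions, not the real scheduler):

    #include <cstddef>
    #include <utility>
    #include <vector>

    template <typename Traits>
    void RunJobSketch(
        Heap* heap,
        std::vector<std::pair<MemoryChunk*,
                              typename Traits::PerPageData>>& pages,
        typename Traits::PerTaskData task_data) {
      std::vector<bool> results;
      for (auto& entry : pages) {  // the real job spreads this across tasks
        results.push_back(Traits::ProcessPageInParallel(
            heap, task_data, entry.first, entry.second));
      }
      if (Traits::NeedSequentialFinalization) {  // main thread only
        for (size_t i = 0; i < pages.size(); i++) {
          Traits::FinalizePageSequentially(heap, pages[i].first, results[i],
                                           pages[i].second);
        }
      }
    }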
3161 | 3141 |
3162 void MarkCompactCollector::EvacuatePagesInParallel() { | 3142 void MarkCompactCollector::EvacuatePagesInParallel() { |
3163 int num_pages = 0; | 3143 PageParallelJob<EvacuationJobTraits> job( |
| 3144 heap_, heap_->isolate()->cancelable_task_manager()); |
| 3145 |
| 3146 int abandoned_pages = 0; |
3164 intptr_t live_bytes = 0; | 3147 intptr_t live_bytes = 0; |
3165 for (Page* page : evacuation_candidates_) { | 3148 for (Page* page : evacuation_candidates_) { |
3166 num_pages++; | |
3167 live_bytes += page->LiveBytes(); | 3149 live_bytes += page->LiveBytes(); |
| 3150 job.AddPage(page, &abandoned_pages); |
3168 } | 3151 } |
3169 for (NewSpacePage* page : newspace_evacuation_candidates_) { | 3152 for (NewSpacePage* page : newspace_evacuation_candidates_) { |
3170 num_pages++; | |
3171 live_bytes += page->LiveBytes(); | 3153 live_bytes += page->LiveBytes(); |
| 3154 job.AddPage(page, &abandoned_pages); |
3172 } | 3155 } |
3173 DCHECK_GE(num_pages, 1); | 3156 DCHECK_GE(job.NumberOfPages(), 1); |
3174 | 3157 |
3175 // Used for trace summary. | 3158 // Used for trace summary. |
3176 intptr_t compaction_speed = 0; | 3159 intptr_t compaction_speed = 0; |
3177 if (FLAG_trace_fragmentation) { | 3160 if (FLAG_trace_fragmentation) { |
3178 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3161 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
3179 } | 3162 } |
3180 | 3163 |
3181 const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes); | 3164 const int wanted_num_tasks = |
3182 | 3165 NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes); |
3183 // Set up compaction spaces. | 3166 Evacuator** evacuators = new Evacuator*[wanted_num_tasks]; |
3184 Evacuator** evacuators = new Evacuator*[num_tasks]; | 3167 for (int i = 0; i < wanted_num_tasks; i++) { |
3185 for (int i = 0; i < num_tasks; i++) { | 3168 evacuators[i] = new Evacuator(this); |
3186 evacuators[i] = new Evacuator(this, evacuation_candidates_, | |
3187 newspace_evacuation_candidates_); | |
3188 } | 3169 } |
3189 | 3170 job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; }); |
3190 // Kick off parallel tasks. | 3171 for (int i = 0; i < wanted_num_tasks; i++) { |
3191 StartParallelCompaction(evacuators, num_tasks); | |
3192 // Wait for unfinished and not-yet-started tasks. | |
3193 WaitUntilCompactionCompleted(&evacuators[1], num_tasks - 1); | |
3194 | |
3195 // Finalize local evacuators by merging back all locally cached data. | |
3196 for (int i = 0; i < num_tasks; i++) { | |
3197 evacuators[i]->Finalize(); | 3172 evacuators[i]->Finalize(); |
3198 delete evacuators[i]; | 3173 delete evacuators[i]; |
3199 } | 3174 } |
3200 delete[] evacuators; | 3175 delete[] evacuators; |
3201 | 3176 |
3202 // Finalize pages sequentially. | |
3203 for (NewSpacePage* p : newspace_evacuation_candidates_) { | |
3204 DCHECK_EQ(p->parallel_compaction_state().Value(), | |
3205 MemoryChunk::kCompactingFinalize); | |
3206 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | |
3207 } | |
3208 | |
3209 int abandoned_pages = 0; | |
3210 for (Page* p : evacuation_candidates_) { | |
3211 switch (p->parallel_compaction_state().Value()) { | |
3212 case MemoryChunk::ParallelCompactingState::kCompactingAborted: | |
3213 // We have partially compacted the page, i.e., some objects may have | |
3214 // moved, others are still in place. | |
3215 // We need to: | |
3216 // - Leave the evacuation candidate flag for later processing of | |
3217 // slots buffer entries. | |
3218 // - Leave the slots buffer there for processing of entries added by | |
3219 // the write barrier. | |
3220 // - Rescan the page as slot recording in the migration buffer only | |
3221 // happens upon moving (which we potentially didn't do). | |
3222 // - Leave the page in the list of pages of a space since we could not | |
3223 // fully evacuate it. | |
3224 // - Mark them for rescanning for store buffer entries as we otherwise | |
3225 // might have stale store buffer entries that become "valid" again | |
3226 // after reusing the memory. Note that all existing store buffer | |
3227 // entries of such pages are filtered before rescanning. | |
3228 DCHECK(p->IsEvacuationCandidate()); | |
3229 p->SetFlag(Page::COMPACTION_WAS_ABORTED); | |
3230 abandoned_pages++; | |
3231 break; | |
3232 case MemoryChunk::kCompactingFinalize: | |
3233 DCHECK(p->IsEvacuationCandidate()); | |
3234 DCHECK(p->SweepingDone()); | |
3235 p->Unlink(); | |
3236 break; | |
3237 case MemoryChunk::kCompactingDone: | |
3238 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE)); | |
3239 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | |
3240 break; | |
3241 default: | |
3242 // MemoryChunk::kCompactingInProgress. | |
3243 UNREACHABLE(); | |
3244 } | |
3245 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | |
3246 } | |
3247 if (FLAG_trace_fragmentation) { | 3177 if (FLAG_trace_fragmentation) { |
3248 PrintIsolate(isolate(), | 3178 PrintIsolate(isolate(), |
3249 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d " | 3179 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d " |
3250 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX | 3180 "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX |
3251 "d compaction_speed=%" V8_PTR_PREFIX "d\n", | 3181 "d compaction_speed=%" V8_PTR_PREFIX "d\n", |
3252 isolate()->time_millis_since_init(), FLAG_parallel_compaction, | 3182 isolate()->time_millis_since_init(), FLAG_parallel_compaction, |
3253 num_pages, abandoned_pages, num_tasks, | 3183 job.NumberOfPages(), abandoned_pages, wanted_num_tasks, |
3254 base::SysInfo::NumberOfProcessors(), live_bytes, | 3184 job.NumberOfTasks(), |
3255 compaction_speed); | 3185 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(), |
| 3186 live_bytes, compaction_speed); |
3256 } | 3187 } |
3257 } | 3188 } |
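Note the division of labor above: the lambda passed to job.Run maps a task index to its per-task Evacuator, so each worker writes only to its own locally cached slot buffers and no locking is needed; the Finalize/delete loop then merges everything back on the main thread. The trace line reports both wanted_num_tasks and job.NumberOfTasks(), presumably because the job may end up running with fewer tasks than requested.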
3258 | 3189 |
3259 void MarkCompactCollector::StartParallelCompaction(Evacuator** evacuators, | |
3260 int len) { | |
3261 compaction_in_progress_ = true; | |
3262 for (int i = 1; i < len; i++) { | |
3263 CompactionTask* task = new CompactionTask(heap(), evacuators[i]); | |
3264 V8::GetCurrentPlatform()->CallOnBackgroundThread( | |
3265 task, v8::Platform::kShortRunningTask); | |
3266 } | |
3267 | |
3268 // Contribute on main thread. | |
3269 evacuators[0]->EvacuatePages(); | |
3270 } | |
3271 | |
3272 void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators, | |
3273 int len) { | |
3274 // Try to cancel compaction tasks that have not been run (as they might be | |
3275 // stuck in a worker queue). Tasks that cannot be canceled, have either | |
3276 // already completed or are still running, hence we need to wait for their | |
3277 // semaphore signal. | |
3278 for (int i = 0; i < len; i++) { | |
3279 if (!heap()->isolate()->cancelable_task_manager()->TryAbort( | |
3280 evacuators[i]->task_id())) { | |
3281 pending_compaction_tasks_semaphore_.Wait(); | |
3282 } | |
3283 } | |
3284 compaction_in_progress_ = false; | |
3285 } | |
3286 | |
3287 | |
3288 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 3190 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
3289 public: | 3191 public: |
3290 virtual Object* RetainAs(Object* object) { | 3192 virtual Object* RetainAs(Object* object) { |
3291 if (object->IsHeapObject()) { | 3193 if (object->IsHeapObject()) { |
3292 HeapObject* heap_object = HeapObject::cast(object); | 3194 HeapObject* heap_object = HeapObject::cast(object); |
3293 MapWord map_word = heap_object->map_word(); | 3195 MapWord map_word = heap_object->map_word(); |
3294 if (map_word.IsForwardingAddress()) { | 3196 if (map_word.IsForwardingAddress()) { |
3295 return map_word.ToForwardingAddress(); | 3197 return map_word.ToForwardingAddress(); |
3296 } | 3198 } |
3297 } | 3199 } |
(...skipping 659 matching lines...)
3957 MarkBit mark_bit = Marking::MarkBitFrom(host); | 3859 MarkBit mark_bit = Marking::MarkBitFrom(host); |
3958 if (Marking::IsBlack(mark_bit)) { | 3860 if (Marking::IsBlack(mark_bit)) { |
3959 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 3861 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
3960 RecordRelocSlot(host, &rinfo, target); | 3862 RecordRelocSlot(host, &rinfo, target); |
3961 } | 3863 } |
3962 } | 3864 } |
3963 } | 3865 } |
3964 | 3866 |
3965 } // namespace internal | 3867 } // namespace internal |
3966 } // namespace v8 | 3868 } // namespace v8 |