OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
11 #include "src/compilation-cache.h" | 11 #include "src/compilation-cache.h" |
12 #include "src/deoptimizer.h" | 12 #include "src/deoptimizer.h" |
13 #include "src/execution.h" | 13 #include "src/execution.h" |
14 #include "src/frames-inl.h" | 14 #include "src/frames-inl.h" |
15 #include "src/gdb-jit.h" | 15 #include "src/gdb-jit.h" |
16 #include "src/global-handles.h" | 16 #include "src/global-handles.h" |
17 #include "src/heap/array-buffer-tracker.h" | 17 #include "src/heap/array-buffer-tracker.h" |
18 #include "src/heap/gc-tracer.h" | 18 #include "src/heap/gc-tracer.h" |
19 #include "src/heap/incremental-marking.h" | 19 #include "src/heap/incremental-marking.h" |
20 #include "src/heap/mark-compact-inl.h" | 20 #include "src/heap/mark-compact-inl.h" |
21 #include "src/heap/object-stats.h" | 21 #include "src/heap/object-stats.h" |
22 #include "src/heap/objects-visiting-inl.h" | 22 #include "src/heap/objects-visiting-inl.h" |
23 #include "src/heap/objects-visiting.h" | 23 #include "src/heap/objects-visiting.h" |
24 #include "src/heap/page-parallel-job.h" | |
24 #include "src/heap/spaces-inl.h" | 25 #include "src/heap/spaces-inl.h" |
25 #include "src/ic/ic.h" | 26 #include "src/ic/ic.h" |
26 #include "src/ic/stub-cache.h" | 27 #include "src/ic/stub-cache.h" |
27 #include "src/profiler/cpu-profiler.h" | 28 #include "src/profiler/cpu-profiler.h" |
28 #include "src/utils-inl.h" | 29 #include "src/utils-inl.h" |
29 #include "src/v8.h" | 30 #include "src/v8.h" |
30 | 31 |
31 namespace v8 { | 32 namespace v8 { |
32 namespace internal { | 33 namespace internal { |
33 | 34 |
(...skipping 2747 matching lines...)
2781 !MarkCompactCollector::IsOnEvacuationCandidate(target)); | 2782 !MarkCompactCollector::IsOnEvacuationCandidate(target)); |
2782 } | 2783 } |
2783 } | 2784 } |
2784 | 2785 |
2785 private: | 2786 private: |
2786 inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); } | 2787 inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); } |
2787 | 2788 |
2788 Heap* heap_; | 2789 Heap* heap_; |
2789 }; | 2790 }; |
2790 | 2791 |
2791 | 2792 static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) { |
Michael Lippautz 2016/03/09 09:48:53: If its only use is within PointerUpdateJobTraits, …
2792 static void UpdatePointer(HeapObject** address, HeapObject* object) { | |
2793 MapWord map_word = object->map_word(); | 2793 MapWord map_word = object->map_word(); |
2794 // Since we only filter invalid slots in old space, the store buffer can | 2794 // Since we only filter invalid slots in old space, the store buffer can |
2795 // still contain stale pointers in large object and in map spaces. Ignore | 2795 // still contain stale pointers in large object and in map spaces. Ignore |
2796 // these pointers here. | 2796 // these pointers here. |
2797 DCHECK(map_word.IsForwardingAddress() || | 2797 DCHECK(map_word.IsForwardingAddress() || |
2798 !object->GetHeap()->old_space()->Contains( | 2798 !object->GetHeap()->old_space()->Contains( |
2799 reinterpret_cast<Address>(address))); | 2799 reinterpret_cast<Address>(address))); |
2800 if (map_word.IsForwardingAddress()) { | 2800 if (map_word.IsForwardingAddress()) { |
2801 // Update the corresponding slot. | 2801 // Update the corresponding slot. |
2802 *address = map_word.ToForwardingAddress(); | 2802 *address = map_word.ToForwardingAddress(); |
(...skipping 348 matching lines...)
3151 // | 3151 // |
3152 // The number of parallel compaction tasks is limited by: | 3152 // The number of parallel compaction tasks is limited by: |
3153 // - #evacuation pages | 3153 // - #evacuation pages |
3154 // - (#cores - 1) | 3154 // - (#cores - 1) |
3155 const double kTargetCompactionTimeInMs = 1; | 3155 const double kTargetCompactionTimeInMs = 1; |
3156 const int kNumSweepingTasks = 3; | 3156 const int kNumSweepingTasks = 3; |
3157 | 3157 |
3158 intptr_t compaction_speed = | 3158 intptr_t compaction_speed = |
3159 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3159 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
3160 | 3160 |
3161 const int available_cores = | 3161 const int available_cores = Max( |
3162 Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1); | 3162 1, static_cast<int>( |
Michael Lippautz 2016/03/09 09:48:53: Thanks for the drive-by-fix ;)
ulan 2016/03/09 17:34:18: Done.
3163 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) - | |
3164 kNumSweepingTasks - 1); | |
3163 int tasks; | 3165 int tasks; |
3164 if (compaction_speed > 0) { | 3166 if (compaction_speed > 0) { |
3165 tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) / | 3167 tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) / |
3166 compaction_speed / kTargetCompactionTimeInMs); | 3168 compaction_speed / kTargetCompactionTimeInMs); |
3167 } else { | 3169 } else { |
3168 tasks = pages; | 3170 tasks = pages; |
3169 } | 3171 } |
3170 const int tasks_capped_pages = Min(pages, tasks); | 3172 const int tasks_capped_pages = Min(pages, tasks); |
3171 return Min(available_cores, tasks_capped_pages); | 3173 return Min(available_cores, tasks_capped_pages); |
3172 } | 3174 } |
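For reference, a minimal standalone sketch of the task-count heuristic in the function above (not V8 code; the constants mirror the diff, while the inputs and the `EstimateCompactionTasks` name are illustrative):

```cpp
// Illustrative re-statement of the compaction task sizing shown in the diff.
#include <algorithm>
#include <cstdio>

int EstimateCompactionTasks(int pages, long live_bytes, long compaction_speed,
                            int background_threads) {
  const double kTargetCompactionTimeInMs = 1;
  const int kNumSweepingTasks = 3;
  // Reserve threads for the sweeping tasks and the main thread.
  const int available_cores =
      std::max(1, background_threads - kNumSweepingTasks - 1);
  int tasks;
  if (compaction_speed > 0) {
    // One task plus as many as needed to finish within the target time.
    tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
                                 compaction_speed / kTargetCompactionTimeInMs);
  } else {
    // No speed estimate yet: one task per evacuation page.
    tasks = pages;
  }
  const int tasks_capped_pages = std::min(pages, tasks);
  return std::min(available_cores, tasks_capped_pages);
}

int main() {
  // Example: 8 background threads, 10 evacuation pages, 2 MB live bytes,
  // 1 MB/ms compaction speed -> tasks = 3, capped by 4 available cores.
  std::printf("%d\n", EstimateCompactionTasks(10, 2 << 20, 1 << 20, 8));
  return 0;
}
```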
(...skipping 405 matching lines...)
3578 ReleaseEvacuationCandidates(); | 3580 ReleaseEvacuationCandidates(); |
3579 } | 3581 } |
3580 | 3582 |
3581 #ifdef VERIFY_HEAP | 3583 #ifdef VERIFY_HEAP |
3582 if (FLAG_verify_heap && !sweeping_in_progress_) { | 3584 if (FLAG_verify_heap && !sweeping_in_progress_) { |
3583 VerifyEvacuation(heap()); | 3585 VerifyEvacuation(heap()); |
3584 } | 3586 } |
3585 #endif | 3587 #endif |
3586 } | 3588 } |
3587 | 3589 |
3590 template <PointerDirection direction> | |
3591 class PointerUpdateJobTraits { | |
3592 public: | |
3593 typedef int PerPageData; | |
Michael Lippautz 2016/03/09 09:48:53: Maybe add a comment that PerPageData is unused for …
ulan 2016/03/09 17:34:18: Done.
3594 typedef PointersUpdatingVisitor* PerTaskData; | |
3595 | |
3596 static bool ProcessPageInParallel(Heap* heap, | |
3597 PointersUpdatingVisitor* visitor, | |
Michael Lippautz 2016/03/09 09:48:53: Not sure if the equivalent version of the decls …
ulan 2016/03/09 17:34:18: Done. It is shorter :)
3598 MemoryChunk* chunk, PerPageData) { | |
3599 UpdateUntypedPointers(heap, chunk); | |
3600 UpdateTypedPointers(heap, chunk, visitor); | |
3601 return true; | |
3602 } | |
3603 static const bool NeedSequentialFinalization = false; | |
3604 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { | |
3605 } | |
3606 | |
3607 private: | |
3608 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { | |
3609 if (direction == OLD_TO_NEW) { | |
3610 RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap, chunk, | |
3611 UpdateOldToNewSlot); | |
3612 } else { | |
3613 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [heap](Address slot) { | |
3614 PointersUpdatingVisitor::UpdateSlot(heap, | |
3615 reinterpret_cast<Object**>(slot)); | |
3616 return REMOVE_SLOT; | |
3617 }); | |
3618 } | |
3619 } | |
3620 | |
3621 static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk, | |
3622 PointersUpdatingVisitor* visitor) { | |
3623 if (direction == OLD_TO_OLD) { | |
3624 Isolate* isolate = heap->isolate(); | |
3625 RememberedSet<OLD_TO_OLD>::IterateTyped( | |
3626 chunk, [isolate, visitor](SlotType type, Address slot) { | |
3627 UpdateTypedSlot(isolate, visitor, type, slot); | |
3628 return REMOVE_SLOT; | |
3629 }); | |
3630 } | |
3631 } | |
3632 }; | |
3633 | |
3634 int NumberOfPointerUpdateTasks(int pages) { | |
3635 if (!FLAG_parallel_pointer_update) return 1; | |
3636 const int kMaxTasks = 4; | |
3637 const int kPagesPerTask = 4; | |
3638 return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask); | |
3639 } | |
3640 | |
3641 template <PointerDirection direction> | |
3642 void UpdatePointersInParallel(Heap* heap) { | |
3643 PageParallelJob<PointerUpdateJobTraits<direction> > job( | |
3644 heap, heap->isolate()->cancelable_task_manager()); | |
3645 RememberedSet<direction>::IterateMemoryChunks( | |
3646 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); | |
3647 PointersUpdatingVisitor visitor(heap); | |
3648 int num_pages = job.NumberOfPages(); | |
3649 int num_tasks = NumberOfPointerUpdateTasks(num_pages); | |
3650 job.Run(num_tasks, [&visitor](int i) { return &visitor; }); | |
3651 } | |
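A tiny standalone illustration of the NumberOfPointerUpdateTasks sizing added above (again not V8 code; the constants are copied from the diff, the example page counts are made up): with kPagesPerTask = 4 and kMaxTasks = 4, the ceiling division yields 1 task for 1-4 pages, 2 for 5-8, and saturates at 4 tasks once 13 or more pages are queued.

```cpp
// Standalone sketch of the pointer-update task sizing from the diff above.
#include <algorithm>
#include <cstdio>

int NumberOfPointerUpdateTasks(int pages, bool parallel_pointer_update) {
  if (!parallel_pointer_update) return 1;  // FLAG_parallel_pointer_update off
  const int kMaxTasks = 4;
  const int kPagesPerTask = 4;
  // Ceiling division: one task per started group of kPagesPerTask pages.
  return std::min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
}

int main() {
  for (int pages : {1, 4, 5, 9, 20}) {
    std::printf("%2d pages -> %d task(s)\n",
                pages, NumberOfPointerUpdateTasks(pages, true));
  }
  return 0;  // prints 1, 1, 2, 3, 4
}
```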
3588 | 3652 |
3589 void MarkCompactCollector::UpdatePointersAfterEvacuation() { | 3653 void MarkCompactCollector::UpdatePointersAfterEvacuation() { |
3590 GCTracer::Scope gc_scope(heap()->tracer(), | 3654 GCTracer::Scope gc_scope(heap()->tracer(), |
3591 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); | 3655 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS); |
3592 | 3656 |
3593 PointersUpdatingVisitor updating_visitor(heap()); | 3657 PointersUpdatingVisitor updating_visitor(heap()); |
3594 | 3658 |
3595 { | 3659 { |
3596 GCTracer::Scope gc_scope( | 3660 GCTracer::Scope gc_scope( |
3597 heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); | 3661 heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW); |
3598 // Update pointers in to space. | 3662 // Update pointers in to space. |
3599 SemiSpaceIterator to_it(heap()->new_space()); | 3663 SemiSpaceIterator to_it(heap()->new_space()); |
3600 for (HeapObject* object = to_it.Next(); object != NULL; | 3664 for (HeapObject* object = to_it.Next(); object != NULL; |
3601 object = to_it.Next()) { | 3665 object = to_it.Next()) { |
3602 Map* map = object->map(); | 3666 Map* map = object->map(); |
3603 object->IterateBody(map->instance_type(), object->SizeFromMap(map), | 3667 object->IterateBody(map->instance_type(), object->SizeFromMap(map), |
3604 &updating_visitor); | 3668 &updating_visitor); |
3605 } | 3669 } |
3606 // Update roots. | 3670 // Update roots. |
3607 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 3671 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
3608 | 3672 |
3609 RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap_, UpdatePointer); | 3673 UpdatePointersInParallel<OLD_TO_NEW>(heap_); |
3610 } | 3674 } |
3611 | 3675 |
3612 { | 3676 { |
3613 Heap* heap = this->heap(); | 3677 Heap* heap = this->heap(); |
3614 GCTracer::Scope gc_scope( | 3678 GCTracer::Scope gc_scope( |
3615 heap->tracer(), | 3679 heap->tracer(), |
3616 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); | 3680 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED); |
3617 | 3681 UpdatePointersInParallel<OLD_TO_OLD>(heap_); |
3618 RememberedSet<OLD_TO_OLD>::Iterate(heap, [heap](Address slot) { | |
3619 PointersUpdatingVisitor::UpdateSlot(heap, | |
3620 reinterpret_cast<Object**>(slot)); | |
3621 return REMOVE_SLOT; | |
3622 }); | |
3623 Isolate* isolate = heap->isolate(); | |
3624 PointersUpdatingVisitor* visitor = &updating_visitor; | |
3625 RememberedSet<OLD_TO_OLD>::IterateTyped( | |
3626 heap, [isolate, visitor](SlotType type, Address slot) { | |
3627 UpdateTypedSlot(isolate, visitor, type, slot); | |
3628 return REMOVE_SLOT; | |
3629 }); | |
3630 } | 3682 } |
3631 | 3683 |
3632 { | 3684 { |
3633 GCTracer::Scope gc_scope( | 3685 GCTracer::Scope gc_scope( |
3634 heap()->tracer(), | 3686 heap()->tracer(), |
3635 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED); | 3687 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED); |
3636 for (Page* p : evacuation_candidates_) { | 3688 for (Page* p : evacuation_candidates_) { |
3637 DCHECK(p->IsEvacuationCandidate() || | 3689 DCHECK(p->IsEvacuationCandidate() || |
3638 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3690 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
3639 | 3691 |
(...skipping 267 matching lines...)
3907 MarkBit mark_bit = Marking::MarkBitFrom(host); | 3959 MarkBit mark_bit = Marking::MarkBitFrom(host); |
3908 if (Marking::IsBlack(mark_bit)) { | 3960 if (Marking::IsBlack(mark_bit)) { |
3909 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 3961 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
3910 RecordRelocSlot(host, &rinfo, target); | 3962 RecordRelocSlot(host, &rinfo, target); |
3911 } | 3963 } |
3912 } | 3964 } |
3913 } | 3965 } |
3914 | 3966 |
3915 } // namespace internal | 3967 } // namespace internal |
3916 } // namespace v8 | 3968 } // namespace v8 |