Chromium Code Reviews

Diff: src/heap/mark-compact.cc

Issue 1775003003: Implement parallel pointer updates after evacuation. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase (created 4 years, 9 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/mark-compact.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
 #include "src/deoptimizer.h"
 #include "src/execution.h"
 #include "src/frames-inl.h"
 #include "src/gdb-jit.h"
 #include "src/global-handles.h"
 #include "src/heap/array-buffer-tracker.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/incremental-marking.h"
 #include "src/heap/mark-compact-inl.h"
 #include "src/heap/object-stats.h"
 #include "src/heap/objects-visiting-inl.h"
 #include "src/heap/objects-visiting.h"
+#include "src/heap/page-parallel-job.h"
 #include "src/heap/spaces-inl.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
 #include "src/profiler/cpu-profiler.h"
 #include "src/utils-inl.h"
 #include "src/v8.h"

 namespace v8 {
 namespace internal {

(...skipping 2747 matching lines...)
              !MarkCompactCollector::IsOnEvacuationCandidate(target));
     }
   }

  private:
   inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }

   Heap* heap_;
 };

-
-static void UpdatePointer(HeapObject** address, HeapObject* object) {
-  MapWord map_word = object->map_word();
-  // Since we only filter invalid slots in old space, the store buffer can
-  // still contain stale pointers in large object and in map spaces. Ignore
-  // these pointers here.
-  DCHECK(map_word.IsForwardingAddress() ||
-         !object->GetHeap()->old_space()->Contains(
-             reinterpret_cast<Address>(address)));
-  if (map_word.IsForwardingAddress()) {
-    // Update the corresponding slot.
-    *address = map_word.ToForwardingAddress();
-  }
-}
-
-
 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
                                                          Object** p) {
   MapWord map_word = HeapObject::cast(*p)->map_word();

   if (map_word.IsForwardingAddress()) {
     return String::cast(map_word.ToForwardingAddress());
   }

   return String::cast(*p);
 }
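Note on the deleted helper: the freestanding UpdatePointer removed above is not gone for good; it reappears further down as PointerUpdateJobTraits::UpdateOldToNewSlot so the same logic can be invoked from parallel tasks. Both it and UpdateReferenceInExternalStringTableEntry lean on the evacuation protocol: when the collector moves an object, it stores a forwarding pointer in the old copy's map word, and updating a slot just means following that pointer. A minimal, V8-independent sketch of the idiom (ToyObject and the names below are illustrative only, not V8 API):

// Illustrative only: a toy stand-in for V8's MapWord forwarding scheme.
struct ToyObject {
  ToyObject* forwarding = nullptr;  // set once the object has been evacuated
};

// Follow the forwarding pointer if the referent was moved; otherwise the slot
// already points at the live copy and is left untouched.
inline void UpdateToySlot(ToyObject** slot) {
  if (ToyObject* target = (*slot)->forwarding) *slot = target;
}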
(...skipping 334 matching lines...)
   //
   // The number of parallel compaction tasks is limited by:
   // - #evacuation pages
   // - (#cores - 1)
   const double kTargetCompactionTimeInMs = 1;
   const int kNumSweepingTasks = 3;

   intptr_t compaction_speed =
       heap()->tracer()->CompactionSpeedInBytesPerMillisecond();

-  const int available_cores =
-      Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1);
+  const int available_cores = Max(
+      1, static_cast<int>(
+             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) -
+             kNumSweepingTasks - 1);
   int tasks;
   if (compaction_speed > 0) {
     tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
                                  compaction_speed / kTargetCompactionTimeInMs);
   } else {
     tasks = pages;
   }
   const int tasks_capped_pages = Min(pages, tasks);
   return Min(available_cores, tasks_capped_pages);
 }
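For orientation, the heuristic above first derives a task count from the observed compaction speed and the live bytes to move, then caps it by the number of evacuation pages and by the background threads left over after sweeping. A standalone sketch with made-up inputs (EstimateCompactionTasks and every number below are illustrative, not part of the patch):

#include <algorithm>
#include <cstdio>

// Mirrors the heuristic above with plain arithmetic; all inputs are hypothetical.
int EstimateCompactionTasks(int pages, double live_bytes,
                            double compaction_speed_bytes_per_ms,
                            int background_threads) {
  const double kTargetCompactionTimeInMs = 1;
  const int kNumSweepingTasks = 3;
  const int available_cores =
      std::max(1, background_threads - kNumSweepingTasks - 1);
  const int tasks =
      compaction_speed_bytes_per_ms > 0
          ? 1 + static_cast<int>(live_bytes / compaction_speed_bytes_per_ms /
                                 kTargetCompactionTimeInMs)
          : pages;
  return std::min(available_cores, std::min(pages, tasks));
}

int main() {
  // 20 evacuation pages, 10 MB live, 5 MB/ms speed, 8 background threads:
  // tasks = 1 + 10/5/1 = 3, cores = 8 - 3 - 1 = 4, so min(4, min(20, 3)) = 3.
  std::printf("%d\n", EstimateCompactionTasks(20, 10e6, 5e6, 8));
  return 0;
}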
(...skipping 405 matching lines...)
     ReleaseEvacuationCandidates();
   }

 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap && !sweeping_in_progress_) {
     VerifyEvacuation(heap());
   }
 #endif
 }

+template <PointerDirection direction>
+class PointerUpdateJobTraits {
+ public:
+  typedef int PerPageData;  // Per page data is not used in this job.
+  typedef PointersUpdatingVisitor* PerTaskData;
+
+  static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
+                                    MemoryChunk* chunk, PerPageData) {
+    UpdateUntypedPointers(heap, chunk);
+    UpdateTypedPointers(heap, chunk, visitor);
+    return true;
+  }
+  static const bool NeedSequentialFinalization = false;
+  static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+  }
+
+ private:
+  static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
+    if (direction == OLD_TO_NEW) {
+      RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap, chunk,
+                                                    UpdateOldToNewSlot);
+    } else {
+      RememberedSet<OLD_TO_OLD>::Iterate(chunk, [heap](Address slot) {
+        PointersUpdatingVisitor::UpdateSlot(heap,
+                                            reinterpret_cast<Object**>(slot));
+        return REMOVE_SLOT;
+      });
+    }
+  }
+
+  static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
+                                  PointersUpdatingVisitor* visitor) {
+    if (direction == OLD_TO_OLD) {
+      Isolate* isolate = heap->isolate();
+      RememberedSet<OLD_TO_OLD>::IterateTyped(
+          chunk, [isolate, visitor](SlotType type, Address slot) {
+            UpdateTypedSlot(isolate, visitor, type, slot);
+            return REMOVE_SLOT;
+          });
+    }
+  }
+
+  static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
+    MapWord map_word = object->map_word();
+    // Since we only filter invalid slots in old space, the store buffer can
+    // still contain stale pointers in large object and in map spaces. Ignore
+    // these pointers here.
+    DCHECK(map_word.IsForwardingAddress() ||
+           !object->GetHeap()->old_space()->Contains(
+               reinterpret_cast<Address>(address)));
+    if (map_word.IsForwardingAddress()) {
+      // Update the corresponding slot.
+      *address = map_word.ToForwardingAddress();
+    }
+  }
+};
+
+int NumberOfPointerUpdateTasks(int pages) {
+  if (!FLAG_parallel_pointer_update) return 1;
+  const int kMaxTasks = 4;
+  const int kPagesPerTask = 4;
+  return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
+}
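With the constants above, the ceiling division (pages + kPagesPerTask - 1) / kPagesPerTask assigns one task to 1-4 pages, two tasks to 5-8, and saturates at the kMaxTasks cap of 4 from 13 pages onward; the pages counted here are the memory chunks added to the job below, and if FLAG_parallel_pointer_update is off the whole job runs as a single task.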
+
+template <PointerDirection direction>
+void UpdatePointersInParallel(Heap* heap) {
+  PageParallelJob<PointerUpdateJobTraits<direction> > job(
+      heap, heap->isolate()->cancelable_task_manager());
+  RememberedSet<direction>::IterateMemoryChunks(
+      heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
+  PointersUpdatingVisitor visitor(heap);
+  int num_pages = job.NumberOfPages();
+  int num_tasks = NumberOfPointerUpdateTasks(num_pages);
+  job.Run(num_tasks, [&visitor](int i) { return &visitor; });
+}
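UpdatePointersInParallel is the glue between the remembered sets and the new PageParallelJob from src/heap/page-parallel-job.h: every memory chunk with recorded slots becomes one unit of work, each task receives the shared PointersUpdatingVisitor as its per-task data, and PointerUpdateJobTraits::ProcessPageInParallel performs the actual slot updates. The sketch below is a deliberately serial, V8-independent illustration of the traits pattern only, not part of the patch; the real PageParallelJob distributes pages across cancelable background tasks, which this toy version omits:

#include <vector>

// Toy traits-driven job: the traits type decides how one unit of work is
// processed; the job itself only owns the work list. All names are illustrative.
template <typename Traits>
class ToyPageJob {
 public:
  void AddPage(typename Traits::Page* page) { pages_.push_back(page); }
  int NumberOfPages() const { return static_cast<int>(pages_.size()); }

  // The real job would split pages_ across num_tasks background tasks; this
  // sketch processes every page on the calling thread.
  template <typename PerTaskDataFactory>
  void Run(int num_tasks, PerTaskDataFactory per_task_data) {
    (void)num_tasks;
    for (typename Traits::Page* page : pages_) {
      Traits::ProcessPageInParallel(per_task_data(0), page);
    }
  }

 private:
  std::vector<typename Traits::Page*> pages_;
};

// Example traits: sum the integers stored on each "page".
struct SummingTraits {
  typedef std::vector<int> Page;
  static void ProcessPageInParallel(long* sum, Page* page) {
    for (int value : *page) *sum += value;
  }
};

// Usage sketch:
//   ToyPageJob<SummingTraits> job;
//   std::vector<int> page = {1, 2, 3};
//   job.AddPage(&page);
//   long sum = 0;
//   job.Run(1, [&sum](int) { return &sum; });  // sum == 6 afterwards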

 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
   GCTracer::Scope gc_scope(heap()->tracer(),
                            GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);

   PointersUpdatingVisitor updating_visitor(heap());

   {
     GCTracer::Scope gc_scope(
         heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
     // Update pointers in to space.
     SemiSpaceIterator to_it(heap()->new_space());
     for (HeapObject* object = to_it.Next(); object != NULL;
          object = to_it.Next()) {
       Map* map = object->map();
       object->IterateBody(map->instance_type(), object->SizeFromMap(map),
                           &updating_visitor);
     }
     // Update roots.
     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);

-    RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap_, UpdatePointer);
+    UpdatePointersInParallel<OLD_TO_NEW>(heap_);
   }

   {
     Heap* heap = this->heap();
     GCTracer::Scope gc_scope(
         heap->tracer(),
         GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
-
-    RememberedSet<OLD_TO_OLD>::Iterate(heap, [heap](Address slot) {
-      PointersUpdatingVisitor::UpdateSlot(heap,
-                                          reinterpret_cast<Object**>(slot));
-      return REMOVE_SLOT;
-    });
-    Isolate* isolate = heap->isolate();
-    PointersUpdatingVisitor* visitor = &updating_visitor;
-    RememberedSet<OLD_TO_OLD>::IterateTyped(
-        heap, [isolate, visitor](SlotType type, Address slot) {
-          UpdateTypedSlot(isolate, visitor, type, slot);
-          return REMOVE_SLOT;
-        });
+    UpdatePointersInParallel<OLD_TO_OLD>(heap_);
   }

   {
     GCTracer::Scope gc_scope(
         heap()->tracer(),
         GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
     for (Page* p : evacuation_candidates_) {
       DCHECK(p->IsEvacuationCandidate() ||
              p->IsFlagSet(Page::RESCAN_ON_EVACUATION));

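Net effect of this function: the OLD_TO_NEW and OLD_TO_OLD slot walks that previously ran inline on the main thread under their GCTracer scopes are now funneled through UpdatePointersInParallel, so each remembered set can be processed by up to NumberOfPointerUpdateTasks tasks (at most four, and only one when FLAG_parallel_pointer_update is disabled), while the to-space, root, and evacuation-candidate updates keep their existing sequential structure.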
(...skipping 267 matching lines...)
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(host, &rinfo, target);
     }
   }
 }

 }  // namespace internal
 }  // namespace v8