Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(68)

Side by Side Diff: src/heap/mark-compact.cc

Issue 2764473002: [heap] Make SlotSet allocation thread-safe and refactor code. (Closed)
Patch Set: Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | src/heap/remembered-set.h » ('j') | src/heap/spaces.cc » ('J')
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/mark-compact.h" 5 #include "src/heap/mark-compact.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h" 9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h" 10 #include "src/code-stubs.h"
(...skipping 621 matching lines...) Expand 10 before | Expand all | Expand 10 after
632 space->top() == space->limit() 632 space->top() == space->limit()
633 ? nullptr 633 ? nullptr
634 : Page::FromAllocationAreaAddress(space->top()); 634 : Page::FromAllocationAreaAddress(space->top());
635 for (Page* p : *space) { 635 for (Page* p : *space) {
636 if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue; 636 if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue;
637 // Invariant: Evacuation candidates are just created when marking is 637 // Invariant: Evacuation candidates are just created when marking is
638 // started. This means that sweeping has finished. Furthermore, at the end 638 // started. This means that sweeping has finished. Furthermore, at the end
639 // of a GC all evacuation candidates are cleared and their slot buffers are 639 // of a GC all evacuation candidates are cleared and their slot buffers are
640 // released. 640 // released.
641 CHECK(!p->IsEvacuationCandidate()); 641 CHECK(!p->IsEvacuationCandidate());
642 CHECK_NULL(p->old_to_old_slots()); 642 CHECK_NULL(p->slot_set<OLD_TO_OLD>());
643 CHECK_NULL(p->typed_old_to_old_slots()); 643 CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
644 CHECK(p->SweepingDone()); 644 CHECK(p->SweepingDone());
645 DCHECK(p->area_size() == area_size); 645 DCHECK(p->area_size() == area_size);
646 pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p)); 646 pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
647 } 647 }
648 648
649 int candidate_count = 0; 649 int candidate_count = 0;
650 size_t total_live_bytes = 0; 650 size_t total_live_bytes = 0;
651 651
652 const bool reduce_memory = heap()->ShouldReduceMemory(); 652 const bool reduce_memory = heap()->ShouldReduceMemory();
653 if (FLAG_manual_evacuation_candidates_selection) { 653 if (FLAG_manual_evacuation_candidates_selection) {
(...skipping 2651 matching lines...) Expand 10 before | Expand all | Expand 10 after
3305 return map_word.ToForwardingAddress(); 3305 return map_word.ToForwardingAddress();
3306 } 3306 }
3307 } 3307 }
3308 return object; 3308 return object;
3309 } 3309 }
3310 }; 3310 };
3311 3311
3312 MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode 3312 MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode
3313 MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) { 3313 MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) {
3314 AllocationSpace identity = p->owner()->identity(); 3314 AllocationSpace identity = p->owner()->identity();
3315 if (p->old_to_new_slots() && 3315 if (p->slot_set<OLD_TO_NEW>() &&
3316 (identity == OLD_SPACE || identity == MAP_SPACE)) { 3316 (identity == OLD_SPACE || identity == MAP_SPACE)) {
3317 return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS; 3317 return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS;
3318 } else if (p->typed_old_to_new_slots() && identity == CODE_SPACE) { 3318 } else if (p->typed_slot_set<OLD_TO_NEW>() && identity == CODE_SPACE) {
3319 return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS; 3319 return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS;
3320 } 3320 }
3321 return MarkCompactCollector::Sweeper::DO_NOT_CLEAR; 3321 return MarkCompactCollector::Sweeper::DO_NOT_CLEAR;
3322 } 3322 }
3323 3323
3324 int MarkCompactCollector::Sweeper::RawSweep( 3324 int MarkCompactCollector::Sweeper::RawSweep(
3325 Page* p, FreeListRebuildingMode free_list_mode, 3325 Page* p, FreeListRebuildingMode free_list_mode,
3326 FreeSpaceTreatmentMode free_space_mode) { 3326 FreeSpaceTreatmentMode free_space_mode) {
3327 Space* space = p->owner(); 3327 Space* space = p->owner();
3328 DCHECK_NOT_NULL(space); 3328 DCHECK_NOT_NULL(space);
(...skipping 93 matching lines...) Expand 10 before | Expand all | Expand 10 after
3422 SlotSet::KEEP_EMPTY_BUCKETS); 3422 SlotSet::KEEP_EMPTY_BUCKETS);
3423 } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) { 3423 } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
3424 free_ranges.insert(std::pair<uint32_t, uint32_t>( 3424 free_ranges.insert(std::pair<uint32_t, uint32_t>(
3425 static_cast<uint32_t>(free_start - p->address()), 3425 static_cast<uint32_t>(free_start - p->address()),
3426 static_cast<uint32_t>(p->area_end() - p->address()))); 3426 static_cast<uint32_t>(p->area_end() - p->address())));
3427 } 3427 }
3428 } 3428 }
3429 3429
3430 // Clear invalid typed slots after collecting all free ranges. 3430 // Clear invalid typed slots after collecting all free ranges.
3431 if (slots_clearing_mode == CLEAR_TYPED_SLOTS) { 3431 if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
3432 p->typed_old_to_new_slots()->RemoveInvaldSlots(free_ranges); 3432 p->typed_slot_set<OLD_TO_NEW>()->RemoveInvaldSlots(free_ranges);
3433 } 3433 }
3434 3434
3435 // Clear the mark bits of that page and reset live bytes count. 3435 // Clear the mark bits of that page and reset live bytes count.
3436 p->ClearLiveness(); 3436 p->ClearLiveness();
3437 3437
3438 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); 3438 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3439 if (free_list_mode == IGNORE_FREE_LIST) return 0; 3439 if (free_list_mode == IGNORE_FREE_LIST) return 0;
3440 return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes)); 3440 return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
3441 } 3441 }
3442 3442
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
3477 IterationMode mode) { 3477 IterationMode mode) {
3478 LiveObjectIterator<kBlackObjects> it(page); 3478 LiveObjectIterator<kBlackObjects> it(page);
3479 HeapObject* object = nullptr; 3479 HeapObject* object = nullptr;
3480 while ((object = it.Next()) != nullptr) { 3480 while ((object = it.Next()) != nullptr) {
3481 DCHECK(ObjectMarking::IsBlack(object)); 3481 DCHECK(ObjectMarking::IsBlack(object));
3482 if (!visitor->Visit(object)) { 3482 if (!visitor->Visit(object)) {
3483 if (mode == kClearMarkbits) { 3483 if (mode == kClearMarkbits) {
3484 page->markbits()->ClearRange( 3484 page->markbits()->ClearRange(
3485 page->AddressToMarkbitIndex(page->area_start()), 3485 page->AddressToMarkbitIndex(page->area_start()),
3486 page->AddressToMarkbitIndex(object->address())); 3486 page->AddressToMarkbitIndex(object->address()));
3487 if (page->old_to_new_slots() != nullptr) { 3487 if (page->slot_set<OLD_TO_NEW>() != nullptr) {
ulan 2017/03/20 14:27:14 Seems like potential race condition here. Should w
Hannes Payer (out of office) 2017/03/20 15:07:08 This site is safe since we never go from allocated
3488 page->old_to_new_slots()->RemoveRange( 3488 page->slot_set<OLD_TO_NEW>()->RemoveRange(
3489 0, static_cast<int>(object->address() - page->address()), 3489 0, static_cast<int>(object->address() - page->address()),
3490 SlotSet::PREFREE_EMPTY_BUCKETS); 3490 SlotSet::PREFREE_EMPTY_BUCKETS);
3491 } 3491 }
3492 if (page->typed_old_to_new_slots() != nullptr) { 3492 if (page->typed_slot_set<OLD_TO_NEW>() != nullptr) {
3493 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(), 3493 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
3494 object->address()); 3494 object->address());
3495 } 3495 }
3496 RecomputeLiveBytes(page); 3496 RecomputeLiveBytes(page);
3497 } 3497 }
3498 return false; 3498 return false;
3499 } 3499 }
3500 } 3500 }
3501 if (mode == kClearMarkbits) { 3501 if (mode == kClearMarkbits) {
3502 page->ClearLiveness(); 3502 page->ClearLiveness();
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after
3585 EvacuateEpilogue(); 3585 EvacuateEpilogue();
3586 } 3586 }
3587 3587
3588 #ifdef VERIFY_HEAP 3588 #ifdef VERIFY_HEAP
3589 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { 3589 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
3590 VerifyEvacuation(heap()); 3590 VerifyEvacuation(heap());
3591 } 3591 }
3592 #endif 3592 #endif
3593 } 3593 }
3594 3594
3595 template <PointerDirection direction> 3595 template <RememberedSetType type>
3596 class PointerUpdateJobTraits { 3596 class PointerUpdateJobTraits {
3597 public: 3597 public:
3598 typedef int PerPageData; // Per page data is not used in this job. 3598 typedef int PerPageData; // Per page data is not used in this job.
3599 typedef int PerTaskData; // Per task data is not used in this job. 3599 typedef int PerTaskData; // Per task data is not used in this job.
3600 3600
3601 static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk, 3601 static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
3602 PerPageData) { 3602 PerPageData) {
3603 UpdateUntypedPointers(heap, chunk); 3603 UpdateUntypedPointers(heap, chunk);
3604 UpdateTypedPointers(heap, chunk); 3604 UpdateTypedPointers(heap, chunk);
3605 return true; 3605 return true;
3606 } 3606 }
3607 static const bool NeedSequentialFinalization = false; 3607 static const bool NeedSequentialFinalization = false;
3608 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) { 3608 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
3609 } 3609 }
3610 3610
3611 private: 3611 private:
3612 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) { 3612 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
3613 if (direction == OLD_TO_NEW) { 3613 if (type == OLD_TO_NEW) {
3614 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) { 3614 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
3615 return CheckAndUpdateOldToNewSlot(heap, slot); 3615 return CheckAndUpdateOldToNewSlot(heap, slot);
3616 }); 3616 });
3617 } else { 3617 } else {
3618 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) { 3618 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
3619 return UpdateSlot(reinterpret_cast<Object**>(slot)); 3619 return UpdateSlot(reinterpret_cast<Object**>(slot));
3620 }); 3620 });
3621 } 3621 }
3622 } 3622 }
3623 3623
3624 static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) { 3624 static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
3625 if (direction == OLD_TO_OLD) { 3625 if (type == OLD_TO_OLD) {
3626 Isolate* isolate = heap->isolate(); 3626 Isolate* isolate = heap->isolate();
3627 RememberedSet<OLD_TO_OLD>::IterateTyped( 3627 RememberedSet<OLD_TO_OLD>::IterateTyped(
3628 chunk, [isolate](SlotType type, Address host_addr, Address slot) { 3628 chunk,
3629 return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot, 3629 [isolate](SlotType slot_type, Address host_addr, Address slot) {
3630 UpdateSlot); 3630 return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
3631 slot, UpdateSlot);
3631 }); 3632 });
3632 } else { 3633 } else {
3633 Isolate* isolate = heap->isolate(); 3634 Isolate* isolate = heap->isolate();
3634 RememberedSet<OLD_TO_NEW>::IterateTyped( 3635 RememberedSet<OLD_TO_NEW>::IterateTyped(
3635 chunk, 3636 chunk,
3636 [isolate, heap](SlotType type, Address host_addr, Address slot) { 3637 [isolate, heap](SlotType slot_type, Address host_addr, Address slot) {
3637 return UpdateTypedSlotHelper::UpdateTypedSlot( 3638 return UpdateTypedSlotHelper::UpdateTypedSlot(
3638 isolate, type, slot, [heap](Object** slot) { 3639 isolate, slot_type, slot, [heap](Object** slot) {
3639 return CheckAndUpdateOldToNewSlot( 3640 return CheckAndUpdateOldToNewSlot(
3640 heap, reinterpret_cast<Address>(slot)); 3641 heap, reinterpret_cast<Address>(slot));
3641 }); 3642 });
3642 }); 3643 });
3643 } 3644 }
3644 } 3645 }
3645 3646
3646 static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap, 3647 static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
3647 Address slot_address) { 3648 Address slot_address) {
3648 // There may be concurrent action on slots in dead objects. Concurrent 3649 // There may be concurrent action on slots in dead objects. Concurrent
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
3693 3694
3694 int NumberOfPointerUpdateTasks(int pages) { 3695 int NumberOfPointerUpdateTasks(int pages) {
3695 if (!FLAG_parallel_pointer_update) return 1; 3696 if (!FLAG_parallel_pointer_update) return 1;
3696 const int available_cores = Max( 3697 const int available_cores = Max(
3697 1, static_cast<int>( 3698 1, static_cast<int>(
3698 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())); 3699 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
3699 const int kPagesPerTask = 4; 3700 const int kPagesPerTask = 4;
3700 return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask); 3701 return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
3701 } 3702 }
3702 3703
3703 template <PointerDirection direction> 3704 template <RememberedSetType type>
3704 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) { 3705 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
3705 PageParallelJob<PointerUpdateJobTraits<direction> > job( 3706 PageParallelJob<PointerUpdateJobTraits<type> > job(
3706 heap, heap->isolate()->cancelable_task_manager(), semaphore); 3707 heap, heap->isolate()->cancelable_task_manager(), semaphore);
3707 RememberedSet<direction>::IterateMemoryChunks( 3708 RememberedSet<type>::IterateMemoryChunks(
3708 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); }); 3709 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
3709 int num_pages = job.NumberOfPages(); 3710 int num_pages = job.NumberOfPages();
3710 int num_tasks = NumberOfPointerUpdateTasks(num_pages); 3711 int num_tasks = NumberOfPointerUpdateTasks(num_pages);
3711 job.Run(num_tasks, [](int i) { return 0; }); 3712 job.Run(num_tasks, [](int i) { return 0; });
3712 } 3713 }
3713 3714
3714 class ToSpacePointerUpdateJobTraits { 3715 class ToSpacePointerUpdateJobTraits {
3715 public: 3716 public:
3716 typedef std::pair<Address, Address> PerPageData; 3717 typedef std::pair<Address, Address> PerPageData;
3717 typedef PointersUpdatingVisitor* PerTaskData; 3718 typedef PointersUpdatingVisitor* PerTaskData;
(...skipping 134 matching lines...) Expand 10 before | Expand all | Expand 10 after
3852 const Sweeper::FreeSpaceTreatmentMode free_space_mode = 3853 const Sweeper::FreeSpaceTreatmentMode free_space_mode =
3853 Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE; 3854 Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
3854 if (identity == NEW_SPACE) { 3855 if (identity == NEW_SPACE) {
3855 RawSweep(page, IGNORE_FREE_LIST, free_space_mode); 3856 RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
3856 } else { 3857 } else {
3857 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); 3858 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
3858 } 3859 }
3859 DCHECK(page->SweepingDone()); 3860 DCHECK(page->SweepingDone());
3860 3861
3861 // After finishing sweeping of a page we clean up its remembered set. 3862 // After finishing sweeping of a page we clean up its remembered set.
3862 if (page->typed_old_to_new_slots()) { 3863 if (page->typed_slot_set<OLD_TO_NEW>()) {
ulan 2017/03/20 14:27:15 Cache the value before checking here and below?
Hannes Payer (out of office) 2017/03/20 15:07:08 As before, that should be safe. But let's be doubl
3863 page->typed_old_to_new_slots()->FreeToBeFreedChunks(); 3864 page->typed_slot_set<OLD_TO_NEW>()->FreeToBeFreedChunks();
3864 } 3865 }
3865 if (page->old_to_new_slots()) { 3866 if (page->slot_set<OLD_TO_NEW>()) {
3866 page->old_to_new_slots()->FreeToBeFreedBuckets(); 3867 page->slot_set<OLD_TO_NEW>()->FreeToBeFreedBuckets();
3867 } 3868 }
3868 } 3869 }
3869 3870
3870 { 3871 {
3871 base::LockGuard<base::Mutex> guard(&mutex_); 3872 base::LockGuard<base::Mutex> guard(&mutex_);
3872 swept_list_[identity].Add(page); 3873 swept_list_[identity].Add(page);
3873 } 3874 }
3874 return max_freed; 3875 return max_freed;
3875 } 3876 }
3876 3877
(...skipping 144 matching lines...) Expand 10 before | Expand all | Expand 10 after
4021 // The target is always in old space, we don't have to record the slot in 4022 // The target is always in old space, we don't have to record the slot in
4022 // the old-to-new remembered set. 4023 // the old-to-new remembered set.
4023 DCHECK(!heap()->InNewSpace(target)); 4024 DCHECK(!heap()->InNewSpace(target));
4024 RecordRelocSlot(host, &rinfo, target); 4025 RecordRelocSlot(host, &rinfo, target);
4025 } 4026 }
4026 } 4027 }
4027 } 4028 }
4028 4029
4029 } // namespace internal 4030 } // namespace internal
4030 } // namespace v8 4031 } // namespace v8
OLDNEW
« no previous file with comments | « no previous file | src/heap/remembered-set.h » ('j') | src/heap/spaces.cc » ('J')

Powered by Google App Engine
This is Rietveld 408576698