Chromium Code Reviews

Unified diff: src/heap/mark-compact.cc

Issue 2764473002: [heap] Make SlotSet allocation thread-safe and refactor code. (Closed)
Patch Set: format (created 3 years, 9 months ago)
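
The change replaces the per-direction accessors on Page/MemoryChunk (old_to_new_slots(), old_to_old_slots(), typed_old_to_new_slots(), typed_old_to_old_slots()) with accessors templated on a remembered-set type (slot_set<OLD_TO_NEW>(), typed_slot_set<OLD_TO_OLD>(), and so on), and makes lazy SlotSet allocation safe to trigger from concurrent tasks. The following is a minimal sketch of that pattern, assuming a compare-and-swap-published array of lazily allocated slot sets; the enum and accessor names mirror the diff, while the Chunk class, AllocateSlotSet, and the atomics details are illustrative assumptions, not V8's actual MemoryChunk implementation.

// Minimal sketch, not V8's MemoryChunk: a lazily allocated slot-set array
// indexed by a remembered-set type, published with a compare-and-swap so
// concurrent tasks can trigger allocation safely. Enum and accessor names
// mirror the diff; everything else here is an illustrative assumption.
#include <atomic>

enum RememberedSetType {
  OLD_TO_NEW,
  OLD_TO_OLD,
  NUMBER_OF_REMEMBERED_SET_TYPES
};

struct SlotSet {
  // Buckets of recorded slot offsets would live here.
};

class Chunk {
 public:
  Chunk() {
    for (auto& set : slot_set_) set.store(nullptr, std::memory_order_relaxed);
  }

  // Returns nullptr if no slot set of this type has been allocated yet.
  template <RememberedSetType type>
  SlotSet* slot_set() {
    return slot_set_[type].load(std::memory_order_acquire);
  }

  // Allocates the slot set on first use; safe to call from several threads.
  template <RememberedSetType type>
  SlotSet* AllocateSlotSet() {
    SlotSet* new_set = new SlotSet();
    SlotSet* expected = nullptr;
    if (!slot_set_[type].compare_exchange_strong(expected, new_set,
                                                 std::memory_order_acq_rel)) {
      delete new_set;   // Another thread won the race; adopt its set.
      return expected;  // compare_exchange stored the current pointer here.
    }
    return new_set;
  }

 private:
  std::atomic<SlotSet*> slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
};

In this sketch, a recording path would call chunk.slot_set<OLD_TO_NEW>() and, on nullptr, chunk.AllocateSlotSet<OLD_TO_NEW>(); the loser of a concurrent allocation race frees its copy and adopts the winner's, so no lock is needed on the fast path.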
@@ -1,10 +1,10 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/heap/mark-compact.h"
 
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(...skipping 612 matching lines...)
@@ -623,22 +623,22 @@
       space->top() == space->limit()
           ? nullptr
           : Page::FromAllocationAreaAddress(space->top());
   for (Page* p : *space) {
     if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue;
     // Invariant: Evacuation candidates are just created when marking is
     // started. This means that sweeping has finished. Furthermore, at the end
     // of a GC all evacuation candidates are cleared and their slot buffers are
     // released.
     CHECK(!p->IsEvacuationCandidate());
-    CHECK_NULL(p->old_to_old_slots());
-    CHECK_NULL(p->typed_old_to_old_slots());
+    CHECK_NULL(p->slot_set<OLD_TO_OLD>());
+    CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
     CHECK(p->SweepingDone());
     DCHECK(p->area_size() == area_size);
     pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
   }
 
   int candidate_count = 0;
   size_t total_live_bytes = 0;
 
   const bool reduce_memory = heap()->ShouldReduceMemory();
   if (FLAG_manual_evacuation_candidates_selection) {
(...skipping 2652 matching lines...)
@@ -3297,24 +3297,24 @@
         return map_word.ToForwardingAddress();
       }
     }
     return object;
   }
 };
 
 MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode
 MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) {
   AllocationSpace identity = p->owner()->identity();
-  if (p->old_to_new_slots() &&
+  if (p->slot_set<OLD_TO_NEW>() &&
       (identity == OLD_SPACE || identity == MAP_SPACE)) {
     return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS;
-  } else if (p->typed_old_to_new_slots() && identity == CODE_SPACE) {
+  } else if (p->typed_slot_set<OLD_TO_NEW>() && identity == CODE_SPACE) {
     return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS;
   }
   return MarkCompactCollector::Sweeper::DO_NOT_CLEAR;
 }
 
 int MarkCompactCollector::Sweeper::RawSweep(
     Page* p, FreeListRebuildingMode free_list_mode,
     FreeSpaceTreatmentMode free_space_mode) {
   Space* space = p->owner();
   DCHECK_NOT_NULL(space);
(...skipping 93 matching lines...)
@@ -3414,21 +3414,21 @@
                                              SlotSet::KEEP_EMPTY_BUCKETS);
     } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
       free_ranges.insert(std::pair<uint32_t, uint32_t>(
           static_cast<uint32_t>(free_start - p->address()),
           static_cast<uint32_t>(p->area_end() - p->address())));
     }
   }
 
   // Clear invalid typed slots after collecting all free ranges.
   if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
-    p->typed_old_to_new_slots()->RemoveInvaldSlots(free_ranges);
+    p->typed_slot_set<OLD_TO_NEW>()->RemoveInvaldSlots(free_ranges);
   }
 
   // Clear the mark bits of that page and reset live bytes count.
   p->ClearLiveness();
 
   p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
   if (free_list_mode == IGNORE_FREE_LIST) return 0;
   return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
 }
 
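RawSweep records each free range it creates as a (start offset, end offset) pair relative to the page start, and only afterwards asks the typed slot set to drop slots that now point into freed memory (the RemoveInvaldSlots call above). Below is a rough, self-contained sketch of that filtering step, assuming the std::map<uint32_t, uint32_t> layout visible in the diff; the lookup logic is an assumption about what the TypedSlotSet helper does, not its actual implementation.

#include <cstdint>
#include <map>
#include <vector>

// Free ranges collected during sweeping: start offset -> end offset, relative
// to the page start, mirroring the free_ranges map built in RawSweep.
using FreeRangeMap = std::map<uint32_t, uint32_t>;

// True if `offset` falls inside any recorded [start, end) free range.
bool InFreeRange(const FreeRangeMap& free_ranges, uint32_t offset) {
  auto it = free_ranges.upper_bound(offset);  // first range starting after offset
  if (it == free_ranges.begin()) return false;
  --it;  // candidate range that starts at or before offset
  return offset >= it->first && offset < it->second;
}

// Keeps only the slot offsets that still point into live objects.
std::vector<uint32_t> FilterInvalidatedSlots(const std::vector<uint32_t>& slots,
                                             const FreeRangeMap& free_ranges) {
  std::vector<uint32_t> kept;
  for (uint32_t offset : slots) {
    if (!InFreeRange(free_ranges, offset)) kept.push_back(offset);
  }
  return kept;
}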
(...skipping 34 matching lines...)
@@ -3469,29 +3469,28 @@
                                             IterationMode mode) {
   LiveObjectIterator<kBlackObjects> it(page);
   HeapObject* object = nullptr;
   while ((object = it.Next()) != nullptr) {
     DCHECK(ObjectMarking::IsBlack(object));
     if (!visitor->Visit(object)) {
       if (mode == kClearMarkbits) {
         page->markbits()->ClearRange(
             page->AddressToMarkbitIndex(page->area_start()),
             page->AddressToMarkbitIndex(object->address()));
-        if (page->old_to_new_slots() != nullptr) {
-          page->old_to_new_slots()->RemoveRange(
+        SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
+        if (slot_set != nullptr) {
+          slot_set->RemoveRange(
               0, static_cast<int>(object->address() - page->address()),
               SlotSet::PREFREE_EMPTY_BUCKETS);
         }
-        if (page->typed_old_to_new_slots() != nullptr) {
-          RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
-                                                      object->address());
-        }
+        RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
+                                                    object->address());
         RecomputeLiveBytes(page);
       }
       return false;
     }
   }
   if (mode == kClearMarkbits) {
     page->ClearLiveness();
   }
   return true;
 }
(...skipping 79 matching lines...)
@@ -3577,64 +3576,65 @@
     EvacuateEpilogue();
   }
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
     VerifyEvacuation(heap());
   }
 #endif
 }
 
-template <PointerDirection direction>
+template <RememberedSetType type>
 class PointerUpdateJobTraits {
  public:
   typedef int PerPageData;  // Per page data is not used in this job.
   typedef int PerTaskData;  // Per task data is not used in this job.
 
   static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
                                     PerPageData) {
     UpdateUntypedPointers(heap, chunk);
     UpdateTypedPointers(heap, chunk);
     return true;
   }
   static const bool NeedSequentialFinalization = false;
   static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
   }
 
  private:
   static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
-    if (direction == OLD_TO_NEW) {
+    if (type == OLD_TO_NEW) {
       RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
         return CheckAndUpdateOldToNewSlot(heap, slot);
       });
     } else {
       RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
         return UpdateSlot(reinterpret_cast<Object**>(slot));
       });
     }
   }
 
   static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
-    if (direction == OLD_TO_OLD) {
+    if (type == OLD_TO_OLD) {
       Isolate* isolate = heap->isolate();
       RememberedSet<OLD_TO_OLD>::IterateTyped(
-          chunk, [isolate](SlotType type, Address host_addr, Address slot) {
-            return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot,
-                                                          UpdateSlot);
+          chunk,
+          [isolate](SlotType slot_type, Address host_addr, Address slot) {
+            return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
+                                                          slot, UpdateSlot);
           });
     } else {
       Isolate* isolate = heap->isolate();
       RememberedSet<OLD_TO_NEW>::IterateTyped(
           chunk,
-          [isolate, heap](SlotType type, Address host_addr, Address slot) {
+          [isolate, heap](SlotType slot_type, Address host_addr, Address slot) {
             return UpdateTypedSlotHelper::UpdateTypedSlot(
-                isolate, type, slot, [heap](Object** slot) {
+                isolate, slot_type, slot, [heap](Object** slot) {
                   return CheckAndUpdateOldToNewSlot(
                       heap, reinterpret_cast<Address>(slot));
                 });
           });
     }
   }
 
   static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
                                                        Address slot_address) {
     // There may be concurrent action on slots in dead objects. Concurrent
(...skipping 44 matching lines...)
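PointerUpdateJobTraits is now parameterized on the remembered-set type it processes (the template parameter was renamed from PointerDirection to RememberedSetType), so one traits class covers both the old-to-new and old-to-old updating passes, with the branch on the template parameter resolved per instantiation. A stripped-down, self-contained sketch of that dispatch pattern follows; the placeholder bodies are not V8 code, and only the enum values and the idea of a per-type slot callback come from the diff.

#include <cstdio>

enum RememberedSetType { OLD_TO_NEW, OLD_TO_OLD };

// Non-type template parameter selects the slot-processing behavior per
// instantiation, mirroring the traits class in the diff.
template <RememberedSetType type>
struct PointerUpdateSketch {
  static void UpdateUntypedPointers() {
    if (type == OLD_TO_NEW) {
      // Old-to-new slots must be re-checked: the target may have died or been
      // promoted (CheckAndUpdateOldToNewSlot in the diff).
      std::printf("check and update old-to-new slots\n");
    } else {
      // Old-to-old slots only need the forwarding address written back
      // (UpdateSlot in the diff).
      std::printf("update old-to-old slots\n");
    }
  }
};

int main() {
  PointerUpdateSketch<OLD_TO_NEW>::UpdateUntypedPointers();
  PointerUpdateSketch<OLD_TO_OLD>::UpdateUntypedPointers();
  return 0;
}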
@@ -3685,25 +3685,25 @@
 
 int NumberOfPointerUpdateTasks(int pages) {
   if (!FLAG_parallel_pointer_update) return 1;
   const int available_cores = Max(
       1, static_cast<int>(
              V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
   const int kPagesPerTask = 4;
   return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
 }
 
-template <PointerDirection direction>
+template <RememberedSetType type>
 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
-  PageParallelJob<PointerUpdateJobTraits<direction> > job(
+  PageParallelJob<PointerUpdateJobTraits<type> > job(
       heap, heap->isolate()->cancelable_task_manager(), semaphore);
-  RememberedSet<direction>::IterateMemoryChunks(
+  RememberedSet<type>::IterateMemoryChunks(
       heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
   int num_pages = job.NumberOfPages();
   int num_tasks = NumberOfPointerUpdateTasks(num_pages);
   job.Run(num_tasks, [](int i) { return 0; });
 }
 
 class ToSpacePointerUpdateJobTraits {
  public:
   typedef std::pair<Address, Address> PerPageData;
   typedef PointersUpdatingVisitor* PerTaskData;
(...skipping 134 matching lines...)
@@ -3844,25 +3844,27 @@
     const Sweeper::FreeSpaceTreatmentMode free_space_mode =
         Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
     if (identity == NEW_SPACE) {
       RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
     } else {
       max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
     }
     DCHECK(page->SweepingDone());
 
     // After finishing sweeping of a page we clean up its remembered set.
-    if (page->typed_old_to_new_slots()) {
-      page->typed_old_to_new_slots()->FreeToBeFreedChunks();
+    TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
+    if (typed_slot_set) {
+      page->typed_slot_set<OLD_TO_NEW>()->FreeToBeFreedChunks();
     }
-    if (page->old_to_new_slots()) {
-      page->old_to_new_slots()->FreeToBeFreedBuckets();
+    SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
+    if (slot_set) {
+      page->slot_set<OLD_TO_NEW>()->FreeToBeFreedBuckets();
     }
   }
 
   {
     base::LockGuard<base::Mutex> guard(&mutex_);
     swept_list_[identity].Add(page);
   }
   return max_freed;
 }
 
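After sweeping, the page's OLD_TO_NEW sets release memory that was only queued for freeing while other tasks could still be reading the sets (the FreeToBeFreedChunks and FreeToBeFreedBuckets calls above, matching the PREFREE_EMPTY_BUCKETS mode used earlier in this file). Below is a hedged sketch of that deferred-free pattern, assuming a mutex-protected queue drained at a safe point; the names and layout are illustrative, not the actual SlotSet or TypedSlotSet internals.

#include <memory>
#include <mutex>
#include <vector>

struct Bucket {
  // Recorded slot offsets would live here.
};

// Deferred-free queue: concurrent users unlink buckets and park them here
// instead of deleting them immediately, and the sweeper drains the queue once
// the page is done, so the deallocation happens at a known safe point.
class ToBeFreedQueue {
 public:
  void Defer(std::unique_ptr<Bucket> bucket) {
    std::lock_guard<std::mutex> guard(mutex_);
    to_be_freed_.push_back(std::move(bucket));
  }

  // Called at a safe point, e.g. right after sweeping of the page finished.
  void FreeToBeFreed() {
    std::lock_guard<std::mutex> guard(mutex_);
    to_be_freed_.clear();  // unique_ptr destructors release the buckets
  }

 private:
  std::mutex mutex_;
  std::vector<std::unique_ptr<Bucket>> to_be_freed_;
};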
(...skipping 144 matching lines...)
@@ -4013,10 +4015,10 @@
       // The target is always in old space, we don't have to record the slot in
       // the old-to-new remembered set.
       DCHECK(!heap()->InNewSpace(target));
       RecordRelocSlot(host, &rinfo, target);
     }
   }
 }
 
 }  // namespace internal
 }  // namespace v8