Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 72c0b6b27042c03df2539b2e1a974b66839e195c..6651ad01698806dd70e99c411115e520df51bae0 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -689,8 +689,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     // of a GC all evacuation candidates are cleared and their slot buffers are
     // released.
     CHECK(!p->IsEvacuationCandidate());
-    CHECK_NULL(p->old_to_old_slots());
-    CHECK_NULL(p->typed_old_to_old_slots());
+    CHECK_NULL(p->slot_set<OLD_TO_OLD>());
+    CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
     CHECK(p->SweepingDone());
     DCHECK(p->area_size() == area_size);
     pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
@@ -3430,10 +3430,10 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
 MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode
 MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) {
   AllocationSpace identity = p->owner()->identity();
-  if (p->old_to_new_slots() &&
+  if (p->slot_set<OLD_TO_NEW>() &&
       (identity == OLD_SPACE || identity == MAP_SPACE)) {
     return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS;
-  } else if (p->typed_old_to_new_slots() && identity == CODE_SPACE) {
+  } else if (p->typed_slot_set<OLD_TO_NEW>() && identity == CODE_SPACE) {
     return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS;
   }
   return MarkCompactCollector::Sweeper::DO_NOT_CLEAR;
@@ -3547,7 +3547,10 @@ int MarkCompactCollector::Sweeper::RawSweep(
 
   // Clear invalid typed slots after collection all free ranges.
   if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
-    p->typed_old_to_new_slots()->RemoveInvaldSlots(free_ranges);
+    TypedSlotSet* typed_slot_set = p->typed_slot_set<OLD_TO_NEW>();
+    if (typed_slot_set != nullptr) {
+      typed_slot_set->RemoveInvaldSlots(free_ranges);
+    }
   }
 
   // Clear the mark bits of that page and reset live bytes count.
@@ -3606,15 +3609,14 @@ bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
         state.bitmap()->ClearRange(
             chunk->AddressToMarkbitIndex(chunk->area_start()),
             chunk->AddressToMarkbitIndex(object->address()));
-        if (chunk->old_to_new_slots() != nullptr) {
-          chunk->old_to_new_slots()->RemoveRange(
+        SlotSet* slot_set = chunk->slot_set<OLD_TO_NEW>();
+        if (slot_set != nullptr) {
+          slot_set->RemoveRange(
               0, static_cast<int>(object->address() - chunk->address()),
               SlotSet::PREFREE_EMPTY_BUCKETS);
         }
-        if (chunk->typed_old_to_new_slots() != nullptr) {
-          RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(chunk, chunk->address(),
-                                                      object->address());
-        }
+        RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(chunk, chunk->address(),
+                                                    object->address());
         RecomputeLiveBytes(chunk, state);
       }
       return false;
@@ -3714,7 +3716,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
 #endif
 }
 
-template <PointerDirection direction>
+template <RememberedSetType type>
 class PointerUpdateJobTraits {
  public:
   typedef int PerPageData;  // Per page data is not used in this job.
@@ -3732,7 +3734,7 @@ class PointerUpdateJobTraits {
 
  private:
   static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
-    if (direction == OLD_TO_NEW) {
+    if (type == OLD_TO_NEW) {
       RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
         return CheckAndUpdateOldToNewSlot(heap, slot);
       });
@@ -3744,20 +3746,21 @@ class PointerUpdateJobTraits {
   }
 
   static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
-    if (direction == OLD_TO_OLD) {
+    if (type == OLD_TO_OLD) {
       Isolate* isolate = heap->isolate();
       RememberedSet<OLD_TO_OLD>::IterateTyped(
-          chunk, [isolate](SlotType type, Address host_addr, Address slot) {
-            return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot,
-                                                          UpdateSlot);
+          chunk,
+          [isolate](SlotType slot_type, Address host_addr, Address slot) {
+            return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
+                                                          slot, UpdateSlot);
           });
     } else {
       Isolate* isolate = heap->isolate();
       RememberedSet<OLD_TO_NEW>::IterateTyped(
           chunk,
-          [isolate, heap](SlotType type, Address host_addr, Address slot) {
+          [isolate, heap](SlotType slot_type, Address host_addr, Address slot) {
            return UpdateTypedSlotHelper::UpdateTypedSlot(
-                isolate, type, slot, [heap](Object** slot) {
+                isolate, slot_type, slot, [heap](Object** slot) {
                   return CheckAndUpdateOldToNewSlot(
                       heap, reinterpret_cast<Address>(slot));
                 });
@@ -3824,11 +3827,11 @@ int NumberOfPointerUpdateTasks(int pages) {
   return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
 }
 
-template <PointerDirection direction>
+template <RememberedSetType type>
 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
-  PageParallelJob<PointerUpdateJobTraits<direction> > job(
+  PageParallelJob<PointerUpdateJobTraits<type> > job(
       heap, heap->isolate()->cancelable_task_manager(), semaphore);
-  RememberedSet<direction>::IterateMemoryChunks(
+  RememberedSet<type>::IterateMemoryChunks(
       heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
   int num_pages = job.NumberOfPages();
   int num_tasks = NumberOfPointerUpdateTasks(num_pages);
@@ -3983,11 +3986,13 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
     DCHECK(page->SweepingDone());
 
     // After finishing sweeping of a page we clean up its remembered set.
-    if (page->typed_old_to_new_slots()) {
-      page->typed_old_to_new_slots()->FreeToBeFreedChunks();
+    TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
+    if (typed_slot_set) {
+      typed_slot_set->FreeToBeFreedChunks();
     }
-    if (page->old_to_new_slots()) {
-      page->old_to_new_slots()->FreeToBeFreedBuckets();
+    SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
+    if (slot_set) {
+      slot_set->FreeToBeFreedBuckets();
     }
   }
 
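For reference, a minimal illustrative sketch (not part of the patch; the types below are simplified, hypothetical stand-ins rather than the real V8 declarations) of the accessor shape these call sites migrate to: a single slot_set<type>() / typed_slot_set<type>() accessor parameterized by RememberedSetType, replacing per-direction helpers such as old_to_new_slots() and typed_old_to_old_slots().

// Hypothetical, simplified stand-ins for illustration only.
enum RememberedSetType { OLD_TO_NEW, OLD_TO_OLD, NUMBER_OF_REMEMBERED_SET_TYPES };

struct SlotSet {};       // stand-in for the untyped remembered-set storage
struct TypedSlotSet {};  // stand-in for the typed remembered-set storage

struct PageSketch {
  // One array per slot-set kind, indexed by RememberedSetType, so one
  // templated accessor can serve both OLD_TO_NEW and OLD_TO_OLD.
  SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES] = {nullptr, nullptr};
  TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES] = {nullptr, nullptr};

  template <RememberedSetType type>
  SlotSet* slot_set() { return slot_set_[type]; }

  template <RememberedSetType type>
  TypedSlotSet* typed_slot_set() { return typed_slot_set_[type]; }
};

// Usage mirrors the patched call sites, e.g.:
//   SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
//   if (slot_set != nullptr) { /* remove or free slots */ }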