Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index f3df6239ce7d051f61e10d8686ee99c76d4fa207..75d0ecd97353c3d037a31059ad99004b2a32cf7a 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -639,8 +639,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
// of a GC all evacuation candidates are cleared and their slot buffers are
// released.
CHECK(!p->IsEvacuationCandidate());
- CHECK_NULL(p->old_to_old_slots());
- CHECK_NULL(p->typed_old_to_old_slots());
+ CHECK_NULL(p->slot_set<OLD_TO_OLD>());
+ CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
CHECK(p->SweepingDone());
DCHECK(p->area_size() == area_size);
pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
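The shape behind this rename: instead of one named getter per remembered-set flavor (old_to_old_slots(), typed_old_to_old_slots(), and so on), the page exposes a single accessor templated on a RememberedSetType enum that indexes into a per-type array. A minimal self-contained sketch of that pattern, with illustrative stand-in types rather than V8's actual declarations:

    #include <cassert>

    // Stand-ins for V8's SlotSet/TypedSlotSet (the real ones live in
    // src/heap/slot-set.h).
    struct SlotSet {};
    struct TypedSlotSet {};

    enum RememberedSetType { OLD_TO_NEW, OLD_TO_OLD, NUMBER_OF_REMEMBERED_SET_TYPES };

    class PageSketch {
     public:
      // One template replaces a pair of named getters per set type; the enum
      // value selects the array element.
      template <RememberedSetType type>
      SlotSet* slot_set() { return slot_set_[type]; }
      template <RememberedSetType type>
      TypedSlotSet* typed_slot_set() { return typed_slot_set_[type]; }

     private:
      SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES] = {};
      TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES] = {};
    };

    int main() {
      PageSketch p;
      // Mirrors the CHECK_NULLs above: a fresh page has no remembered sets.
      assert(p.slot_set<OLD_TO_OLD>() == nullptr);
      assert(p.typed_slot_set<OLD_TO_OLD>() == nullptr);
    }

Adding a new remembered-set type then means adding an enum value, not another family of getters.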
@@ -3312,10 +3312,10 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode
MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) {
AllocationSpace identity = p->owner()->identity();
- if (p->old_to_new_slots() &&
+ if (p->slot_set<OLD_TO_NEW>() &&
(identity == OLD_SPACE || identity == MAP_SPACE)) {
return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS;
- } else if (p->typed_old_to_new_slots() && identity == CODE_SPACE) {
+ } else if (p->typed_slot_set<OLD_TO_NEW>() && identity == CODE_SPACE) {
return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS;
}
return MarkCompactCollector::Sweeper::DO_NOT_CLEAR;
@@ -3429,7 +3429,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
// Clear invalid typed slots after collecting all free ranges.
if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
- p->typed_old_to_new_slots()->RemoveInvaldSlots(free_ranges);
+ p->typed_slot_set<OLD_TO_NEW>()->RemoveInvaldSlots(free_ranges);
}
// Clear the mark bits of that page and reset live bytes count.
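For context, RawSweep collects the page's free ranges as start/end offsets while sweeping, and an invalid typed slot is one that now falls inside a freed range. A hedged sketch of that filtering step, using a plain map of offsets rather than V8's TypedSlotSet internals:

    #include <cstdint>
    #include <map>
    #include <vector>

    // Free ranges recorded during sweeping: start offset -> end offset.
    using FreeRanges = std::map<uint32_t, uint32_t>;

    // Keep only the slot offsets that do not fall inside any freed range.
    std::vector<uint32_t> FilterInvalidSlots(const std::vector<uint32_t>& slots,
                                             const FreeRanges& free_ranges) {
      std::vector<uint32_t> kept;
      for (uint32_t offset : slots) {
        // The only candidate is the last range starting at or before offset.
        auto it = free_ranges.upper_bound(offset);
        bool freed = false;
        if (it != free_ranges.begin()) {
          --it;  // now it->first <= offset
          freed = offset < it->second;
        }
        if (!freed) kept.push_back(offset);
      }
      return kept;
    }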
@@ -3484,12 +3484,12 @@ bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
page->markbits()->ClearRange(
page->AddressToMarkbitIndex(page->area_start()),
page->AddressToMarkbitIndex(object->address()));
- if (page->old_to_new_slots() != nullptr) {
- page->old_to_new_slots()->RemoveRange(
+ if (page->slot_set<OLD_TO_NEW>() != nullptr) {

ulan 2017/03/20 14:27:14:
Seems like potential race condition here. Should w

Hannes Payer (out of office) 2017/03/20 15:07:08:
This site is safe since we never go from allocated

+ page->slot_set<OLD_TO_NEW>()->RemoveRange(
0, static_cast<int>(object->address() - page->address()),
SlotSet::PREFREE_EMPTY_BUCKETS);
}
- if (page->typed_old_to_new_slots() != nullptr) {
+ if (page->typed_slot_set<OLD_TO_NEW>() != nullptr) {
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
object->address());
}
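The exchange above concerns a time-of-check-to-time-of-use pattern: page->slot_set<OLD_TO_NEW>() is read once in the null check and again in the call, so if another thread could swap the pointer in between, the second read might observe a different value. The reply argues this site is benign because the set never transitions from allocated to deallocated at this point. The defensive variant the reviewer hints at loads the pointer exactly once; a sketch with illustrative declarations:

    #include <atomic>

    struct SlotSet {
      void RemoveRange(int start, int end, int mode) { /* elided */ }
    };

    std::atomic<SlotSet*> old_to_new_slot_set{nullptr};

    void RemoveRangeOnce(int start, int end, int mode) {
      // Single load: the check and the call both use the same local value,
      // so a concurrent swap cannot split them.
      SlotSet* slots = old_to_new_slot_set.load(std::memory_order_acquire);
      if (slots != nullptr) {
        slots->RemoveRange(start, end, mode);
      }
    }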
@@ -3592,7 +3592,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
#endif
}
-template <PointerDirection direction>
+template <RememberedSetType type>
class PointerUpdateJobTraits {
public:
typedef int PerPageData; // Per page data is not used in this job.
@@ -3610,7 +3610,7 @@ class PointerUpdateJobTraits {
private:
static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
- if (direction == OLD_TO_NEW) {
+ if (type == OLD_TO_NEW) {
RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
return CheckAndUpdateOldToNewSlot(heap, slot);
});
@@ -3622,20 +3622,21 @@ class PointerUpdateJobTraits {
}
static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
- if (direction == OLD_TO_OLD) {
+ if (type == OLD_TO_OLD) {
Isolate* isolate = heap->isolate();
RememberedSet<OLD_TO_OLD>::IterateTyped(
- chunk, [isolate](SlotType type, Address host_addr, Address slot) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot,
- UpdateSlot);
+ chunk,
+ [isolate](SlotType slot_type, Address host_addr, Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, slot_type,
+ slot, UpdateSlot);
});
} else {
Isolate* isolate = heap->isolate();
RememberedSet<OLD_TO_NEW>::IterateTyped(
chunk,
- [isolate, heap](SlotType type, Address host_addr, Address slot) {
+ [isolate, heap](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate, type, slot, [heap](Object** slot) {
+ isolate, slot_type, slot, [heap](Object** slot) {
return CheckAndUpdateOldToNewSlot(
heap, reinterpret_cast<Address>(slot));
});
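The lambda parameters here change from type to slot_type as a direct consequence of the template-parameter rename: C++ does not allow a template parameter's name to be redeclared anywhere within the template's scope, so once the class parameter is called type, a lambda parameter of the same name would no longer compile. A minimal illustration with locally declared enums:

    enum RememberedSetType { OLD_TO_NEW, OLD_TO_OLD };
    enum SlotType { CODE_TARGET_SLOT, EMBEDDED_OBJECT_SLOT };

    template <RememberedSetType type>
    struct TraitsSketch {
      static bool Matches(SlotType s) {
        // Naming this parameter `type` would redeclare the template parameter
        // and fail to compile; `slot_type` keeps both names usable.
        auto match = [](SlotType slot_type) {
          return type == OLD_TO_NEW && slot_type == CODE_TARGET_SLOT;
        };
        return match(s);
      }
    };

    int main() {
      return TraitsSketch<OLD_TO_NEW>::Matches(CODE_TARGET_SLOT) ? 0 : 1;
    }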
@@ -3700,11 +3701,11 @@ int NumberOfPointerUpdateTasks(int pages) {
return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
}
-template <PointerDirection direction>
+template <RememberedSetType type>
void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
- PageParallelJob<PointerUpdateJobTraits<direction> > job(
+ PageParallelJob<PointerUpdateJobTraits<type> > job(
heap, heap->isolate()->cancelable_task_manager(), semaphore);
- RememberedSet<direction>::IterateMemoryChunks(
+ RememberedSet<type>::IterateMemoryChunks(
heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
int num_pages = job.NumberOfPages();
int num_tasks = NumberOfPointerUpdateTasks(num_pages);
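The task count computed above is a ceiling division clamped by core count: (pages + kPagesPerTask - 1) / kPagesPerTask rounds up so that a final partial batch of pages still gets its own task. In isolation, with an arbitrary illustrative value for kPagesPerTask:

    #include <algorithm>
    #include <cassert>

    const int kPagesPerTask = 4;  // illustrative; the real constant lives in
                                  // mark-compact.cc

    int NumberOfTasks(int pages, int available_cores) {
      int tasks = (pages + kPagesPerTask - 1) / kPagesPerTask;  // ceil(pages/4)
      return std::min(available_cores, tasks);
    }

    int main() {
      assert(NumberOfTasks(17, 8) == 5);  // ceil(17 / 4) = 5, below the core cap
      assert(NumberOfTasks(17, 2) == 2);  // clamped by available cores
    }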
@@ -3859,11 +3860,11 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
DCHECK(page->SweepingDone());
// After finishing sweeping of a page we clean up its remembered set.
- if (page->typed_old_to_new_slots()) {
- page->typed_old_to_new_slots()->FreeToBeFreedChunks();
+ if (page->typed_slot_set<OLD_TO_NEW>()) {

ulan 2017/03/20 14:27:15:
Cache the value before checking here and below?

Hannes Payer (out of office) 2017/03/20 15:07:08:
As before, that should be safe. But let's be doubl

+ page->typed_slot_set<OLD_TO_NEW>()->FreeToBeFreedChunks();
}
- if (page->old_to_new_slots()) {
- page->old_to_new_slots()->FreeToBeFreedBuckets();
+ if (page->slot_set<OLD_TO_NEW>()) {
+ page->slot_set<OLD_TO_NEW>()->FreeToBeFreedBuckets();
}
}
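FreeToBeFreedChunks and FreeToBeFreedBuckets drain a deferred-free queue: while sweeping runs concurrently, emptied chunks and buckets are detached and queued rather than released in place, and the sweeper frees them once the page's sweeping is done. A hedged sketch of that shape with illustrative types (V8's actual SlotSet manages raw bucket arrays under a mutex):

    #include <memory>
    #include <mutex>
    #include <vector>

    struct Bucket { /* slot bitmap payload */ };

    class SlotSetSketch {
     public:
      // Called concurrently: detach the bucket and queue it rather than
      // deleting it while other threads may still be iterating.
      void PreFreeBucket(std::unique_ptr<Bucket> bucket) {
        std::lock_guard<std::mutex> guard(to_be_freed_mutex_);
        to_be_freed_.push_back(std::move(bucket));
      }

      // Called by the sweeper after sweeping is done, mirroring
      // FreeToBeFreedBuckets above.
      void FreeToBeFreedBuckets() {
        std::lock_guard<std::mutex> guard(to_be_freed_mutex_);
        to_be_freed_.clear();  // destroys every queued bucket
      }

     private:
      std::mutex to_be_freed_mutex_;
      std::vector<std::unique_ptr<Bucket>> to_be_freed_;
    };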