Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 646e63402a6eb2c0f949b7b35dcc0bd3aa4b3967..b2143d63443da94bd99a2fa9a22cecae5bf80b5a 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -21,7 +21,6 @@
 #include "src/heap/object-stats.h"
 #include "src/heap/objects-visiting-inl.h"
 #include "src/heap/objects-visiting.h"
-#include "src/heap/slots-buffer.h"
 #include "src/heap/spaces-inl.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
@@ -55,8 +54,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
       marking_parity_(ODD_MARKING_PARITY),
       was_marked_incrementally_(false),
       evacuation_(false),
-      slots_buffer_allocator_(nullptr),
-      migration_slots_buffer_(nullptr),
       heap_(heap),
       marking_deque_memory_(NULL),
       marking_deque_memory_committed_(0),
@@ -249,7 +246,6 @@ void MarkCompactCollector::SetUp() {
   free_list_map_space_.Reset(new FreeList(heap_->map_space()));
   EnsureMarkingDequeIsReserved();
   EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
-  slots_buffer_allocator_ = new SlotsBufferAllocator();
 
   if (FLAG_flush_code) {
     code_flusher_ = new CodeFlusher(isolate());
@@ -263,7 +259,6 @@ void MarkCompactCollector::SetUp() {
 
 void MarkCompactCollector::TearDown() {
   AbortCompaction();
   delete marking_deque_memory_;
-  delete slots_buffer_allocator_;
   delete code_flusher_;
 }
@@ -310,55 +305,26 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
   return compacting_;
 }
 
-
-void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
+void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
   {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
     RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
   }
+  // There is no need to filter the old to old set because
+  // it is completely cleared after the mark-compact GC.
+  // The slots that become invalid due to runtime transitions are
+  // cleared eagerly immediately after the transition.
-  {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
-    for (Page* p : evacuation_candidates_) {
-      SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
-    }
-  }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
-    VerifyValidStoreAndSlotsBufferEntries();
+    RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
+    RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
   }
 #endif
 }
 
-#ifdef VERIFY_HEAP
-static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
-    SlotsBuffer::VerifySlots(heap, p->slots_buffer());
-  }
-}
-
-
-void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
-  RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
-
-  VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
-  VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
-  VerifyValidSlotsBufferEntries(heap(), heap()->map_space());
-
-  LargeObjectIterator it(heap()->lo_space());
-  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
-    SlotsBuffer::VerifySlots(heap(), chunk->slots_buffer());
-  }
-}
-#endif
-
-
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.
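
NOTE: With the per-page SlotsBuffer gone, only the old-to-new set still needs
filtering at this point: it survives from one GC to the next, so it can name
slots inside objects that have died since they were recorded. As a rough
sketch of what such a filter amounts to (the real ClearInvalidSlots lives in
the remembered-set code, not in this file, and KEEP_SLOT is assumed here by
analogy with the REMOVE_SLOT result used later in this patch):

    RememberedSet<OLD_TO_NEW>::Iterate(heap, [heap](Address slot) {
      // Keep a slot only if it still lies inside a live object.
      return heap->mark_compact_collector()->IsSlotInLiveObject(slot)
                 ? KEEP_SLOT
                 : REMOVE_SLOT;
    });
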
@@ -708,7 +674,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     // of a GC all evacuation candidates are cleared and their slot buffers are
     // released.
     CHECK(!p->IsEvacuationCandidate());
-    CHECK(p->slots_buffer() == nullptr);
+    CHECK_NULL(p->old_to_old_slots());
+    CHECK_NULL(p->typed_old_to_old_slots());
     CHECK(p->SweepingDone());
     DCHECK(p->area_size() == area_size);
     pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
@@ -814,8 +781,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
 
 void MarkCompactCollector::AbortCompaction() {
   if (compacting_) {
+    RememberedSet<OLD_TO_OLD>::ClearAll(heap());
    for (Page* p : evacuation_candidates_) {
-      slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
      p->ClearEvacuationCandidate();
      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
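
NOTE: Aborting compaction is now a single ClearAll() over the old-to-old set
instead of deallocating a SlotsBuffer chain per candidate page. The reason
this is cheap is the underlying representation: slots are kept per page,
keyed by their offset. A standalone model of that idea (all names invented
for illustration; this is not V8's SlotSet):

    #include <bitset>
    #include <cstdint>
    #include <map>

    constexpr uintptr_t kPageSize = uintptr_t{1} << 19;  // 512 KB, V8-like
    constexpr size_t kSlotsPerPage = kPageSize / sizeof(void*);

    class RememberedSetModel {
     public:
      void Insert(uintptr_t slot) {
        pages_[slot & ~(kPageSize - 1)].set(
            (slot & (kPageSize - 1)) / sizeof(void*));
      }
      // What AbortCompaction() now does, morally: drop everything at once.
      void ClearAll() { pages_.clear(); }

     private:
      std::map<uintptr_t, std::bitset<kSlotsPerPage>> pages_;
    };
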
@@ -1231,9 +1198,11 @@ class MarkCompactMarkingVisitor
       // was marked through the compilation cache before marker reached JSRegExp
       // object.
       FixedArray* data = FixedArray::cast(re->data());
-      Object** slot =
-          data->data_start() + JSRegExp::saved_code_index(is_one_byte);
-      heap->mark_compact_collector()->RecordSlot(data, slot, code);
+      if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(data))) {
+        Object** slot =
+            data->data_start() + JSRegExp::saved_code_index(is_one_byte);
+        heap->mark_compact_collector()->RecordSlot(data, slot, code);
+      }
 
       // Set a number in the 0-255 range to guarantee no smi overflow.
       re->SetDataAt(JSRegExp::code_index(is_one_byte),
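
NOTE: The new IsBlackOrGrey guard exists because recorded old-to-old slots
are no longer filtered before the update phase (see
ClearInvalidRememberedSetSlots above): a slot recorded inside an unmarked
"data" array could sit in freed space by the time slots are updated. The
implied contract, as a sketch (host_object, slot and target are placeholder
names, not code from this patch):

    // Only record a slot when the object containing it survives this GC.
    if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(host_object))) {
      heap->mark_compact_collector()->RecordSlot(host_object, slot, target);
    }
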
@@ -1530,12 +1499,12 @@ class MarkCompactCollector::EvacuateVisitorBase
     : public MarkCompactCollector::HeapObjectVisitor {
  public:
   EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
-                      SlotsBuffer** evacuation_slots_buffer,
-                      LocalStoreBuffer* local_store_buffer)
+                      LocalSlotsBuffer* old_to_old_slots,
+                      LocalSlotsBuffer* old_to_new_slots)
       : heap_(heap),
-        evacuation_slots_buffer_(evacuation_slots_buffer),
         compaction_spaces_(compaction_spaces),
-        local_store_buffer_(local_store_buffer) {}
+        old_to_old_slots_(old_to_old_slots),
+        old_to_new_slots_(old_to_new_slots) {}
 
   bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
                          HeapObject** target_object) {
@@ -1545,7 +1514,7 @@ class MarkCompactCollector::EvacuateVisitorBase
     if (allocation.To(target_object)) {
       heap_->mark_compact_collector()->MigrateObject(
           *target_object, object, size, target_space->identity(),
-          evacuation_slots_buffer_, local_store_buffer_);
+          old_to_old_slots_, old_to_new_slots_);
       return true;
     }
     return false;
@@ -1553,9 +1522,9 @@ class MarkCompactCollector::EvacuateVisitorBase
 
  protected:
   Heap* heap_;
-  SlotsBuffer** evacuation_slots_buffer_;
   CompactionSpaceCollection* compaction_spaces_;
-  LocalStoreBuffer* local_store_buffer_;
+  LocalSlotsBuffer* old_to_old_slots_;
+  LocalSlotsBuffer* old_to_new_slots_;
 };
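
NOTE: The two LocalSlotsBuffer parameters replace the old
SlotsBuffer/LocalStoreBuffer pair with one type used for both directions.
Each evacuating thread records into these unsynchronized local buffers and
publishes them in Evacuator::Finalize() further down. Usage as it appears in
this patch (slot_addr is a placeholder):

    LocalSlotsBuffer old_to_new;
    LocalSlotsBuffer old_to_old;
    old_to_new.Record(slot_addr);                   // untyped slot
    old_to_old.Record(CODE_ENTRY_SLOT, slot_addr);  // typed slot
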
@@ -1567,11 +1536,11 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
 
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    CompactionSpaceCollection* compaction_spaces,
-                                   SlotsBuffer** evacuation_slots_buffer,
-                                   LocalStoreBuffer* local_store_buffer,
+                                   LocalSlotsBuffer* old_to_old_slots,
+                                   LocalSlotsBuffer* old_to_new_slots,
                                    HashMap* local_pretenuring_feedback)
-      : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
-                            local_store_buffer),
+      : EvacuateVisitorBase(heap, compaction_spaces, old_to_old_slots,
+                            old_to_new_slots),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         space_to_allocate_(NEW_SPACE),
         promoted_size_(0),
@@ -1598,8 +1567,8 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
     AllocationSpace space = AllocateTargetObject(object, &target);
     heap_->mark_compact_collector()->MigrateObject(
         HeapObject::cast(target), object, size, space,
-        (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_,
-        (space == NEW_SPACE) ? nullptr : local_store_buffer_);
+        (space == NEW_SPACE) ? nullptr : old_to_old_slots_,
+        (space == NEW_SPACE) ? nullptr : old_to_new_slots_);
     if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
       heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
     }
@@ -1719,10 +1688,10 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
                           CompactionSpaceCollection* compaction_spaces,
-                          SlotsBuffer** evacuation_slots_buffer,
-                          LocalStoreBuffer* local_store_buffer)
-      : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
-                            local_store_buffer) {}
+                          LocalSlotsBuffer* old_to_old_slots,
+                          LocalSlotsBuffer* old_to_new_slots)
+      : EvacuateVisitorBase(heap, compaction_spaces, old_to_old_slots,
+                            old_to_new_slots) {}
 
   bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
@@ -2179,7 +2148,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
 
   ClearWeakCollections();
 
-  ClearInvalidStoreAndSlotsBufferEntries();
+  ClearInvalidRememberedSetSlots();
 }
@@ -2542,88 +2511,56 @@ void MarkCompactCollector::AbortTransitionArrays() {
 }
 
 void MarkCompactCollector::RecordMigratedSlot(
-    Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer,
-    LocalStoreBuffer* local_store_buffer) {
+    Object* value, Address slot, LocalSlotsBuffer* old_to_old_slots,
+    LocalSlotsBuffer* old_to_new_slots) {
   // When parallel compaction is in progress, store and slots buffer entries
   // require synchronization.
   if (heap_->InNewSpace(value)) {
     if (compaction_in_progress_) {
-      local_store_buffer->Record(slot);
+      old_to_new_slots->Record(slot);
     } else {
       Page* page = Page::FromAddress(slot);
      RememberedSet<OLD_TO_NEW>::Insert(page, slot);
    }
  } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
-    SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
-                       reinterpret_cast<Object**>(slot),
-                       SlotsBuffer::IGNORE_OVERFLOW);
-  }
-}
-
-
-void MarkCompactCollector::RecordMigratedCodeEntrySlot(
-    Address code_entry, Address code_entry_slot,
-    SlotsBuffer** evacuation_slots_buffer) {
-  if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
-    SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
-                       SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
-                       SlotsBuffer::IGNORE_OVERFLOW);
+    old_to_old_slots->Record(slot);
   }
 }
 
-
-void MarkCompactCollector::RecordMigratedCodeObjectSlot(
-    Address code_object, SlotsBuffer** evacuation_slots_buffer) {
-  SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
-                     SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
-                     SlotsBuffer::IGNORE_OVERFLOW);
-}
-
-
-static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
+static inline SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
   if (RelocInfo::IsCodeTarget(rmode)) {
-    return SlotsBuffer::CODE_TARGET_SLOT;
+    return CODE_TARGET_SLOT;
   } else if (RelocInfo::IsCell(rmode)) {
-    return SlotsBuffer::CELL_TARGET_SLOT;
+    return CELL_TARGET_SLOT;
   } else if (RelocInfo::IsEmbeddedObject(rmode)) {
-    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
+    return EMBEDDED_OBJECT_SLOT;
   } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
-    return SlotsBuffer::DEBUG_TARGET_SLOT;
+    return DEBUG_TARGET_SLOT;
   }
   UNREACHABLE();
-  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
-}
-
-
-static inline SlotsBuffer::SlotType DecodeSlotType(
-    SlotsBuffer::ObjectSlot slot) {
-  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
+  return NUMBER_OF_SLOT_TYPES;
 }
 
-
-void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
+void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
+                                           Object* target) {
   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
   RelocInfo::Mode rmode = rinfo->rmode();
   if (target_page->IsEvacuationCandidate() &&
       (rinfo->host() == NULL ||
       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
     Address addr = rinfo->pc();
-    SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
+    SlotType slot_type = SlotTypeForRMode(rmode);
     if (rinfo->IsInConstantPool()) {
       addr = rinfo->constant_pool_entry_address();
       if (RelocInfo::IsCodeTarget(rmode)) {
-        slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
+        slot_type = CODE_ENTRY_SLOT;
       } else {
         DCHECK(RelocInfo::IsEmbeddedObject(rmode));
-        slot_type = SlotsBuffer::OBJECT_SLOT;
       }
     }
-    bool success = SlotsBuffer::AddTo(
-        slots_buffer_allocator_, target_page->slots_buffer_address(), slot_type,
-        addr, SlotsBuffer::FAIL_ON_OVERFLOW);
-    if (!success) {
-      EvictPopularEvacuationCandidate(target_page);
-    }
+    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, slot_type, addr);
  }
 }
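
NOTE: Two things change here besides naming. Typed slots are now keyed by the
page of the host code object (source_page) rather than by the target page,
which is what later lets InvalidateCode drop a code object's slots with one
per-page range removal. And the FAIL_ON_OVERFLOW path is gone entirely: a
per-page slot set has a fixed place for every slot offset, so insertion
cannot fail. In sketch form, only the decision to record still looks at the
target:

    if (target_page->IsEvacuationCandidate()) {
      // The slot itself lives on the host's page, so record it there.
      RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, slot_type, addr);
    }
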
@@ -2631,23 +2568,21 @@ void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
 
 class RecordMigratedSlotVisitor final : public ObjectVisitor {
  public:
   RecordMigratedSlotVisitor(MarkCompactCollector* collector,
-                            SlotsBuffer** evacuation_slots_buffer,
-                            LocalStoreBuffer* local_store_buffer)
+                            LocalSlotsBuffer* old_to_old_slots,
+                            LocalSlotsBuffer* old_to_new_slots)
       : collector_(collector),
-        evacuation_slots_buffer_(evacuation_slots_buffer),
-        local_store_buffer_(local_store_buffer) {}
+        old_to_old_slots_(old_to_old_slots),
+        old_to_new_slots_(old_to_new_slots) {}
 
   V8_INLINE void VisitPointer(Object** p) override {
     collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
-                                   evacuation_slots_buffer_,
-                                   local_store_buffer_);
+                                   old_to_old_slots_, old_to_new_slots_);
   }
 
   V8_INLINE void VisitPointers(Object** start, Object** end) override {
     while (start < end) {
       collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
-                                     evacuation_slots_buffer_,
-                                     local_store_buffer_);
+                                     old_to_old_slots_, old_to_new_slots_);
       ++start;
     }
   }
@@ -2655,15 +2590,16 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
 
   V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
     if (collector_->compacting_) {
       Address code_entry = Memory::Address_at(code_entry_slot);
-      collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
-                                              evacuation_slots_buffer_);
+      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+        old_to_old_slots_->Record(CODE_ENTRY_SLOT, code_entry_slot);
+      }
     }
   }
 
  private:
   MarkCompactCollector* collector_;
-  SlotsBuffer** evacuation_slots_buffer_;
-  LocalStoreBuffer* local_store_buffer_;
+  LocalSlotsBuffer* old_to_old_slots_;
+  LocalSlotsBuffer* old_to_new_slots_;
 };
 
@@ -2683,31 +2619,28 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
 // pointers to new space.
 void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
                                          int size, AllocationSpace dest,
-                                         SlotsBuffer** evacuation_slots_buffer,
-                                         LocalStoreBuffer* local_store_buffer) {
+                                         LocalSlotsBuffer* old_to_old_slots,
+                                         LocalSlotsBuffer* old_to_new_slots) {
   Address dst_addr = dst->address();
   Address src_addr = src->address();
   DCHECK(heap()->AllowedToBeMigrated(src, dest));
   DCHECK(dest != LO_SPACE);
   if (dest == OLD_SPACE) {
     DCHECK_OBJECT_SIZE(size);
-    DCHECK(evacuation_slots_buffer != nullptr);
     DCHECK(IsAligned(size, kPointerSize));
     heap()->MoveBlock(dst->address(), src->address(), size);
-    RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer,
-                                      local_store_buffer);
+    RecordMigratedSlotVisitor visitor(this, old_to_old_slots, old_to_new_slots);
     dst->IterateBody(&visitor);
   } else if (dest == CODE_SPACE) {
     DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
-    DCHECK(evacuation_slots_buffer != nullptr);
     PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
     heap()->MoveBlock(dst_addr, src_addr, size);
-    RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
+    old_to_old_slots->Record(RELOCATED_CODE_OBJECT, dst_addr);
     Code::cast(dst)->Relocate(dst_addr - src_addr);
   } else {
     DCHECK_OBJECT_SIZE(size);
-    DCHECK(evacuation_slots_buffer == nullptr);
+    DCHECK(old_to_old_slots == nullptr);
     DCHECK(dest == NEW_SPACE);
     heap()->MoveBlock(dst_addr, src_addr, size);
   }
@@ -2715,41 +2648,40 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
   Memory::Address_at(src_addr) = dst_addr;
 }
 
-
-static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
-                              SlotsBuffer::SlotType slot_type, Address addr) {
+static inline void UpdateTypedSlot(Isolate* isolate, ObjectVisitor* v,
+                                   SlotType slot_type, Address addr) {
   switch (slot_type) {
-    case SlotsBuffer::CODE_TARGET_SLOT: {
+    case CODE_TARGET_SLOT: {
      RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
      rinfo.Visit(isolate, v);
      break;
    }
-    case SlotsBuffer::CELL_TARGET_SLOT: {
+    case CELL_TARGET_SLOT: {
      RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
      rinfo.Visit(isolate, v);
      break;
    }
-    case SlotsBuffer::CODE_ENTRY_SLOT: {
+    case CODE_ENTRY_SLOT: {
      v->VisitCodeEntry(addr);
      break;
    }
-    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
+    case RELOCATED_CODE_OBJECT: {
      HeapObject* obj = HeapObject::FromAddress(addr);
      Code::BodyDescriptor::IterateBody(obj, v);
      break;
    }
-    case SlotsBuffer::DEBUG_TARGET_SLOT: {
+    case DEBUG_TARGET_SLOT: {
      RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
                      NULL);
      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
      break;
    }
-    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
+    case EMBEDDED_OBJECT_SLOT: {
      RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
      rinfo.Visit(isolate, v);
      break;
    }
-    case SlotsBuffer::OBJECT_SLOT: {
+    case OBJECT_SLOT: {
      v->VisitPointer(reinterpret_cast<Object**>(addr));
      break;
    }
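
NOTE: UpdateTypedSlot is the single decoder for every typed-slot kind:
RelocInfo-based kinds are re-materialized as a RelocInfo and visited, while
code entries and relocated code objects go straight to the visitor. It is
driven from the typed old-to-old set during the update phase; the snippet
below mirrors the hunk that appears later in this patch:

    RememberedSet<OLD_TO_OLD>::IterateTyped(
        heap, [isolate, visitor](SlotType type, Address addr) {
          UpdateTypedSlot(isolate, visitor, type, addr);
          return REMOVE_SLOT;
        });
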
@@ -2854,32 +2786,6 @@ class PointersUpdatingVisitor : public ObjectVisitor {
 };
 
-void MarkCompactCollector::UpdateSlots(SlotsBuffer* buffer) {
-  PointersUpdatingVisitor v(heap_);
-  size_t buffer_size = buffer->Size();
-
-  for (size_t slot_idx = 0; slot_idx < buffer_size; ++slot_idx) {
-    SlotsBuffer::ObjectSlot slot = buffer->Get(slot_idx);
-    if (!SlotsBuffer::IsTypedSlot(slot)) {
-      PointersUpdatingVisitor::UpdateSlot(heap_, slot);
-    } else {
-      ++slot_idx;
-      DCHECK(slot_idx < buffer_size);
-      UpdateSlot(heap_->isolate(), &v, DecodeSlotType(slot),
-                 reinterpret_cast<Address>(buffer->Get(slot_idx)));
-    }
-  }
-}
-
-
-void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
-  while (buffer != NULL) {
-    UpdateSlots(buffer);
-    buffer = buffer->next();
-  }
-}
-
-
 static void UpdatePointer(HeapObject** address, HeapObject* object) {
   MapWord map_word = object->map_word();
   // Since we only filter invalid slots in old space, the store buffer can
@@ -3001,33 +2907,33 @@ bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
   return false;
 }
 
-
-bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) {
+HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
+  Page* p = Page::FromAddress(slot);
   // This function does not support large objects right now.
   Space* owner = p->owner();
-  if (owner == heap_->lo_space() || owner == NULL) {
+  if (owner == heap_->lo_space() || owner == nullptr) {
     Object* large_object = heap_->lo_space()->FindObject(slot);
     // This object has to exist, otherwise we would not have recorded a slot
     // for it.
     CHECK(large_object->IsHeapObject());
     HeapObject* large_heap_object = HeapObject::cast(large_object);
+
     if (IsMarked(large_heap_object)) {
-      return true;
+      return large_heap_object;
     }
-    return false;
+    return nullptr;
   }
 
   LiveObjectIterator<kBlackObjects> it(p);
-  HeapObject* object = NULL;
-  while ((object = it.Next()) != NULL) {
+  HeapObject* object = nullptr;
+  while ((object = it.Next()) != nullptr) {
     int size = object->Size();
-
-    if (object->address() > slot) return false;
+    if (object->address() > slot) return nullptr;
     if (object->address() <= slot && slot < (object->address() + size)) {
-      return true;
+      return object;
     }
   }
-  return false;
+  return nullptr;
 }
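
NOTE: Besides the rename, the signature changes in two ways: the page is now
derived from the slot instead of being passed in, and the function returns
the containing black object (or nullptr) rather than a bool. A hypothetical
caller, to show why the richer return type is useful (sketch only, not code
from this patch):

    HeapObject* owner = collector->FindBlackObjectBySlotSlow(slot);
    if (owner != nullptr) {
      // The slot is inside a live object, and that object is now at hand.
    }
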
@@ -3046,18 +2952,6 @@ bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
 }
 
-void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
-                                                    HeapObject* object) {
-  // The target object has to be black.
-  CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-  // The target object is black but we don't know if the source slot is black.
-  // The source object could have died and the slot could be part of a free
-  // space. Use the mark bit iterator to find out about liveness of the slot.
-  CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
-}
-
-
 void MarkCompactCollector::EvacuateNewSpacePrologue() {
   NewSpace* new_space = heap()->new_space();
   NewSpacePageIterator it(new_space->bottom(), new_space->top());
@@ -3074,12 +2968,6 @@ void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
 }
 
-void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
-    SlotsBuffer* evacuation_slots_buffer) {
-  base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
-  evacuation_slots_buffers_.Add(evacuation_slots_buffer);
-}
-
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
   Evacuator(MarkCompactCollector* collector,
@@ -3089,15 +2977,13 @@ class MarkCompactCollector::Evacuator : public Malloced {
         evacuation_candidates_(evacuation_candidates),
         newspace_evacuation_candidates_(newspace_evacuation_candidates),
         compaction_spaces_(collector->heap()),
-        local_slots_buffer_(nullptr),
-        local_store_buffer_(collector->heap()),
         local_pretenuring_feedback_(HashMap::PointersMatch,
                                     kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(collector->heap(), &compaction_spaces_,
-                           &local_slots_buffer_, &local_store_buffer_,
+                           &old_to_old_slots_, &old_to_new_slots_,
                            &local_pretenuring_feedback_),
         old_space_visitor_(collector->heap(), &compaction_spaces_,
-                           &local_slots_buffer_, &local_store_buffer_),
+                           &old_to_old_slots_, &old_to_new_slots_),
         duration_(0.0),
         bytes_compacted_(0),
         task_id_(0) {}
@@ -3134,8 +3020,8 @@ class MarkCompactCollector::Evacuator : public Malloced {
 
   // Locally cached collector data.
   CompactionSpaceCollection compaction_spaces_;
-  SlotsBuffer* local_slots_buffer_;
-  LocalStoreBuffer local_store_buffer_;
+  LocalSlotsBuffer old_to_old_slots_;
+  LocalSlotsBuffer old_to_new_slots_;
   HashMap local_pretenuring_feedback_;
 
   // Vistors for the corresponding spaces.
@@ -3213,8 +3099,22 @@ void MarkCompactCollector::Evacuator::Finalize() {
                              new_space_visitor_.promoted_size() +
                              new_space_visitor_.semispace_copied_size());
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
-  local_store_buffer_.Process(heap()->store_buffer());
-  collector_->AddEvacuationSlotsBufferSynchronized(local_slots_buffer_);
+  // Move locally recorded slots to the global remembered sets.
+  old_to_new_slots_.Iterate(
+      [](Address slot) {
+        Page* page = Page::FromAddress(slot);
+        RememberedSet<OLD_TO_NEW>::Insert(page, slot);
+      },
+      [](SlotType type, Address slot) { UNREACHABLE(); });
+  old_to_old_slots_.Iterate(
+      [](Address slot) {
+        Page* page = Page::FromAddress(slot);
+        RememberedSet<OLD_TO_OLD>::Insert(page, slot);
+      },
+      [](SlotType type, Address slot) {
+        Page* page = Page::FromAddress(slot);
+        RememberedSet<OLD_TO_OLD>::InsertTyped(page, type, slot);
+      });
 }
 
 class MarkCompactCollector::CompactionTask : public CancelableTask {
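
NOTE: Finalize() publishes the thread-local buffers into the global sets. The
asymmetry is deliberate: the typed callback for old-to-new is UNREACHABLE()
because pointers into new space are only ever recorded as untyped address
slots. A standalone model of the Iterate() shape used here (invented names,
not V8's implementation):

    #include <cstdint>
    #include <utility>
    #include <vector>

    enum SlotTypeModel { kCodeEntry, kRelocatedCode };

    class LocalSlotsBufferModel {
     public:
      void Record(uintptr_t slot) { untyped_.push_back(slot); }
      void Record(SlotTypeModel type, uintptr_t slot) {
        typed_.emplace_back(type, slot);
      }
      // Visit untyped and typed slots with separate callbacks.
      template <typename UntypedCB, typename TypedCB>
      void Iterate(UntypedCB untyped_cb, TypedCB typed_cb) {
        for (uintptr_t s : untyped_) untyped_cb(s);
        for (const auto& p : typed_) typed_cb(p.first, p.second);
      }

     private:
      std::vector<uintptr_t> untyped_;
      std::vector<std::pair<SlotTypeModel, uintptr_t>> typed_;
    };
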
@@ -3521,8 +3421,10 @@ void MarkCompactCollector::InvalidateCode(Code* code) {
     // Ignore all slots that might have been recorded in the body of the
     // deoptimized code object. Assumption: no slots will be recorded for
     // this object after invalidating it.
-    RemoveObjectSlots(code->instruction_start(),
-                      code->address() + code->Size());
+    Page* page = Page::FromAddress(code->address());
+    Address start = code->instruction_start();
+    Address end = code->address() + code->Size();
+    RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
   }
 }
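
NOTE: This can be a single per-page call only because of the keying change in
RecordRelocSlot above: every typed slot recorded for a code object is stored
on that object's own page, so dropping all slots in [instruction_start,
code end) is a local range erase. A standalone sketch of the idea (invented
names; V8's typed slot storage differs):

    #include <cstdint>
    #include <map>

    // Remove every typed slot in [start, end) recorded on one page,
    // assuming slots are keyed by address. int stands in for a slot type.
    void RemoveRangeTypedModel(std::map<uintptr_t, int>* typed_slots,
                               uintptr_t start, uintptr_t end) {
      typed_slots->erase(typed_slots->lower_bound(start),
                         typed_slots->lower_bound(end));
    }
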
@@ -3533,21 +3435,6 @@ bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
 }
 
-void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
-                                             Address end_slot) {
-  // Remove entries by replacing them with an old-space slot containing a smi
-  // that is located in an unmovable page.
-  for (Page* p : evacuation_candidates_) {
-    DCHECK(p->IsEvacuationCandidate() ||
-           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-    if (p->IsEvacuationCandidate()) {
-      SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
-                                     end_slot);
-    }
-  }
-}
-
-
 #ifdef VERIFY_HEAP
 static void VerifyAllBlackObjects(MemoryChunk* page) {
   LiveObjectIterator<kAllLiveObjects> it(page);
@@ -3699,30 +3586,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
 
 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
   GCTracer::Scope gc_scope(heap()->tracer(),
                            GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
-  {
-    GCTracer::Scope gc_scope(
-        heap()->tracer(),
-        GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
-    UpdateSlotsRecordedIn(migration_slots_buffer_);
-    if (FLAG_trace_fragmentation_verbose) {
-      PrintF("  migration slots buffer: %d\n",
-             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
-    }
-    slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
-    DCHECK(migration_slots_buffer_ == NULL);
-
-    // TODO(hpayer): Process the slots buffers in parallel. This has to be done
-    // after evacuation of all pages finishes.
-    int buffers = evacuation_slots_buffers_.length();
-    for (int i = 0; i < buffers; i++) {
-      SlotsBuffer* buffer = evacuation_slots_buffers_[i];
-      UpdateSlotsRecordedIn(buffer);
-      slots_buffer_allocator_->DeallocateChain(&buffer);
-    }
-    evacuation_slots_buffers_.Rewind(0);
-  }
-
-  // Second pass: find pointers to new space and update them.
+
   PointersUpdatingVisitor updating_visitor(heap());
 
   {
@@ -3743,6 +3607,26 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
   }
 
   {
+    Heap* heap = this->heap();
+    GCTracer::Scope gc_scope(
+        heap->tracer(),
+        GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
+
+    RememberedSet<OLD_TO_OLD>::Iterate(heap, [heap](Address slot) {
+      PointersUpdatingVisitor::UpdateSlot(heap,
+                                          reinterpret_cast<Object**>(slot));
+      return REMOVE_SLOT;
+    });
+    Isolate* isolate = heap->isolate();
+    PointersUpdatingVisitor* visitor = &updating_visitor;
+    RememberedSet<OLD_TO_OLD>::IterateTyped(
+        heap, [isolate, visitor](SlotType type, Address slot) {
+          UpdateTypedSlot(isolate, visitor, type, slot);
+          return REMOVE_SLOT;
+        });
+  }
+
+  {
     GCTracer::Scope gc_scope(
         heap()->tracer(),
         GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
@@ -3751,13 +3635,6 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
              p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
 
       if (p->IsEvacuationCandidate()) {
-        UpdateSlotsRecordedIn(p->slots_buffer());
-        if (FLAG_trace_fragmentation_verbose) {
-          PrintF("  page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
-                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
-        }
-        slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
-
        // Important: skip list should be cleared only after roots were updated
        // because root iteration traverses the stack and might have to find
        // code objects from non-updated pc pointing into evacuation candidate.
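
NOTE: The update phase now walks the global old-to-old set once for untyped
and once for typed slots; returning REMOVE_SLOT from each callback drains the
set as a side effect. That is exactly the "completely cleared after the
mark-compact GC" property that ClearInvalidRememberedSetSlots relies on near
the top of this patch. The visit-and-drain pattern as a standalone model:

    #include <cstdint>
    #include <vector>

    enum SlotCallbackResultModel { kKeepSlot, kRemoveSlot };

    // Let the callback update each slot; erase those it asks to remove.
    template <typename Callback>
    void IterateAndDrain(std::vector<uintptr_t>* slots, Callback cb) {
      size_t kept = 0;
      for (uintptr_t slot : *slots) {
        if (cb(slot) == kKeepSlot) (*slots)[kept++] = slot;
      }
      slots->resize(kept);  // empty afterwards if nothing asked to keep
    }
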
@@ -4019,41 +3896,13 @@ void MarkCompactCollector::Initialize() {
   IncrementalMarking::Initialize();
 }
 
-
-void MarkCompactCollector::EvictPopularEvacuationCandidate(Page* page) {
-  if (FLAG_trace_fragmentation) {
-    PrintF("Page %p is too popular. Disabling evacuation.\n",
-           reinterpret_cast<void*>(page));
-  }
-
-  isolate()->CountUsage(v8::Isolate::UseCounterFeature::kSlotsBufferOverflow);
-
-  // TODO(gc) If all evacuation candidates are too popular we
-  // should stop slots recording entirely.
-  page->ClearEvacuationCandidate();
-
-  DCHECK(!page->IsFlagSet(Page::POPULAR_PAGE));
-  page->SetFlag(Page::POPULAR_PAGE);
-
-  // We were not collecting slots on this page that point
-  // to other evacuation candidates thus we have to
-  // rescan the page after evacuation to discover and update all
-  // pointers to evacuated objects.
-  page->SetFlag(Page::RESCAN_ON_EVACUATION);
-}
-
-
-void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* object, Address slot,
+void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
                                                Code* target) {
   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
   if (target_page->IsEvacuationCandidate() &&
-      !ShouldSkipEvacuationSlotRecording(object)) {
-    if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
-                            target_page->slots_buffer_address(),
-                            SlotsBuffer::CODE_ENTRY_SLOT, slot,
-                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
-      EvictPopularEvacuationCandidate(target_page);
-    }
+      !ShouldSkipEvacuationSlotRecording(host)) {
+    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, CODE_ENTRY_SLOT, slot);
   }
 }
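
NOTE: With SlotsBuffer, recording could fail (FAIL_ON_OVERFLOW) and a
"popular" target page then had to be evicted from the candidate set and
rescanned after evacuation; a per-page slot set has a bit for every possible
slot offset, so recording cannot fail and the whole POPULAR_PAGE machinery
disappears. Back-of-envelope cost of that guarantee, assuming 512 KB pages
and 8-byte slots (V8-like numbers, for illustration only):

    #include <cstdio>

    int main() {
      const long page = 512 * 1024;   // bytes per page
      const long slots = page / 8;    // 65536 addressable slots
      const long bitmap = slots / 8;  // 8192 bytes of presence bits
      std::printf("bitmap overhead: %ld bytes (%.2f%% of the page)\n",
                  bitmap, 100.0 * bitmap / page);  // ~1.56%
      return 0;
    }
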
@@ -4067,7 +3916,7 @@ void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
-      RecordRelocSlot(&rinfo, target);
+      RecordRelocSlot(host, &rinfo, target);
     }
   }
 }