Index: src/mark-compact.cc
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index b8c812fb2d053a92eba4855d04d6680b3471a3d5..a3d61802a15e9fc0d688cc131224d48f44410cdc 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "compilation-cache.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "gdb-jit.h"
 #include "global-handles.h"
@@ -59,6 +60,7 @@ MarkCompactCollector::MarkCompactCollector() :  // NOLINT
 #endif
       sweep_precisely_(false),
       compacting_(false),
+      was_marked_incrementally_(false),
       collect_maps_(FLAG_collect_maps),
       tracer_(NULL),
       migration_slots_buffer_(NULL),
@@ -238,9 +240,7 @@ void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
 
 
 bool MarkCompactCollector::StartCompaction() {
-  // Don't start compaction if we are in the middle of incremental
-  // marking cycle. We did not collect any slots.
-  if (!compacting_ && !heap_->incremental_marking()->IsMarking()) {
+  if (!compacting_) {
     ASSERT(evacuation_candidates_.length() == 0);
 
     CollectEvacuationCandidates(heap()->old_pointer_space());
@@ -258,22 +258,6 @@ bool MarkCompactCollector::StartCompaction() {
 }
 
 
-void MarkCompactCollector::AbortCompaction() {
-  if (compacting_) {
-    int npages = evacuation_candidates_.length();
-    for (int i = 0; i < npages; i++) {
-      Page* p = evacuation_candidates_[i];
-      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
-      p->ClearEvacuationCandidate();
-      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
-    }
-    compacting_ = false;
-    evacuation_candidates_.Rewind(0);
-  }
-  ASSERT_EQ(0, evacuation_candidates_.length());
-}
-
-
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.
@@ -463,14 +447,32 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
 }
 
 
+void MarkCompactCollector::AbortCompaction() {
+  if (compacting_) {
+    int npages = evacuation_candidates_.length();
+    for (int i = 0; i < npages; i++) {
+      Page* p = evacuation_candidates_[i];
+      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+      p->ClearEvacuationCandidate();
+      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    }
+    compacting_ = false;
+    evacuation_candidates_.Rewind(0);
+    invalidated_code_.Rewind(0);
+  }
+  ASSERT_EQ(0, evacuation_candidates_.length());
+}
+
+
 void MarkCompactCollector::Prepare(GCTracer* tracer) {
   FLAG_flush_code = false;
 
+  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
+
   // Disable collection of maps if incremental marking is enabled.
   // Map collection algorithm relies on a special map transition tree traversal
   // order which is not implemented for incremental marking.
-  collect_maps_ = FLAG_collect_maps &&
-                  !heap()->incremental_marking()->IsMarking();
+  collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
 
   // Rather than passing the tracer around we stash it in a static member
   // variable.
@@ -491,13 +493,18 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
 #endif
 
   // Clear marking bits for precise sweeping to collect all garbage.
-  if (heap()->incremental_marking()->IsMarking() && PreciseSweepingRequired()) {
+  if (was_marked_incrementally_ && PreciseSweepingRequired()) {
     heap()->incremental_marking()->Abort();
     ClearMarkbits(heap_);
     AbortCompaction();
+    was_marked_incrementally_ = false;
   }
 
-  if (!FLAG_never_compact) StartCompaction();
+  // Don't start compaction if we are in the middle of incremental
+  // marking cycle. We did not collect any slots.
+  if (!FLAG_never_compact && !was_marked_incrementally_) {
+    StartCompaction();
+  }
 
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next();
@@ -507,7 +514,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
   }
 
 #ifdef DEBUG
-  if (!heap()->incremental_marking()->IsMarking()) {
+  if (!was_marked_incrementally_) {
     VerifyMarkbitsAreClean();
   }
 #endif
@@ -1973,7 +1980,7 @@ void MarkCompactCollector::MarkLiveObjects() {
 
   bool incremental_marking_overflowed = false;
   IncrementalMarking* incremental_marking = heap_->incremental_marking();
-  if (incremental_marking->IsMarking()) {
+  if (was_marked_incrementally_) {
     // Finalize the incremental marking and check whether we had an overflow.
     // Both markers use grey color to mark overflowed objects so
     // non-incremental marker can deal with them as if overflow
@@ -2844,7 +2851,125 @@ static void SweepPrecisely(PagedSpace* space,
 }
 
 
+static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
+  Page* p = Page::FromAddress(code->address());
+
+  if (p->IsEvacuationCandidate() ||
+      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+    return false;
+  }
+
+  Address code_start = code->address();
+  Address code_end = code_start + code->Size();
+
+  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
+  uint32_t end_index =
+      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
+
+  Bitmap* b = p->markbits();
+
+  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
+  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
+
+  MarkBit::CellType* start_cell = start_mark_bit.cell();
+  MarkBit::CellType* end_cell = end_mark_bit.cell();
+
+  if (value) {
+    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
+    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
+
+    if (start_cell == end_cell) {
+      *start_cell |= start_mask & end_mask;
+    } else {
+      *start_cell |= start_mask;
+      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
+        *cell = ~0;
+      }
+      *end_cell |= end_mask;
+    }
+  } else {
+    for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
+      *cell = 0;
+    }
+  }
+
+  return true;
+}
+
+
+static bool IsOnInvalidatedCodeObject(Address addr) {
+  // We did not record any slots in large objects thus
+  // we can safely go to the page from the slot address.
+  Page* p = Page::FromAddress(addr);
+
+  // First check owner's identity because old pointer and old data spaces
+  // are swept lazily and might still have non-zero mark-bits on some
+  // pages.
+  if (p->owner()->identity() != CODE_SPACE) return false;
+
+  // In code space only bits on evacuation candidates (but we don't record
+  // any slots on them) and under invalidated code objects are non-zero.
+  MarkBit mark_bit =
+      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
+
+  return mark_bit.Get();
+}
+
+
+void MarkCompactCollector::InvalidateCode(Code* code) {
+  if (heap_->incremental_marking()->IsCompacting() &&
+      !ShouldSkipEvacuationSlotRecording(code)) {
+    ASSERT(compacting_);
+
+    // If the object is white then no slots were recorded on it yet.
+    MarkBit mark_bit = Marking::MarkBitFrom(code);
+    if (Marking::IsWhite(mark_bit)) return;
+
+    invalidated_code_.Add(code);
+  }
+}
+
+
+bool MarkCompactCollector::MarkInvalidatedCode() {
+  bool code_marked = false;
+
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+
+    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
+      code_marked = true;
+    }
+  }
+
+  return code_marked;
+}
+
+
+void MarkCompactCollector::RemoveDeadInvalidatedCode() {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
+  }
+}
+
+
+void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+    if (code != NULL) {
+      code->Iterate(visitor);
+      SetMarkBitsUnderInvalidatedCode(code, false);
+    }
+  }
+  invalidated_code_.Rewind(0);
+}
+
+
 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+  bool code_slots_filtering_required = MarkInvalidatedCode();
+
   EvacuateNewSpace();
   EvacuatePages();
 
@@ -2874,12 +2999,26 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
   }
 
-  SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_);
+  SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+                                     migration_slots_buffer_,
+                                     code_slots_filtering_required);
   if (FLAG_trace_fragmentation) {
     PrintF(" migration slots buffer: %d\n",
            SlotsBuffer::SizeOfChain(migration_slots_buffer_));
   }
 
+  if (compacting_ && was_marked_incrementally_) {
+    // It's difficult to filter out slots recorded for large objects.
+    LargeObjectIterator it(heap_->lo_space());
+    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+      Page* p = Page::FromAddress(obj->address());
+      if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+        obj->Iterate(&updating_visitor);
+        p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+      }
+    }
+  }
+
   int npages = evacuation_candidates_.length();
   for (int i = 0; i < npages; i++) {
     Page* p = evacuation_candidates_[i];
@@ -2887,7 +3026,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
 
     if (p->IsEvacuationCandidate()) {
-      SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer());
+      SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+                                         p->slots_buffer(),
+                                         code_slots_filtering_required);
       if (FLAG_trace_fragmentation) {
         PrintF(" page %p slots buffer: %d\n",
                reinterpret_cast<void*>(p),
@@ -2959,6 +3100,10 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   EvacuationWeakObjectRetainer evacuation_object_retainer;
   heap()->ProcessWeakReferences(&evacuation_object_retainer);
 
+  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
+  // under it.
+  ProcessInvalidatedCode(&updating_visitor);
+
 #ifdef DEBUG
   if (FLAG_verify_heap) {
     VerifyEvacuation(heap_);
@@ -3480,11 +3625,16 @@
   // non-live objects.
   SweepSpace(heap()->old_pointer_space(), how_to_sweep);
   SweepSpace(heap()->old_data_space(), how_to_sweep);
+
+  RemoveDeadInvalidatedCode();
   SweepSpace(heap()->code_space(), PRECISE);
+
   SweepSpace(heap()->cell_space(), PRECISE);
+
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
     EvacuateNewSpaceAndCandidates();
   }
+
   // ClearNonLiveTransitions depends on precise sweeping of map space to
   // detect whether unmarked map became dead in this collection or in one
   // of the previous ones.
@@ -3626,6 +3776,29 @@ void SlotsBuffer::UpdateSlots(Heap* heap) {
 }
 
 
+void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
+  PointersUpdatingVisitor v(heap);
+
+  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+    ObjectSlot slot = slots_[slot_idx];
+    if (!IsTypedSlot(slot)) {
+      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
+        UpdateSlot(slot);
+      }
+    } else {
+      ++slot_idx;
+      ASSERT(slot_idx < idx_);
+      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
+      if (!IsOnInvalidatedCodeObject(pc)) {
+        UpdateSlot(&v,
+                   DecodeSlotType(slot),
+                   reinterpret_cast<Address>(slots_[slot_idx]));
+      }
+    }
+  }
+}
+
+
 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
   return new SlotsBuffer(next_buffer);
 }